This article collects typical code examples showing how the Python function statsmodels.compat.python.lrange is used. If you are wondering what lrange does, how to call it, or what real uses of it look like, the curated examples below should help.
Fifteen code examples of the lrange function are shown below, sorted by popularity by default.
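Before the examples, a quick orientation: lrange is a small Python 2/3 compatibility shim that eagerly materializes a range as a list. A minimal sketch of its behavior (assuming a statsmodels version that still ships this compat helper):

from statsmodels.compat.python import lrange

# lrange(*args) behaves like list(range(*args)): a concrete list rather than a
# lazy range object, which the examples below index, pop, and pass to pandas.
print(lrange(5))       # [0, 1, 2, 3, 4]
print(lrange(1, 8))    # [1, 2, 3, 4, 5, 6, 7]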
Example 1: test_wls_example
def test_wls_example():
    # example from the docstring, there was a note about a bug, should
    # be fixed now
    Y = [1, 3, 4, 5, 2, 3, 4]
    X = lrange(1, 8)
    X = add_constant(X, prepend=False)
    wls_model = WLS(Y, X, weights=lrange(1, 8)).fit()
    # taken from R lm.summary
    assert_almost_equal(wls_model.fvalue, 0.127337843215, 6)
    assert_almost_equal(wls_model.scale, 2.44608530786**2, 6)
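A hypothetical side note on the inputs this test builds (not part of the test file itself): lrange(1, 8) is simply the list [1, ..., 7], and add_constant with prepend=False appends the column of ones after the regressor.

import numpy as np
from statsmodels.compat.python import lrange
from statsmodels.tools.tools import add_constant

x = np.asarray(lrange(1, 8), dtype=float)    # [1., 2., ..., 7.]
X = add_constant(x, prepend=False)           # constant appended as last column
print(X[:2])                                 # [[1. 1.]
                                             #  [2. 1.]]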
Example 2: test_arma_order_select_ic
def test_arma_order_select_ic():
    # smoke test, assumes info-criteria are right
    from statsmodels.tsa.arima_process import arma_generate_sample

    arparams = np.array([.75, -.25])
    maparams = np.array([.65, .35])
    arparams = np.r_[1, -arparams]
    maparams = np.r_[1, maparams]
    nobs = 250
    np.random.seed(2014)
    y = arma_generate_sample(arparams, maparams, nobs)
    res = arma_order_select_ic(y, ic=['aic', 'bic'], trend='nc')
    # regression tests in case we change algorithm to minic in sas
    aic_x = np.array([[np.nan, 552.7342255, 484.29687843],
                      [562.10924262, 485.5197969, 480.32858497],
                      [507.04581344, 482.91065829, 481.91926034],
                      [484.03995962, 482.14868032, 483.86378955],
                      [481.8849479, 483.8377379, 485.83756612]])
    bic_x = np.array([[np.nan, 559.77714733, 494.86126118],
                      [569.15216446, 496.08417966, 494.41442864],
                      [517.61019619, 496.99650196, 499.52656493],
                      [498.12580329, 499.75598491, 504.99255506],
                      [499.49225249, 504.96650341, 510.48779255]])
    aic = DataFrame(aic_x, index=lrange(5), columns=lrange(3))
    bic = DataFrame(bic_x, index=lrange(5), columns=lrange(3))
    assert_almost_equal(res.aic.values, aic.values, 5)
    assert_almost_equal(res.bic.values, bic.values, 5)
    assert_equal(res.aic_min_order, (1, 2))
    assert_equal(res.bic_min_order, (1, 2))
    assert_(res.aic.index.equals(aic.index))
    assert_(res.aic.columns.equals(aic.columns))
    assert_(res.bic.index.equals(bic.index))
    assert_(res.bic.columns.equals(bic.columns))

    index = pd.date_range('2000-1-1', freq='M', periods=len(y))
    y_series = pd.Series(y, index=index)
    res_pd = arma_order_select_ic(y_series, max_ar=2, max_ma=1,
                                  ic=['aic', 'bic'], trend='nc')
    assert_almost_equal(res_pd.aic.values, aic.values[:3, :2], 5)
    assert_almost_equal(res_pd.bic.values, bic.values[:3, :2], 5)
    assert_equal(res_pd.aic_min_order, (2, 1))
    assert_equal(res_pd.bic_min_order, (1, 1))

    res = arma_order_select_ic(y, ic='aic', trend='nc')
    assert_almost_equal(res.aic.values, aic.values, 5)
    assert_(res.aic.index.equals(aic.index))
    assert_(res.aic.columns.equals(aic.columns))
    assert_equal(res.aic_min_order, (1, 2))
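The only lrange usage here is as integer row and column labels (AR and MA orders) for the reference DataFrames. A minimal sketch with placeholder values rather than the test's regression numbers:

import numpy as np
from pandas import DataFrame
from statsmodels.compat.python import lrange

ref = DataFrame(np.zeros((5, 3)), index=lrange(5), columns=lrange(3))
print(ref.index.tolist())     # [0, 1, 2, 3, 4]
print(ref.columns.tolist())   # [0, 1, 2]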
Example 3: print_ic_table
def print_ic_table(ics, selected_orders):
    """
    For VAR order selection
    """
    # Can factor this out into a utility method if so desired
    cols = sorted(ics)

    data = mat([["%#10.4g" % v for v in ics[c]] for c in cols],
               dtype=object).T

    # star the minimum of each information criterion
    for i, col in enumerate(cols):
        idx = int(selected_orders[col]), i
        data[idx] = data[idx] + '*'
        # data[idx] = data[idx][:-1] + '*'  # super hack, ugh

    fmt = dict(_default_table_fmt,
               data_fmts=("%s",) * len(cols))

    buf = StringIO()
    table = SimpleTable(data, cols, lrange(len(data)),
                        title='VAR Order Selection', txt_fmt=fmt)
    buf.write(str(table) + '\n')
    buf.write('* Minimum' + '\n')

    print(buf.getvalue())
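Here lrange(len(data)) supplies the row stubs (the candidate lag orders 0, 1, ...) for SimpleTable. A small self-contained sketch with made-up table entries:

from statsmodels.compat.python import lrange
from statsmodels.iolib.table import SimpleTable

data = [["    1.234", "    1.456"],
        ["    1.111*", "    1.222*"]]
table = SimpleTable(data, ["aic", "bic"], lrange(len(data)),
                    title="VAR Order Selection")
print(table)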
Example 4: _plot_leverage_resid2
def _plot_leverage_resid2(results, influence, alpha=.05, ax=None,
                          **kwargs):
    from scipy.stats import zscore, norm
    fig, ax = utils.create_mpl_ax(ax)

    infl = influence
    leverage = infl.hat_matrix_diag
    resid = zscore(infl.resid)
    ax.plot(resid**2, leverage, 'o', **kwargs)
    ax.set_xlabel("Normalized residuals**2")
    ax.set_ylabel("Leverage")
    ax.set_title("Leverage vs. Normalized residuals squared")

    large_leverage = leverage > _high_leverage(results)
    # norm or t here if standardized?
    cutoff = norm.ppf(1. - alpha / 2)
    large_resid = np.abs(resid) > cutoff
    labels = results.model.data.row_labels
    if labels is None:
        labels = lrange(int(results.nobs))
    index = np.where(np.logical_or(large_leverage, large_resid))[0]
    ax = utils.annotate_axes(index, labels, lzip(resid**2, leverage),
                             [(0, 5)] * int(results.nobs), "large",
                             ax=ax, ha="center", va="bottom")
    ax.margins(.075, .075)
    return fig
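The outlier cutoff used above is a two-sided standard-normal critical value; for the default alpha = 0.05 it works out to roughly 1.96:

from scipy.stats import norm

alpha = 0.05
cutoff = norm.ppf(1. - alpha / 2)
print(round(cutoff, 3))   # 1.96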
Example 5: _make_predict_dates
def _make_predict_dates(self):
    data = self.data
    dtstart = data.predict_start
    dtend = data.predict_end
    freq = data.freq

    if freq is not None:
        pandas_freq = _freq_to_pandas[freq]
        try:
            from pandas import DatetimeIndex
            dates = DatetimeIndex(start=dtstart, end=dtend,
                                  freq=pandas_freq)
        except ImportError as err:
            # fall back to the older pandas API
            from pandas import DateRange
            dates = DateRange(dtstart, dtend, offset=pandas_freq).values
    # handle integer-valued start/end dates
    elif freq is None and (isinstance(dtstart, int) and
                           isinstance(dtend, int)):
        from pandas import Index
        dates = Index(lrange(dtstart, dtend + 1))
    # if freq is None and dtstart and dtend aren't integers, we're
    # in sample
    else:
        dates = self.data.dates
        start = self._get_dates_loc(dates, dtstart)
        end = self._get_dates_loc(dates, dtend)
        dates = dates[start:end + 1]  # is this index inclusive?
    self.data.predict_dates = dates
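In the integer-date branch, lrange(dtstart, dtend + 1) builds an inclusive run of integer "dates" for the pandas Index. A tiny sketch with arbitrary endpoints:

from pandas import Index
from statsmodels.compat.python import lrange

dtstart, dtend = 3, 7
predict_dates = Index(lrange(dtstart, dtend + 1))
print(predict_dates.tolist())   # [3, 4, 5, 6, 7]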
Example 6: maineffect_func
def maineffect_func(value, reference=reference):
    rvalue = []
    keep = lrange(value.shape[0])
    keep.pop(reference)
    for i in range(len(keep)):
        rvalue.append(value[keep[i]] - value[reference])
    return np.array(rvalue)
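lrange matters here because the result must be a mutable list: the reference level is removed with .pop, which a Python 3 range object does not support. A minimal sketch with an assumed reference index of 0:

from statsmodels.compat.python import lrange

reference = 0
keep = lrange(3)        # [0, 1, 2] as a real list
keep.pop(reference)     # drop the reference level
print(keep)             # [1, 2]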
Example 7: _make_predict_dates
def _make_predict_dates(self):
    data = self.data
    dtstart = data.predict_start
    dtend = data.predict_end
    freq = data.freq

    if freq is not None:
        pandas_freq = _freq_to_pandas[freq]
        # preserve PeriodIndex or DatetimeIndex
        dates = self.data.dates.__class__(start=dtstart,
                                          end=dtend,
                                          freq=pandas_freq)
    # handle integer-valued start/end dates
    elif freq is None and (isinstance(dtstart, (int, long)) and
                           isinstance(dtend, (int, long))):
        from pandas import Index
        dates = Index(lrange(dtstart, dtend + 1))
    # if freq is None and dtstart and dtend aren't integers, we're
    # in sample
    else:
        dates = self.data.dates
        start = self._get_dates_loc(dates, dtstart)
        end = self._get_dates_loc(dates, dtend)
        dates = dates[start:end + 1]  # is this index inclusive?
    self.data.predict_dates = dates
Example 8: test_pickle
def test_pickle():
    import tempfile
    from numpy.testing import assert_equal
    tmpdir = tempfile.mkdtemp(prefix='pickle')
    a = lrange(10)
    save_pickle(a, tmpdir + '/res.pkl')
    b = load_pickle(tmpdir + '/res.pkl')
    assert_equal(a, b)

    # cleanup, tested on Windows
    try:
        import os
        os.remove(tmpdir + '/res.pkl')
        os.rmdir(tmpdir)
    except (OSError, IOError):
        pass
    assert not os.path.exists(tmpdir)

    # test with file handle
    fh = BytesIO()
    save_pickle(a, fh)
    fh.seek(0, 0)
    c = load_pickle(fh)
    fh.close()
    assert_equal(a, c)
Example 9: plot_with_error
def plot_with_error(y, error, x=None, axes=None, value_fmt='k',
                    error_fmt='k--', alpha=0.05, stderr_type='asym'):
    """
    Make plot with optional error bars

    Parameters
    ----------
    y : array_like
        Series of values to plot.
    error : array or None
        Error band; interpretation depends on `stderr_type`.
    """
    import matplotlib.pyplot as plt

    if axes is None:
        axes = plt.gca()

    x = x if x is not None else lrange(len(y))
    plot_action = lambda y, fmt: axes.plot(x, y, fmt)
    plot_action(y, value_fmt)

    # changed this
    if error is not None:
        if stderr_type == 'asym':
            q = util.norm_signif_level(alpha)
            plot_action(y - q * error, error_fmt)
            plot_action(y + q * error, error_fmt)
        if stderr_type in ('mc', 'sz1', 'sz2', 'sz3'):
            plot_action(error[0], error_fmt)
            plot_action(error[1], error_fmt)
Example 10: irf_grid_plot
def irf_grid_plot(values, stderr, impcol, rescol, names, title,
                  signif=0.05, hlines=None, subplot_params=None,
                  plot_params=None, figsize=(10, 10), stderr_type='asym'):
    """
    Reusable function to make flexible grid plots of impulse responses and
    cumulative effects

    values : (T + 1) x k x k
    stderr : T x k x k
    hlines : k x k
    """
    import matplotlib.pyplot as plt

    if subplot_params is None:
        subplot_params = {}
    if plot_params is None:
        plot_params = {}

    nrows, ncols, to_plot = _get_irf_plot_config(names, impcol, rescol)

    fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=True,
                             squeeze=False, figsize=figsize)

    # fill out space
    adjust_subplots()

    fig.suptitle(title, fontsize=14)

    subtitle_temp = r'%s$\rightarrow$%s'

    k = len(names)
    rng = lrange(len(values))
    for (j, i, ai, aj) in to_plot:
        ax = axes[ai][aj]

        # HACK?
        if stderr is not None:
            if stderr_type == 'asym':
                sig = np.sqrt(stderr[:, j * k + i, j * k + i])
                plot_with_error(values[:, i, j], sig, x=rng, axes=ax,
                                alpha=signif, value_fmt='b',
                                stderr_type=stderr_type)
            if stderr_type in ('mc', 'sz1', 'sz2', 'sz3'):
                errs = stderr[0][:, i, j], stderr[1][:, i, j]
                plot_with_error(values[:, i, j], errs, x=rng, axes=ax,
                                alpha=signif, value_fmt='b',
                                stderr_type=stderr_type)
        else:
            plot_with_error(values[:, i, j], None, x=rng, axes=ax,
                            value_fmt='b')

        ax.axhline(0, color='k')

        if hlines is not None:
            ax.axhline(hlines[i, j], color='k')

        sz = subplot_params.get('fontsize', 12)
        ax.set_title(subtitle_temp % (names[j], names[i]), fontsize=sz)

    return fig
Example 11: _maybe_reset_index
def _maybe_reset_index(data):
    """
    All the Rdatasets have the integer row.labels from R if there is no
    real index. Strip this for a zero-based index
    """
    if data.index.equals(Index(lrange(1, len(data) + 1))):
        data = data.reset_index(drop=True)
    return data
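A self-contained sketch of the check above: R-exported datasets carry a 1-based integer index, which is swapped for pandas' default 0-based index (the toy frame here is hypothetical):

import pandas as pd
from pandas import Index
from statsmodels.compat.python import lrange

data = pd.DataFrame({"x": [10, 20, 30]}, index=[1, 2, 3])
if data.index.equals(Index(lrange(1, len(data) + 1))):
    data = data.reset_index(drop=True)
print(data.index.tolist())   # [0, 1, 2]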
Example 12: variables
def variables(self):
    """
    Returns a list of the dataset's StataVariables objects.
    """
    return lmap(_StataVariable, zip(lrange(self._header['nvar']),
                self._header['typlist'], self._header['varlist'],
                self._header['srtlist'],
                self._header['fmtlist'], self._header['lbllist'],
                self._header['vlblist']))
Example 13: __iter__
def __iter__(self):
    n = self.n
    p = self.p
    comb = combinations(lrange(n), p)
    for idx in comb:
        test_index = np.zeros(n, dtype=np.bool)
        test_index[np.array(idx)] = True
        train_index = np.logical_not(test_index)
        yield train_index, test_index
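A sketch of the leave-p-out split logic for a hypothetical n = 4, p = 2 (np.bool is replaced by the plain bool dtype here to avoid the deprecated alias):

import numpy as np
from itertools import combinations
from statsmodels.compat.python import lrange

n, p = 4, 2
for idx in combinations(lrange(n), p):
    test_index = np.zeros(n, dtype=bool)
    test_index[np.array(idx)] = True
    train_index = np.logical_not(test_index)
    print(idx, test_index)
# (0, 1) [ True  True False False]
# (0, 2) [ True False  True False]
# ... six splits in total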
Example 14: test__reduce_dict
def test__reduce_dict():
    data = OrderedDict(zip(list(product('mf', 'oy', 'wn')), [1] * 8))
    eq(_reduce_dict(data, ('m',)), 4)
    eq(_reduce_dict(data, ('m', 'o')), 2)
    eq(_reduce_dict(data, ('m', 'o', 'w')), 1)
    data = OrderedDict(zip(list(product('mf', 'oy', 'wn')), lrange(8)))
    eq(_reduce_dict(data, ('m',)), 6)
    eq(_reduce_dict(data, ('m', 'o')), 1)
    eq(_reduce_dict(data, ('m', 'o', 'w')), 0)
Example 15: date_range_str
def date_range_str(start, end=None, length=None):
    """
    Returns a list of abbreviated date strings.

    Parameters
    ----------
    start : str
        The first abbreviated date, for instance, '1965q1' or '1965m1'
    end : str, optional
        The last abbreviated date if length is None.
    length : int, optional
        The length of the returned array if end is None.

    Returns
    -------
    date_range : list
        List of strings
    """
    flags = re.IGNORECASE | re.VERBOSE
    #_check_range_inputs(end, length, freq)
    start = start.lower()
    if re.search(_m_pattern, start, flags):
        annual_freq = 12
        split = 'm'
    elif re.search(_q_pattern, start, flags):
        annual_freq = 4
        split = 'q'
    elif re.search(_y_pattern, start, flags):
        annual_freq = 1
        start += 'a1'  # hack
        if end:
            end += 'a1'
        split = 'a'
    else:
        raise ValueError("Date %s not understood" % start)

    yr1, offset1 = lmap(int, start.replace(":", "").split(split))
    if end is not None:
        end = end.lower()
        yr2, offset2 = lmap(int, end.replace(":", "").split(split))
        length = (yr2 - yr1) * annual_freq + offset2
    elif length:
        yr2 = yr1 + length // annual_freq
        offset2 = length % annual_freq + (offset1 - 1)

    years = np.repeat(lrange(yr1 + 1, yr2), annual_freq).tolist()
    years = np.r_[[str(yr1)] * (annual_freq + 1 - offset1), years]  # tack on first year
    years = np.r_[years, [str(yr2)] * offset2]  # tack on last year
    if split != 'a':
        offset = np.tile(np.arange(1, annual_freq + 1), yr2 - yr1 - 1)
        offset = np.r_[np.arange(offset1, annual_freq + 1).astype('a2'), offset]
        offset = np.r_[offset, np.arange(1, offset2 + 1).astype('a2')]
        date_arr_range = [''.join([i, split, asstr(j)]) for i, j in
                          zip(years, offset)]
    else:
        date_arr_range = years.tolist()
    return date_arr_range
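A hedged usage sketch (the import path below assumes the older statsmodels.tsa.base.datetools module this helper lived in; newer releases may not expose it):

from statsmodels.tsa.base.datetools import date_range_str

print(date_range_str('1965q1', '1966q4'))
# expected: ['1965q1', '1965q2', '1965q3', '1965q4',
#            '1966q1', '1966q2', '1966q3', '1966q4']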