This page collects typical usage examples of the statsmodels.compat.python.range function in Python. If you are wrestling with questions such as what range does, how it is used, and what calling code looks like in practice, the curated examples below may help.
The 15 code examples of the range function shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: generate_ordinal
import numpy as np
from scipy.stats import norm

def generate_ordinal():
    # Regression coefficients
    beta = np.zeros(5, dtype=np.float64)
    beta[2] = 1
    beta[4] = -1

    rz = 0.5

    OUT = open("gee_ordinal_1.csv", "w")
    for i in range(200):
        n = np.random.randint(3, 6)  # Cluster size
        x = np.random.normal(size=(n, 5))
        for j in range(5):
            x[:, j] += np.random.normal()
        pr = np.dot(x, beta)
        pr = np.array([1, 0, -0.5]) + pr[:, None]
        pr = 1 / (1 + np.exp(-pr))

        # within-cluster correlation via a shared latent normal
        z = rz * np.random.normal() + \
            np.sqrt(1 - rz**2) * np.random.normal(size=n)
        u = norm.cdf(z)
        y = (u[:, None] > pr).sum(1)

        for j in range(n):
            OUT.write("%d,%d," % (i, y[j]))
            OUT.write(",".join(["%.3f" % b for b in x[j, :]]) + "\n")
    OUT.close()
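The key step above is the latent-threshold comparison: each observation's uniform draw u is checked against the per-category cumulative probabilities, and the count of exceedances is the ordinal outcome. A tiny illustration with made-up numbers:

import numpy as np

pr = np.array([0.8, 0.5, 0.2])  # cumulative probabilities at the three cutpoints (illustrative)
u = 0.6                         # one observation's uniform draw
print((u > pr).sum())           # -> 2: u clears two of the three thresholds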
Example 2: levinson_durbin_nitime
import numpy as np
from statsmodels.tsa.stattools import acovf

def levinson_durbin_nitime(s, order=10, isacov=False):
    '''Levinson-Durbin recursion for autoregressive processes
    '''
    # from nitime
    ## if sxx is not None and type(sxx) == np.ndarray:
    ##     sxx_m = sxx[:order+1]
    ## else:
    ##     sxx_m = ut.autocov(s)[:order+1]
    if isacov:
        sxx_m = s
    else:
        sxx_m = acovf(s)[:order + 1]  # not tested

    phi = np.zeros((order + 1, order + 1), 'd')
    sig = np.zeros(order + 1)
    # initial points for the recursion
    phi[1, 1] = sxx_m[1] / sxx_m[0]
    sig[1] = sxx_m[0] - phi[1, 1] * sxx_m[1]
    for k in range(2, order + 1):
        phi[k, k] = (sxx_m[k] - np.dot(phi[1:k, k-1], sxx_m[1:k][::-1])) / sig[k-1]
        for j in range(1, k):
            phi[j, k] = phi[j, k-1] - phi[k, k] * phi[k-j, k-1]
        sig[k] = sig[k-1] * (1 - phi[k, k]**2)

    sigma_v = sig[-1]
    arcoefs = phi[1:, -1]
    # pacf was returned but never computed in the original excerpt; the
    # diagonal of phi gives the partial autocorrelations (as in
    # statsmodels.tsa.stattools.levinson_durbin)
    pacf = np.diag(phi).copy()
    pacf[0] = 1.
    return sigma_v, arcoefs, pacf, phi  # return everything
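A quick sanity check of the recursion (an illustrative sketch, not from the original page): on a simulated AR(1) series the leading AR coefficient should recover the true value.

import numpy as np

np.random.seed(0)
e = np.random.randn(5000)
y = np.zeros_like(e)
for t in range(1, len(e)):          # simulate y_t = 0.6*y_{t-1} + e_t
    y[t] = 0.6 * y[t - 1] + e[t]

sigma_v, arcoefs, pacf, phi = levinson_durbin_nitime(y, order=5)
print(arcoefs[0])                   # should be close to 0.6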
Example 3: dataset
def dataset(self, as_dict=False):
    """
    Returns a Python generator object for iterating over the dataset.

    Parameters
    ----------
    as_dict : bool, optional
        If as_dict is True, yield each row of observations as a dict.
        If False, yields each row of observations as a list.

    Returns
    -------
    Generator object for iterating over the dataset. Yields each row of
    observations as a list by default.

    Notes
    -----
    If missing_values is True during instantiation of StataReader then
    observations with _StataMissingValue(s) are not filtered and should
    be handled by your application.
    """
    try:
        self._file.seek(self._data_location)
    except Exception:
        pass

    if as_dict:
        vars = lmap(str, self.variables())
        for i in range(len(self)):
            yield dict(zip(vars, self._next()))
    else:
        for i in range(self._header['nobs']):
            yield self._next()
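A hedged usage sketch (StataReader lived in the legacy statsmodels.iolib.foreign module and has since been removed in favor of pandas.read_stata; the file name is illustrative):

from statsmodels.iolib.foreign import StataReader  # legacy API, removed in recent releases

with open('example.dta', 'rb') as f:
    reader = StataReader(f)
    for row in reader.dataset(as_dict=True):
        print(row)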
Example 4: prob_quantize_cdf_old
import numpy as np
# prob_bv_rectangle (rectangle probability from a joint cdf) is defined in
# the same statsmodels sandbox module; a minimal version is sketched after
# Example 5 below.

def prob_quantize_cdf_old(binsx, binsy, cdf):
    '''quantize a continuous distribution given by a cdf

    old version without precomputing cdf values

    Parameters
    ----------
    binsx : array_like, 1d
        binedges
    binsy : array_like, 1d
        binedges
    '''
    binsx = np.asarray(binsx)
    binsy = np.asarray(binsy)
    nx = len(binsx) - 1
    ny = len(binsy) - 1
    probs = np.nan * np.ones((nx, ny))  # np.empty((nx, ny))

    for xind in range(1, nx + 1):
        for yind in range(1, ny + 1):
            upper = (binsx[xind], binsy[yind])
            lower = (binsx[xind - 1], binsy[yind - 1])
            #print upper,lower,
            probs[xind - 1, yind - 1] = prob_bv_rectangle(lower, upper, cdf)

    assert not np.isnan(probs).any()
    return probs
Example 5: prob_quantize_cdf
import numpy as np

def prob_quantize_cdf(binsx, binsy, cdf):
    '''quantize a continuous distribution given by a cdf

    Parameters
    ----------
    binsx : array_like, 1d
        binedges
    binsy : array_like, 1d
        binedges
    '''
    binsx = np.asarray(binsx)
    binsy = np.asarray(binsy)
    nx = len(binsx) - 1
    ny = len(binsy) - 1
    probs = np.nan * np.ones((nx, ny))  # np.empty((nx, ny))

    # precompute the cdf on the grid, then look values up by index
    cdf_values = cdf(binsx[:, None], binsy)
    cdf_func = lambda x, y: cdf_values[x, y]
    for xind in range(1, nx + 1):
        for yind in range(1, ny + 1):
            upper = (xind, yind)
            lower = (xind - 1, yind - 1)
            #print upper,lower,
            probs[xind - 1, yind - 1] = prob_bv_rectangle(lower, upper, cdf_func)

    assert not np.isnan(probs).any()
    return probs
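To make Examples 4 and 5 runnable, prob_bv_rectangle must be in scope. Below is a minimal version based on the standard inclusion-exclusion formula for a rectangle probability, which is what the helper of the same name in statsmodels' sandbox computes, followed by a sanity check on an independent-uniform cdf (all values illustrative):

import numpy as np

def prob_bv_rectangle(lower, upper, cdf):
    # P(lower < (X, Y) <= upper) by inclusion-exclusion on the joint cdf
    return (cdf(upper[0], upper[1]) - cdf(upper[0], lower[1])
            - cdf(lower[0], upper[1]) + cdf(lower[0], lower[1]))

unif_cdf = lambda x, y: np.clip(x, 0, 1) * np.clip(y, 0, 1)  # independent U(0,1) pair
bins = np.linspace(0, 1, 5)
probs = prob_quantize_cdf(bins, bins, unif_cdf)
print(probs)  # 4x4 grid, each cell should be ~1/16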
Example 6: prob_mv_grid
import numpy as np
from statsmodels.compat.python import lmap

def prob_mv_grid(bins, cdf, axis=-1):
    '''helper function for probability of a rectangle grid in a
    multivariate distribution

    how does this generalize to more than 2 variates ?

    bins : tuple
        tuple of bin edges, currently it is assumed that they broadcast
        correctly
    '''
    if not isinstance(bins, np.ndarray):
        bins = lmap(np.asarray, bins)
        n_dim = len(bins)
        bins_ = []
        # broadcast if binedges are 1d
        if all(lmap(np.ndim, bins) == np.ones(n_dim)):
            for d in range(n_dim):
                sl = [None] * n_dim
                sl[d] = slice(None)
                # tuple() is required; indexing with a plain list is an
                # error in modern numpy
                bins_.append(bins[d][tuple(sl)])
    else:  # assume it is already correctly broadcasted
        n_dim = bins.shape[0]
        bins_ = bins

    print(len(bins))  # debug output left in the original
    cdf_values = cdf(bins_)
    probs = cdf_values.copy()
    for d in range(n_dim):
        probs = np.diff(probs, axis=d)
    return probs
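The broadcasting trick means an n-dimensional grid falls out of 1-d edge vectors; a hedged check with the independent-uniform cdf from the previous sketch (note the function's leftover debug print will also fire):

import numpy as np

unif_cdf = lambda b: np.clip(b[0], 0, 1) * np.clip(b[1], 0, 1)  # cdf taking the list bins_
edges = np.linspace(0, 1, 5)
probs = prob_mv_grid((edges, edges), unif_cdf)
print(probs.sum())  # ~1.0: the 4x4 cells cover the whole unit square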
Example 7: _prepare_structured_array
def _prepare_structured_array(self, data):
    self.nobs = len(data)
    self.nvar = len(data.dtype)
    self.data = data
    self.datarows = iter(data)
    dtype = data.dtype
    descr = dtype.descr
    if dtype.names is None:
        varlist = _default_names(self.nvar)
    else:
        varlist = dtype.names

    # check for datetime and change the type
    convert_dates = self._convert_dates
    if convert_dates is not None:
        convert_dates = _maybe_convert_to_int_keys(convert_dates,
                                                   varlist)
        self._convert_dates = convert_dates
        for key in convert_dates:
            descr[key] = (
                descr[key][0],
                _convert_datetime_to_stata_type(convert_dates[key])
            )
        dtype = np.dtype(descr)

    self.varlist = varlist
    self.typlist = [_dtype_to_stata_type(dtype[i])
                    for i in range(self.nvar)]
    self.fmtlist = [_dtype_to_default_stata_fmt(dtype[i])
                    for i in range(self.nvar)]
    # set the given format for the datetime cols
    if convert_dates is not None:
        for key in convert_dates:
            self.fmtlist[key] = convert_dates[key]
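The method leans on numpy's structured-array dtype.descr, which it rewrites entry by entry for datetime columns; a minimal illustration of that attribute:

import numpy as np

data = np.array([(1, 2.5)], dtype=[('a', 'i4'), ('b', 'f8')])
print(data.dtype.descr)  # [('a', '<i4'), ('b', '<f8')] -- a mutable list of (name, type) pairs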
Example 8: test_generate_sample
def test_generate_sample(self):
    process = ArmaProcess.from_coeffs([0.9])
    np.random.seed(12345)
    sample = process.generate_sample()
    np.random.seed(12345)
    expected = np.random.randn(100)
    for i in range(1, 100):
        expected[i] = 0.9 * expected[i - 1] + expected[i]
    assert_almost_equal(sample, expected)

    process = ArmaProcess.from_coeffs([1.6, -0.9])
    np.random.seed(12345)
    sample = process.generate_sample()
    np.random.seed(12345)
    expected = np.random.randn(100)
    expected[1] = 1.6 * expected[0] + expected[1]
    for i in range(2, 100):
        expected[i] = 1.6 * expected[i - 1] - 0.9 * expected[i - 2] + expected[i]
    assert_almost_equal(sample, expected)

    process = ArmaProcess.from_coeffs([1.6, -0.9])
    np.random.seed(12345)
    sample = process.generate_sample(burnin=100)
    np.random.seed(12345)
    expected = np.random.randn(200)
    expected[1] = 1.6 * expected[0] + expected[1]
    for i in range(2, 200):
        expected[i] = 1.6 * expected[i - 1] - 0.9 * expected[i - 2] + expected[i]
    assert_almost_equal(sample, expected[100:])

    np.random.seed(12345)
    sample = process.generate_sample(nsample=(100, 5))
    assert_equal(sample.shape, (100, 5))
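Outside of a test, the same API boils down to a few lines (an illustrative sketch; coefficients and sample sizes are arbitrary):

import numpy as np
from statsmodels.tsa.arima_process import ArmaProcess

np.random.seed(12345)
process = ArmaProcess.from_coeffs([1.6, -0.9])             # AR(2): y_t = 1.6*y_{t-1} - 0.9*y_{t-2} + e_t
sample = process.generate_sample(nsample=250, burnin=100)  # discard the first 100 draws
print(sample.shape)                                        # (250,)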
Example 9: generate_poisson
import numpy as np

def generate_poisson():
    # Regression coefficients
    beta = np.zeros(5, dtype=np.float64)
    beta[2] = 0.5
    beta[4] = -0.5

    nclust = 100
    rz = 0.5  # unused in this Poisson variant, kept from the original

    OUT = open("gee_poisson_1.csv", "w")
    for i in range(nclust):
        n = np.random.randint(3, 6)  # Cluster size
        x = np.random.normal(size=(n, 5))
        for j in range(5):
            x[:, j] += np.random.normal()
        lp = np.dot(x, beta)
        E = np.exp(lp)
        y = [np.random.poisson(e) for e in E]
        y = np.array(y)

        for j in range(n):
            OUT.write("%d,%d," % (i, y[j]))
            OUT.write(",".join(["%.3f" % b for b in x[j, :]]) + "\n")
    OUT.close()
Example 10: test_ftest_pvalues
def test_ftest_pvalues(self):
    res = self.results
    use_t = res.use_t
    k_vars = len(res.params)

    # check default use_t
    pvals = [res.wald_test(np.eye(k_vars)[k], use_f=use_t).pvalue
             for k in range(k_vars)]
    assert_allclose(pvals, res.pvalues, rtol=5e-10, atol=1e-25)

    # automatic use_f based on results class use_t
    pvals = [res.wald_test(np.eye(k_vars)[k]).pvalue
             for k in range(k_vars)]
    assert_allclose(pvals, res.pvalues, rtol=5e-10, atol=1e-25)

    # label for pvalues in summary
    string_use_t = 'P>|z|' if use_t is False else 'P>|t|'
    summ = str(res.summary())
    assert_(string_use_t in summ)

    # try except for models that don't have summary2
    try:
        summ2 = str(res.summary2())
    except AttributeError:
        summ2 = None
    if summ2 is not None:
        assert_(string_use_t in summ2)
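The equivalence the test relies on can be reproduced on any fitted results object; a hedged sketch with OLS (simulated data, arbitrary coefficients):

import numpy as np
import statsmodels.api as sm

np.random.seed(0)
X = sm.add_constant(np.random.randn(100, 2))
y = X @ np.array([1.0, 0.5, -0.5]) + np.random.randn(100)
res = sm.OLS(y, X).fit()

k_vars = len(res.params)
pvals = [res.wald_test(np.eye(k_vars)[k], use_f=True).pvalue
         for k in range(k_vars)]
print(np.allclose(pvals, res.pvalues))  # True: single-constraint Wald F-tests match the t-test p-values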
Example 11: _eigval_decomp_SZ
def _eigval_decomp_SZ(self, irf_resim):
    """
    Returns
    -------
    W : array of eigenvectors
    eigva : list of eigenvalues
    k : matrix indicating column # of largest eigenvalue for each c_i,j
    """
    neqs = self.neqs
    periods = self.periods

    cov_hold = np.zeros((neqs, neqs, periods, periods))
    for i in range(neqs):
        for j in range(neqs):
            cov_hold[i, j, :, :] = np.cov(irf_resim[:, 1:, i, j], rowvar=0)

    W = np.zeros((neqs, neqs, periods, periods))
    eigva = np.zeros((neqs, neqs, periods, 1))
    k = np.zeros((neqs, neqs))

    for i in range(neqs):
        for j in range(neqs):
            W[i, j, :, :], eigva[i, j, :, 0], k[i, j] = \
                util.eigval_decomp(cov_hold[i, j, :, :])
    return W, eigva, k
Example 12: __iter__
def __iter__(self):
    n = self.n
    k = self.k
    start = self.start
    if self.return_slice:
        for i in range(start, n - k):
            train_slice = slice(None, i, None)
            if self.kall:
                test_slice = slice(i, i + k)
            else:
                test_slice = slice(i + k - 1, i + k)
            yield train_slice, test_slice
    else:  # for compatibility with other iterators
        for i in range(start, n - k):
            train_index = np.zeros(n, dtype=bool)  # np.bool is removed in modern numpy
            train_index[:i] = True
            test_index = np.zeros(n, dtype=bool)
            if self.kall:
                test_index[i:i + k] = True
            else:
                test_index[i + k - 1:i + k] = True
            # or faster to return np.arange(i, i+k) ?
            # returning a slice should be faster in this case
            yield train_index, test_index
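Since the class that owns this __iter__ is not shown on this page, here is a minimal standalone generator (illustrative names) implementing the same expanding-window scheme: train on [0, i), test on the k observations after the window.

import numpy as np

def kstep_splits(n, k=1, start=3):
    # expanding window: train = [0, i), test = the next k observations
    for i in range(start, n - k):
        yield slice(None, i), slice(i, i + k)

y = np.arange(10)
for train, test in kstep_splits(len(y), k=2):
    print(y[train], '->', y[test])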
Example 13: approx_hess2
import numpy as np
from statsmodels.tools.numdiff import _get_epsilon  # step-size helper used by statsmodels

def approx_hess2(x, f, epsilon=None, args=(), kwargs={}, return_grad=False):
    # finite-difference approximation of the Hessian of f at x
    n = len(x)
    # NOTE: Ridout suggests using eps**(1/4)*theta
    h = _get_epsilon(x, 3, epsilon, n)
    ee = np.diag(h)
    f0 = f(*((x,) + args), **kwargs)
    # Compute forward step
    g = np.zeros(n)
    gg = np.zeros(n)
    for i in range(n):
        g[i] = f(*((x + ee[i, :],) + args), **kwargs)
        gg[i] = f(*((x - ee[i, :],) + args), **kwargs)

    hess = np.outer(h, h)  # this is now epsilon**2
    # Compute "double" forward step
    for i in range(n):
        for j in range(i, n):
            hess[i, j] = (f(*((x + ee[i, :] + ee[j, :],) + args), **kwargs) -
                          g[i] - g[j] + f0 +
                          f(*((x - ee[i, :] - ee[j, :],) + args), **kwargs) -
                          gg[i] - gg[j] + f0) / (2 * hess[i, j])
            hess[j, i] = hess[i, j]

    if return_grad:
        grad = (g - f0) / h
        return hess, grad
    else:
        return hess
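approx_hess2 also ships as a public function in statsmodels.tools.numdiff; a quick check on a quadratic, whose Hessian is known exactly:

import numpy as np
from statsmodels.tools.numdiff import approx_hess2

A = np.array([[2.0, 0.5],
              [0.5, 1.0]])
f = lambda x: 0.5 * x @ A @ x   # the Hessian of this quadratic is exactly A
H = approx_hess2(np.array([0.3, -0.2]), f)
print(np.round(H, 6))           # should be close to A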
Example 14: initialize
def initialize(self, model):
    super(GlobalOddsRatio, self).initialize(model)

    if self.model.weights is not None:
        warnings.warn("weights not implemented for GlobalOddsRatio "
                      "cov_struct, using unweighted covariance estimate",
                      NotImplementedWarning)

    # Need to restrict to between-subject pairs
    cpp = []
    for v in model.endog_li:
        # Number of subjects in this group
        m = int(len(v) / self._ncut)
        i1, i2 = np.tril_indices(m, -1)

        cpp1 = {}
        for k1 in range(self._ncut):
            for k2 in range(k1 + 1):
                jj = np.zeros((len(i1), 2), dtype=np.int64)
                jj[:, 0] = i1 * self._ncut + k1
                jj[:, 1] = i2 * self._ncut + k2
                cpp1[(k2, k1)] = jj
        cpp.append(cpp1)
    self.cpp = cpp

    # Initialize the dependence parameters
    self.crude_or = self.observed_crude_oddsratio()
    if self.model.update_dep:
        self.dep_params = self.crude_or
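The pairing logic hinges on np.tril_indices, which enumerates every between-subject pair exactly once:

import numpy as np

m = 3                       # subjects in one group
i1, i2 = np.tril_indices(m, -1)
print(list(zip(i1, i2)))    # [(1, 0), (2, 0), (2, 1)]: all distinct pairs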
Example 15: make_lag_names
from statsmodels.compat.python import string_types

def make_lag_names(names, lag_order, trendorder=1, exog=None):
    """
    Produce list of lag-variable names. Constant / trends go at the beginning.

    Examples
    --------
    >>> make_lag_names(['foo', 'bar'], 2, 1)
    ['const', 'L1.foo', 'L1.bar', 'L2.foo', 'L2.bar']
    """
    lag_names = []
    if isinstance(names, string_types):
        names = [names]

    # take care of lagged endogenous names
    for i in range(1, lag_order + 1):
        for name in names:
            if not isinstance(name, string_types):
                name = str(name)  # will need consistent unicode handling
            lag_names.append('L' + str(i) + '.' + name)

    # handle the constant name
    if trendorder != 0:
        lag_names.insert(0, 'const')
    if trendorder > 1:
        lag_names.insert(1, 'trend')
    if trendorder > 2:
        lag_names.insert(2, 'trend**2')
    if exog is not None:
        for i in range(exog.shape[1]):
            lag_names.insert(trendorder + i, "exog" + str(i))
    return lag_names
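One more worked call (the output is derived from the logic above, not taken from the original page): with a quadratic trend and one exogenous column, the deterministic terms come first.

import numpy as np

exog = np.ones((10, 1))  # one exogenous regressor; values are irrelevant for naming
print(make_lag_names(['y'], 2, trendorder=3, exog=exog))
# ['const', 'trend', 'trend**2', 'exog0', 'L1.y', 'L2.y']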