This article collects typical usage examples of Python's scipy.concatenate function. If you have been wondering what scipy.concatenate does, how to call it, or how it is used in practice, the curated examples below may help.
Shown below are 15 code examples of the concatenate function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
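Before the individual examples, here is a minimal sketch of the basic call. scipy.concatenate has historically been a re-export of numpy.concatenate (newer SciPy releases drop these top-level aliases), so the signature matches numpy's; the sketch below falls back to numpy if the alias is missing:

import numpy
try:
    from scipy import concatenate      # older SciPy re-exports numpy.concatenate
except ImportError:
    concatenate = numpy.concatenate    # newer SciPy removed the top-level alias

a = numpy.array([[1, 2], [3, 4]])
b = numpy.array([[5, 6]])
print(concatenate((a, b), axis=0))     # join along rows -> shape (3, 2)
print(concatenate((a, b.T), axis=1))   # join along columns -> shape (2, 3)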
Example 1: __init__
def __init__(self, layers, gridOpts):
    ''' Initialize the grid using the given layers and grid options.
    '''
    segments = []
    qStart = scipy.inf
    qEnd = -scipy.inf
    for layer in layers:
        if layer.isQuantum:
            d1 = dn = gridOpts.dzQuantum
            segments += [self.get_dz_segment(d1, dn, layer.thickness)]
            qStart = min(qStart, sum([len(seg) for seg in segments[:-1]]))
            qEnd = max(qEnd, sum([len(seg) for seg in segments]))
        elif gridOpts.useFixedGrid:
            d1 = dn = gridOpts.dz
            segments += [self.get_dz_segment(d1, dn, layer.thickness)]
        elif layer.thickness*gridOpts.dzCenterFraction > gridOpts.dzEdge:
            d1 = dn = gridOpts.dzEdge
            dc = gridOpts.dzCenterFraction*layer.thickness
            segments += [self.get_dz_segment(d1, dc, layer.thickness/2),
                         self.get_dz_segment(dc, dn, layer.thickness/2)]
        else:
            d1 = dn = gridOpts.dzEdge
            segments += [self.get_dz_segment(d1, dn, layer.thickness)]
    self.dz = scipy.concatenate(segments)
    self.z = scipy.concatenate(([0], scipy.cumsum(self.dz)))
    self.zr = (self.z[:-1]+self.z[1:])/2
    self.znum = len(self.z)
    self.rnum = len(self.zr)
    self.gridOpts = gridOpts
    self.qIndex = scipy.arange(qStart, qEnd+1)   # Wavefunction index
    self.qrIndex = scipy.arange(qStart, qEnd)    # Quantum region index
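The pattern to note in Example 1 is building one dz array from per-layer segments and turning it into node positions with cumsum. Here is a standalone sketch of just that idiom, using numpy and made-up layer step sizes:

import numpy as np

# Hypothetical spacings: a coarse layer, a finely meshed layer, another coarse layer.
segments = [np.full(5, 0.2), np.full(20, 0.01), np.full(5, 0.2)]
dz = np.concatenate(segments)               # all mesh spacings in one array
z = np.concatenate(([0], np.cumsum(dz)))    # node positions, starting at 0
zr = (z[:-1] + z[1:]) / 2                   # midpoints of each interval
print(len(z), len(zr))                      # 31 nodes, 30 intervals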
Example 2: _initParams_fast
def _initParams_fast(self):
    """
    Initialize the gp parameters:
    1) project Y on the known factor X0 -> Y0;
       the average variance of Y0 is used to initialize the variance explained by X0
    2) consider the residual Y1 = Y - Y0 (this is equivalent to regressing out X0)
    3) perform PCA on cov(Y1) and keep the first k PCs to initialize X
    4) the variance of all other PCs is used to initialize the noise
    5) the variance explained by the interaction is set to a small random number
    """
    Xd = LA.pinv(self.X0)
    Y0 = self.X0.dot(Xd.dot(self.Y))
    Y1 = self.Y - Y0
    YY = SP.cov(Y1)
    S, U = LA.eigh(YY)
    X = U[:, -self.k:]*SP.sqrt(S[-self.k:])
    a = SP.array([SP.sqrt(Y0.var(0).mean())])
    b = 1e-3*SP.randn(1)
    c = SP.array([SP.sqrt((YY-SP.dot(X, X.T)).diagonal().mean())])
    # gp hyper params
    params = limix.CGPHyperParams()
    if self.interaction:
        params['covar'] = SP.concatenate([a, X.reshape(self.N*self.k, order='F'), SP.ones(1), b])
    else:
        params['covar'] = SP.concatenate([a, X.reshape(self.N*self.k, order='F')])
    params['lik'] = c
    return params
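The concatenate call in Example 2 simply packs scalars and a column-flattened matrix into one flat hyperparameter vector. A self-contained illustration of that packing (shapes are invented and limix is not required):

import numpy as np

N, k = 10, 2
a = np.array([0.5])                    # variance explained by the known factor
X = np.random.randn(N, k)              # factor matrix, flattened column-wise below
b = 1e-3 * np.random.randn(1)          # small random interaction variance
covar = np.concatenate([a, X.reshape(N * k, order='F'), np.ones(1), b])
print(covar.shape)                     # (1 + N*k + 1 + 1,) = (23,)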
Example 3: dwt_2d
def dwt_2d(image, poly, l=1):
    """
    Computes the discrete wavelet transform of a 2D input image
    :param image: input image to be processed
    :param poly: polyphase filter matrix containing the lowpass and highpass coefficients
    :param l: number of transform levels to apply
    :return: the transformed image
    """
    assert max(mod(image.shape, 2**l)) == 0, 'image dimension ({}) does not allow for a {}-level decomposition'.format(image.shape, l)
    image_ = image.copy()
    for level in range(l):
        sub_image = image_[:(image.shape[0]/(2**level)), :(image.shape[1]/(2**level))]
        for row in range(sub_image.shape[0]):
            s = sub_image[row, :]
            a, d = dwt(s, poly)
            sub_image[row, :] = concatenate((a[newaxis, :], d[0][newaxis, :]), axis=1)
        for col in range(sub_image.shape[1]):
            s = sub_image[:, col]
            a, d = dwt(s, poly)
            sub_image[:, col] = concatenate((a, d[0]), axis=0)
    return image_
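Within each pass of Example 3, concatenate writes the approximation and detail halves back side by side. A toy illustration of that split-and-join step with a crude Haar-like filter, so no dwt helper is needed:

import numpy as np

s = np.arange(8, dtype=float)              # one row of an image
a = (s[0::2] + s[1::2]) / np.sqrt(2)       # lowpass half
d = (s[0::2] - s[1::2]) / np.sqrt(2)       # highpass half
row = np.concatenate((a, d))               # approximation first, details second
print(row)                                 # same length as s, reordered content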
Example 4: idwt
def idwt(a, d, poly, l=1):
    """
    Computes the inverse discrete wavelet transform of a 1D signal
    :param a: the approximation coefficients at the deepest level
    :param d: a list of detail coefficients for each level
    :param poly: polyphase filter matrix containing the lowpass and highpass coefficients
    :param l: number of transform levels to invert
    :return: the reconstructed signal
    """
    assert len(d) == l, 'insufficient detail coefficients provided for reconstruction depth {}'.format(l)
    if len(a.shape) == 1:
        a = a[newaxis, :]
    for level in reversed(range(l)):
        decomposition = concatenate((a, d[level][newaxis, :]), axis=0)
        reconstruction = zeros_like(decomposition, dtype=float)
        for z in range(poly.shape[1]/2):
            reconstruction += dot(poly[:, 2*z:2*z+2].transpose(), concatenate(
                (decomposition[:, decomposition.shape[1]-z:], decomposition[:, :decomposition.shape[1]-z]), axis=1))
        a = reconstruction.transpose().reshape(1, 2*a.shape[1])
    return a
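The inner concatenate in Example 4 implements a circular shift of the columns: the last z columns are moved to the front. That idiom on its own is equivalent to numpy.roll:

import numpy as np

x = np.arange(12).reshape(2, 6)
z = 2
rolled = np.concatenate((x[:, x.shape[1]-z:], x[:, :x.shape[1]-z]), axis=1)
print(rolled)
print(np.array_equal(rolled, np.roll(x, z, axis=1)))   # True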
Example 5: mlr
def mlr(x, y, order):
    """Multiple linear regression fit of the columns of matrix x
    (independent variables) to vector y (dependent variable).
    order - order of a smoothing polynomial, which can be included
            in the set of independent variables. If order is
            not specified, no background will be included.
    b - fit coefficients
    f - fit result (m x 1 column vector)
    r - residual (m x 1 column vector)
    """
    if order > 0:
        s = scipy.ones((len(y), 1))
        for j in range(order):
            # nA is expected to be a module-level alias for scipy.newaxis
            s = scipy.concatenate((s, (scipy.arange(0, 1+(1.0/(len(y)-1)), 1.0/(len(y)-1))**j)[:, nA]), 1)
        X = scipy.concatenate((x, s), 1)
    else:
        X = x
    # calculate fit coefficients b
    b = scipy.dot(scipy.dot(scipy.linalg.pinv(scipy.dot(scipy.transpose(X), X)), scipy.transpose(X)), y)
    f = scipy.dot(X, b)
    r = y - f
    return b, f, r
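In Example 5 the concatenate calls append polynomial background columns to the design matrix before the least-squares fit. A compact, self-contained version of that column-stacking with fake data (numpy only):

import numpy as np

m = 50
x = np.random.randn(m, 3)                  # measured independent variables
y = np.random.randn(m, 1)                  # dependent variable
order = 2
t = np.linspace(0, 1, m)[:, np.newaxis]    # 0..1 axis for the smoothing polynomial
s = np.ones((m, 1))
for j in range(1, order):
    s = np.concatenate((s, t**j), axis=1)  # add t^1, t^2, ... columns
X = np.concatenate((x, s), axis=1)         # full design matrix
b, *_ = np.linalg.lstsq(X, y, rcond=None)
print(X.shape, b.shape)                    # (50, 5) (5, 1)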
Example 6: invert_epochs
def invert_epochs(epochs, end=None):
    """invert an epoch set
    The first epoch will be mapped to [0, start of the first epoch] and the
    last will be mapped to [end of last epoch, :end:]. Epochs that accidentally
    become negative or zero-length will be omitted.
    :type epochs: ndarray
    :param epochs: epoch set to invert
    :type end: int
    :param end: If not None, it is taken as the end of the last epoch,
        else max(index-dtype) is taken instead.
        Default=None
    :returns: ndarray - inverted epoch set
    """
    # checks
    if end is None:
        end = sp.iinfo(INDEX_DTYPE).max
    else:
        end = INDEX_DTYPE.type(end)
    # flip them
    rval = sp.vstack((sp.concatenate(([0], epochs[:, 1])),
                      sp.concatenate((epochs[:, 0], [end])))).T
    return (rval[rval[:, 1] - rval[:, 0] > 0]).astype(INDEX_DTYPE)
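The vstack/concatenate pair in Example 6 builds the gaps between epochs: the new start points are [0] plus the old end points, and the new end points are the old start points plus [end]. On a concrete epoch set:

import numpy as np

epochs = np.array([[10, 20], [35, 40], [40, 55]])   # [start, end) pairs
end = 70
starts = np.concatenate(([0], epochs[:, 1]))        # 0, 20, 40, 55
ends = np.concatenate((epochs[:, 0], [end]))        # 10, 35, 40, 70
inv = np.vstack((starts, ends)).T
inv = inv[inv[:, 1] - inv[:, 0] > 0]                # drop the empty [40, 40) gap
print(inv)                                          # [[ 0 10] [20 35] [55 70]]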
Example 7: ar_model_check_stable
def ar_model_check_stable(A):
    """check if this AR model is stable
    :Parameters:
        A : ndarray
            The coefficient matrix of the model
    """
    # inits and checks
    m, p = A.shape
    p /= m
    if p != round(p):
        raise ValueError('bad inputs!')
    # check for stable model
    A1 = N.concatenate((
        A,
        N.concatenate((
            N.eye((p - 1) * m),
            N.zeros(((p - 1) * m, m))
        ), axis=1)
    ))
    lambdas = NL.eigvals(A1)
    rval = True
    if (N.absolute(lambdas) > 1).any():
        rval = False
    del A1, lambdas
    return rval
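The nested concatenate in Example 7 assembles the companion matrix of the AR model: the coefficient block sits on top of an identity block padded with zeros, and the eigenvalues of that matrix decide stability. A small numeric sketch for a scalar AR(2) model (m = 1, p = 2):

import numpy as np

A = np.array([[0.5, -0.3]])                # AR(2) coefficients
m, p = A.shape[0], A.shape[1] // A.shape[0]
A1 = np.concatenate((
    A,
    np.concatenate((np.eye((p - 1) * m), np.zeros(((p - 1) * m, m))), axis=1)
))
lambdas = np.linalg.eigvals(A1)
print(np.abs(lambdas).max() < 1)           # True -> this model is stable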
Example 8: _update_6
def _update_6(self):
    # construct system
    Ax = scipy.zeros((len(self.data), 6))
    Ax[:, 0] = 1.0
    Ax[:, 2] = self.data[:, 0] - self.center[0]
    Ax[:, 3] = self.data[:, 1] - self.center[1]
    Ay = scipy.zeros((len(self.data), 6))
    Ay[:, 1] = 1.0
    Ay[:, 4] = self.data[:, 0] - self.center[0]
    Ay[:, 5] = self.data[:, 1] - self.center[1]
    A = scipy.concatenate((Ax, Ay), axis=0)
    del Ax, Ay
    b = scipy.concatenate((self.data[:, 2], self.data[:, 3]))
    # solve for parameters
    parameters, residual, rank, sigma = scipy.linalg.lstsq(A, b)
    self.tx = parameters[0]
    self.ty = parameters[1]
    self.exx = parameters[2]
    self.exy = parameters[3]
    self.eyx = parameters[4]
    self.eyy = parameters[5]
    del parameters
    # compute residuals
    self.residuals[:, 2] = self.data[:, 2] - self.tx - self.exx * (self.data[:, 0] - self.center[0]) - self.exy * (self.data[:, 1] - self.center[1])
    self.residuals[:, 3] = self.data[:, 3] - self.ty - self.eyx * (self.data[:, 0] - self.center[0]) - self.eyy * (self.data[:, 1] - self.center[1])
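In Example 8, concatenate stacks the x-displacement equations on top of the y-displacement equations so that a single least-squares solve recovers all six parameters. A self-contained miniature with synthetic, noise-free displacements (names and values are invented for illustration):

import numpy as np
from scipy.linalg import lstsq

rng = np.random.default_rng(0)
pts = rng.random((100, 2))                               # (x, y) coordinates
true = np.array([0.1, -0.2, 1e-3, 2e-3, -1e-3, 5e-4])    # tx, ty, exx, exy, eyx, eyy
ux = true[0] + true[2] * pts[:, 0] + true[3] * pts[:, 1]
uy = true[1] + true[4] * pts[:, 0] + true[5] * pts[:, 1]
Ax = np.zeros((len(pts), 6)); Ax[:, 0] = 1.0; Ax[:, 2] = pts[:, 0]; Ax[:, 3] = pts[:, 1]
Ay = np.zeros((len(pts), 6)); Ay[:, 1] = 1.0; Ay[:, 4] = pts[:, 0]; Ay[:, 5] = pts[:, 1]
A = np.concatenate((Ax, Ay), axis=0)                     # stack both equation blocks row-wise
b = np.concatenate((ux, uy))
params, *_ = lstsq(A, b)
print(np.allclose(params, true))                         # True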
Example 9: shift_row
def shift_row(row, shift):
    if shift == 0:
        return row
    if shift > 0:
        return sp.concatenate(([0] * shift, row[:-shift]))
    else:
        return sp.concatenate((row[-shift:], [0] * -shift))
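A quick check of the padding behavior in Example 9 (rewritten against numpy, which behaves the same as the sp alias used above):

import numpy as np

def shift_row(row, shift):
    if shift == 0:
        return row
    if shift > 0:
        return np.concatenate(([0] * shift, row[:-shift]))
    return np.concatenate((row[-shift:], [0] * -shift))

row = np.array([1, 2, 3, 4, 5])
print(shift_row(row, 2))    # [0 0 1 2 3]
print(shift_row(row, -2))   # [3 4 5 0 0]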
Example 10: ideal_data
def ideal_data(num, dimU, dimY, dimX, noise=1):
    """Linear system data"""
    # generate randomized linear system matrices
    A = randn(dimX, dimX)
    B = randn(dimX, dimU)
    C = randn(dimY, dimX)
    D = randn(dimY, dimU)
    # make sure state evolution is stable
    U, S, V = svd(A)
    A = dot(U, dot(diag(S / max(S)), V))
    U, S, V = svd(B)
    S2 = zeros((size(U, 1), size(V, 0)))
    S2[:, :size(U, 1)] = diag(S / max(S))
    B = dot(U, dot(S2, V))
    # random input
    U = randn(num, dimU)
    # initial state
    X = reshape(randn(dimX), (1, -1))
    # initial output
    Y = reshape(dot(C, X[-1]) + dot(D, U[0]), (1, -1))
    # generate next state
    X = concatenate((X, reshape(dot(A, X[-1]) + dot(B, U[0]), (1, -1))))
    # and so forth
    for u in U[1:]:
        Y = concatenate((Y, reshape(dot(C, X[-1]) + dot(D, u), (1, -1))))
        X = concatenate((X, reshape(dot(A, X[-1]) + dot(B, u), (1, -1))))
    return U, Y + randn(num, dimY) * noise
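Example 10 grows the state and output trajectories one row at a time with concatenate. The same idiom in isolation, together with the usual alternative of collecting rows in a list and concatenating once at the end:

import numpy as np

A = np.array([[0.9]])
x = np.zeros((1, 1))                                   # one row per time step
for _ in range(5):
    x = np.concatenate((x, x[-1:].dot(A.T) + 1.0))     # append the next state as a new row
print(x.ravel())

# Equivalent result with a single concatenate at the end:
rows = [np.zeros((1, 1))]
for _ in range(5):
    rows.append(rows[-1].dot(A.T) + 1.0)
print(np.concatenate(rows).ravel())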
Example 11: roc
def roc(labels, predictions):
    """roc - calculate receiver operator curve
    labels: true labels (>0 : True, else False)
    predictions: the ranking generated from whatever predictor is used"""
    # 1. convert to arrays
    labels = S.array(labels).reshape([-1])
    predictions = S.array(predictions).reshape([-1])
    # threshold
    t = labels > 0
    # sort predictions in descending order
    # get order implied by predictor (descending)
    Ix = S.argsort(predictions)[::-1]
    # reorder truth
    t = t[Ix]
    # compute true positive and false positive rates
    tp = S.double(N.cumsum(t))/t.sum()
    fp = S.double(N.cumsum(~t))/(~t).sum()
    # add end points
    tp = S.concatenate(([0], tp, [1]))
    fp = S.concatenate(([0], fp, [1]))
    return [tp, fp]
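The two concatenate calls at the end of Example 11 simply pin the ROC curve to its (0, 0) and (1, 1) endpoints. A tiny end-to-end run on toy scores:

import numpy as np

labels = np.array([1, 0, 1, 1, 0, 0])
scores = np.array([0.9, 0.8, 0.7, 0.4, 0.3, 0.1])
t = (labels > 0)[np.argsort(scores)[::-1]]     # truth ordered by descending score
tp = np.cumsum(t) / t.sum()
fp = np.cumsum(~t) / (~t).sum()
tp = np.concatenate(([0], tp, [1]))            # force the curve through (0,0) and (1,1)
fp = np.concatenate(([0], fp, [1]))
auc = np.sum(np.diff(fp) * (tp[1:] + tp[:-1]) / 2)   # trapezoid rule
print(auc)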
Example 12: run_interact
def run_interact(Y, intA, intB, covs, K):
    """ Calculate pvalues for the nested model of including a multiplicative term between intA and intB into the additive model """
    [N, Ny] = Y.shape
    Na = intA.shape[1]  # number of interaction terms 1
    Nb = intB.shape[1]  # number of interaction terms 2
    S, U = LA.eigh(K)
    UY = SP.dot(U.T, Y)
    UintA = SP.dot(U.T, intA)
    UintB = SP.dot(U.T, intB)
    Ucovs = SP.dot(U.T, covs)
    # for each snp/gene/factor combination, run a lod
    # snps need to be diced bc of missing values - iterate over them, else in arrays
    lods = SP.zeros([Na, Nb, Ny])
    # add mean column:
    if covs is None: covs = SP.ones([Ny, 1])
    # for each pair of interacting terms
    for a in range(Na):
        for b in range(Nb):
            # column-stack covariates and the two main effects (null model design)
            C = SP.concatenate((Ucovs, UintA[:, a:a+1], UintB[:, b:b+1]), axis=1)
            X = intA[:, a:a+1]*intB[:, b:b+1]
            UX = SP.dot(U.T, X)
            UX = SP.concatenate((UX, C), axis=1)
            for phen in SP.arange(Ny):
                UY_ = UY[:, phen]
                nllnull, ldeltanull = optdelta(UY_, C, S, ldeltanull=None, numintervals=10, ldeltamin=-5.0, ldeltamax=5.0)
                nllalt, ldeltaalt = optdelta(UY_, UX, S, ldeltanull=ldeltanull, numintervals=100, ldeltamin=-5.0, ldeltamax=5.0)
                lods[a, b, phen] = nllnull - nllalt   # log-likelihood ratio of alternative vs. null
    return lods
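The essential concatenate use in Example 12 is column-stacking: covariates plus the two main effects form the null design, and the interaction column is prepended to form the alternative design. The shape bookkeeping on its own, without the mixed-model machinery:

import numpy as np

N = 100
covs = np.ones((N, 1))                      # intercept column
intA = np.random.randn(N, 3)                # first set of interacting terms
intB = np.random.randn(N, 2)                # second set
a, b = 0, 1
C = np.concatenate((covs, intA[:, a:a+1], intB[:, b:b+1]), axis=1)   # null model design
X = intA[:, a:a+1] * intB[:, b:b+1]                                  # multiplicative term
X = np.concatenate((X, C), axis=1)                                   # alternative model design
print(C.shape, X.shape)                     # (100, 3) (100, 4)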
Example 13: generateNodesAdaptive
def generateNodesAdaptive(self):
    innerDomainSize = self.innerDomainSize
    innerMeshSize = self.innerMeshSize
    numberElementsInnerDomain = innerDomainSize/innerMeshSize
    assert(numberElementsInnerDomain < self.numberElements)
    domainCenter = (self.domainStart+self.domainEnd)/2
    nodes0 = np.linspace(domainCenter, innerDomainSize/2.0, (numberElementsInnerDomain/2.0)+1.0)
    nodes0 = np.delete(nodes0, -1)
    numberOuterIntervalsFromDomainCenter = (self.numberElements - numberElementsInnerDomain)/2.0
    const = np.log2(innerDomainSize/2.0)/0.5
    exp = np.linspace(const, np.log2(self.domainEnd*self.domainEnd), numberOuterIntervalsFromDomainCenter+1)
    nodes1 = np.power(np.sqrt(2), exp)
    nodesp = np.concatenate((nodes0, nodes1))
    nodesn = -nodesp[::-1]
    nodesn = np.delete(nodesn, -1)
    linNodalCoordinates = np.concatenate((nodesn, nodesp))
    nodalCoordinates = 0
    # Introduce higher order nodes
    if self.elementType == "quadratic" or self.elementType == "cubic":
        if self.elementType == "quadratic":
            numberNodesPerElement = 3
        elif self.elementType == "cubic":
            numberNodesPerElement = 4
        for i in range(0, len(linNodalCoordinates)-1):
            newnodes = np.linspace(linNodalCoordinates[i], linNodalCoordinates[i+1], numberNodesPerElement)
            nodalCoordinates = np.delete(nodalCoordinates, -1)
            nodalCoordinates = np.concatenate((nodalCoordinates, newnodes))
    else:
        nodalCoordinates = linNodalCoordinates
    return nodalCoordinates
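The mirroring trick in Example 13 is worth isolating: build the positive half of the node set, reverse and negate it, drop the duplicated center node, and concatenate:

import numpy as np

nodesp = np.array([0.0, 0.5, 1.0, 2.0, 4.0])   # nodes from the domain center outward
nodesn = -nodesp[::-1]                         # mirrored negative half
nodesn = np.delete(nodesn, -1)                 # drop the duplicate center node
coords = np.concatenate((nodesn, nodesp))
print(coords)                                  # [-4. -2. -1. -0.5 0. 0.5 1. 2. 4.]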
Example 14: main
def main():
    points = generate_gaussian(1000, 2, 0, 2, center=(10, 0))
    pylab.plot(points[:, 0], points[:, 1], 'r+')
    #export("Classe A", points)
    points2 = generate_gaussian(1000, 2, 0, 2, center=(5, 5))
    pylab.plot(points2[:, 0], points2[:, 1], 'b+')
    #export("Classe C", points)
    points3 = generate_gaussian(1000, 2, 0, 2, center=(0, 10))
    pylab.plot(points3[:, 0], points3[:, 1], 'y+')
    points4 = generate_gaussian(1000, 2, 0, 2, center=(0, 0))
    pylab.plot(points4[:, 0], points4[:, 1], 'g+')
    pylab.axis([-10, 20, -10, 20])
    pylab.show()
    labels = []
    for i in xrange(len(points)):
        labels.append(0)
    for i in xrange(len(points2)):
        labels.append(1)
    for i in xrange(len(points3)):
        labels.append(2)
    for i in xrange(len(points4)):
        labels.append(3)
    points = scipy.concatenate((points, points2))
    points = scipy.concatenate((points, points3))
    points = scipy.concatenate((points, points4))
    data = dataset.Dataset(points, labels)
    data.random()
    dataset.save(data, "../datasets/4gaussians1k.data")
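Stripped of the plotting and the project-specific generate_gaussian/Dataset helpers, the core of Example 14 is stacking several point clouds row-wise and building matching labels. A numpy-only sketch:

import numpy as np

clouds = [np.random.randn(1000, 2) + center
          for center in [(10, 0), (5, 5), (0, 10), (0, 0)]]
points = np.concatenate(clouds)                          # (4000, 2)
labels = np.concatenate([np.full(len(c), i) for i, c in enumerate(clouds)])
print(points.shape, labels.shape)                        # (4000, 2) (4000,)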
Example 15: __call__
def __call__(self, Xi, Xj, ni, nj, hyper_deriv=None, symmetric=False):
    """Evaluate the covariance between points `Xi` and `Xj` with derivative order `ni`, `nj`.

    Parameters
    ----------
    Xi : :py:class:`Matrix` or other Array-like, (`M`, `N`)
        `M` inputs with dimension `N`.
    Xj : :py:class:`Matrix` or other Array-like, (`M`, `N`)
        `M` inputs with dimension `N`.
    ni : :py:class:`Matrix` or other Array-like, (`M`, `N`)
        `M` derivative orders for set `i`.
    nj : :py:class:`Matrix` or other Array-like, (`M`, `N`)
        `M` derivative orders for set `j`.
    hyper_deriv : Non-negative int or None, optional
        The index of the hyperparameter to compute the first derivative
        with respect to. If None, no derivatives are taken. Hyperparameter
        derivatives are not supported at this point. Default is None.
    symmetric : bool, optional
        Whether or not the input `Xi`, `Xj` are from a symmetric matrix.
        Default is False.

    Returns
    -------
    Kij : :py:class:`Array`, (`M`,)
        Covariances for each of the `M` `Xi`, `Xj` pairs.

    Raises
    ------
    NotImplementedError
        If the `hyper_deriv` keyword is not None.
    """
    if hyper_deriv is not None:
        raise NotImplementedError("Hyperparameter derivatives have not been implemented!")
    n_cat = scipy.asarray(scipy.concatenate((ni, nj), axis=1), dtype=int)
    X_cat = scipy.asarray(scipy.concatenate((Xi, Xj), axis=1), dtype=float)
    n_cat_unique = unique_rows(n_cat)
    k = scipy.zeros(Xi.shape[0], dtype=float)
    # Loop over unique derivative patterns:
    if self.num_proc > 1:
        pool = multiprocessing.Pool(processes=self.num_proc)
    for n_cat_state in n_cat_unique:
        idxs = scipy.where(scipy.asarray((n_cat == n_cat_state).all(axis=1)).squeeze())[0]
        if (n_cat_state == 0).all():
            k[idxs] = self.cov_func(Xi[idxs, :], Xj[idxs, :], *self.params)
        else:
            if self.num_proc > 1 and len(idxs) > 1:
                k[idxs] = scipy.asarray(
                    pool.map(_ArbitraryKernelEval(self, n_cat_state), X_cat[idxs, :]),
                    dtype=float
                )
            else:
                for idx in idxs:
                    k[idx] = mpmath.chop(mpmath.diff(self._mask_cov_func,
                                                     X_cat[idx, :],
                                                     n=n_cat_state,
                                                     singular=True))
    if self.num_proc > 0:
        pool.close()
    return k
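The axis=1 concatenation in Example 15 pairs each row of Xi with its counterpart in Xj, so every row of X_cat (and of n_cat) describes one covariance evaluation. The shape bookkeeping on its own, with invented sizes:

import numpy as np

M, N = 5, 2
Xi, Xj = np.random.randn(M, N), np.random.randn(M, N)
ni, nj = np.zeros((M, N), dtype=int), np.zeros((M, N), dtype=int)
ni[3, 0] = 1                                       # one pair asks for a derivative
X_cat = np.concatenate((Xi, Xj), axis=1)           # (M, 2N): one evaluation per row
n_cat = np.concatenate((ni, nj), axis=1)
print(X_cat.shape, np.unique(n_cat, axis=0))       # (5, 4) and the unique derivative patterns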