This article collects typical usage examples of the pylab.transpose function in Python. If you are wondering what transpose does, how to call it, or want to see it used in real code, the curated examples below may help.
The following shows 15 code examples of the transpose function, sorted by popularity by default. Upvoting the examples you find useful helps the system recommend better Python code samples.
Example 1: direct2cartesian
def direct2cartesian(self):
    """Convert atom coordinates from direct to cartesian"""
    if self.cartesian:
        return
    self.atoms = m.transpose(m.dot(self.lattice_constant*self.basis_vectors,
                                   m.transpose(self.atoms)))
    self.cartesian = True
Example 2: rotate_molecule
def rotate_molecule(coords, rotp=m.array((0., 0., 0.)), phi=0.,
                    theta=0., psi=0.):
    """Rotate a molecule via Euler angles.

    See http://mathworld.wolfram.com/EulerAngles.html for the definition.
    Input arguments:
    coords: Atom coordinates, as an Nx3 2d pylab array.
    rotp: The point to rotate about, as a 1d 3-element pylab array.
    phi: The 1st rotation angle around the z axis.
    theta: Rotation around the x axis.
    psi: 2nd rotation around the z axis.
    """
    # First move the molecule to the origin.
    # In contrast to MATLAB, numpy broadcasts the smaller array to the larger
    # row-wise, so there is no need to play with the Kronecker product.
    rcoords = coords - rotp
    # First Euler rotation about z in matrix form
    D = m.array(((m.cos(phi), m.sin(phi), 0.),
                 (-m.sin(phi), m.cos(phi), 0.),
                 (0., 0., 1.)))
    # Second Euler rotation about x:
    C = m.array(((1., 0., 0.),
                 (0., m.cos(theta), m.sin(theta)),
                 (0., -m.sin(theta), m.cos(theta))))
    # Third Euler rotation, 2nd rotation about z:
    B = m.array(((m.cos(psi), m.sin(psi), 0.),
                 (-m.sin(psi), m.cos(psi), 0.),
                 (0., 0., 1.)))
    # Total Euler rotation
    A = m.dot(B, m.dot(C, D))
    # Do the rotation
    rcoords = m.dot(A, m.transpose(rcoords))
    # Move back to the rotation point
    return m.transpose(rcoords) + rotp
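For orientation, a minimal usage sketch (not part of the original source; the coordinates are made up and rotate_molecule is assumed to be in scope) could look like this:

import pylab as m

# Hypothetical coordinates for three atoms, one row per atom (Nx3 array).
coords = m.array([[0.00, 0.00, 0.00],
                  [0.96, 0.00, 0.00],
                  [-0.24, 0.93, 0.00]])
# Rotate by 90 degrees about the z axis through the origin.
rotated = rotate_molecule(coords, phi=m.pi/2)
print(rotated.shape)   # (3, 3): still one row per atom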
Example 3: plotEnsemble2D
def plotEnsemble2D(ens, v1, v2, colordata=None, hess=None,
                   size=50, labelBest=True, ensembleAlpha=0.75, contourAlpha=1.0):
    """
    Plots a 2-dimensional projection of a given parameter
    ensemble, along given directions:
      -- If v1 and v2 are scalars, project onto the plane given by
         those two bare parameter directions.
      -- If v1 and v2 are vectors, project onto those two vectors.

    When given colordata (either a single color, or an array
    of colors the length of the ensemble), each point
    is assigned a color based on the colordata.

    With labelBest set, the first point in the ensemble is
    plotted larger (to show the 'best fit' point for a usual
    parameter ensemble).

    If a Hessian is given, cost contours are plotted
    using plotApproxContours2D.
    """
    if pylab.shape(v1) == ():
        xdata = pylab.transpose(ens)[v1]
        ydata = pylab.transpose(ens)[v2]
        # label axes
        param1name, param2name = '', ''
        try:
            paramLabels = ens[0].keys()
        except AttributeError:
            paramLabels = None
        if paramLabels is not None:
            param1name = ' (' + paramLabels[v1] + ')'
            param2name = ' (' + paramLabels[v2] + ')'
        pylab.xlabel('Parameter ' + str(v1) + param1name)
        pylab.ylabel('Parameter ' + str(v2) + param2name)
    else:
        xdata = pylab.dot(ens, v1)
        ydata = pylab.dot(ens, v2)
    if colordata is None:
        colordata = pylab.ones(len(xdata))
    if labelBest:  # plot the first point as a larger circle
        if pylab.shape(colordata) == ():  # single color
            colordata0 = colordata
            colordataRest = colordata
        else:  # specified colors
            colordata0 = [colordata[0]]
            colordataRest = colordata[1:]
        scatterColors(xdata[1:], ydata[1:], colordataRest,
                      size, alpha=ensembleAlpha)
        scatterColors([xdata[0]], [ydata[0]], colordata0,
                      size*4, alpha=ensembleAlpha)
    else:
        scatterColors(xdata, ydata, colordata, size, alpha=ensembleAlpha)
    if hess is not None:
        plotApproxContours2D(hess, v1, v2, pylab.array(ens[0]),
                             alpha=contourAlpha)
Example 4: cartesian2direct
def cartesian2direct(self):
    """Convert atom coordinates from cartesian to direct"""
    if not self.cartesian:
        return
    self.atoms = m.transpose(m.linalg.solve(self.lattice_constant *
                                            self.basis_vectors,
                                            m.transpose(self.atoms)))
    self.cartesian = False
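To make this pair of conversions (Examples 1 and 4) concrete, here is a small self-contained sketch, not from the original class, that applies the same array operations to made-up data; numpy.linalg.solve is imported explicitly instead of relying on the pylab namespace:

import pylab as m
from numpy.linalg import solve

lattice_constant = 4.0                      # made-up scaling factor
basis_vectors = m.transpose(m.array([[1.0, 0.0, 0.0],
                                     [0.0, 1.0, 0.0],
                                     [0.0, 0.0, 1.0]]))
direct = m.array([[0.0, 0.0, 0.0],          # two atoms in direct (fractional) coordinates
                  [0.5, 0.5, 0.5]])

# direct -> cartesian, as in direct2cartesian
cart = m.transpose(m.dot(lattice_constant*basis_vectors, m.transpose(direct)))
# cartesian -> direct, as in cartesian2direct
back = m.transpose(solve(lattice_constant*basis_vectors, m.transpose(cart)))
print(m.allclose(back, direct))   # True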
Example 5: degraderesolution
def degraderesolution(prefix, factor, dlogstring):
    covar = M.load(prefix+'covar.dat')
    pnl = M.load(prefix+'pnl.dat')
    dlog = M.load(prefix+dlogstring)[:,1]
    k = pnl[:,0]*1.
    p = pnl[:,1]*1.
    gausspart = M.load(prefix+'gausspart.dat')
    nbins = len(k)
    nongausspart = covar - gausspart

    nongausspartnew = nongausspart[:nbins-factor:factor,:nbins-factor:factor]*0.
    knew = k[:nbins-factor:factor]*0.
    pnew = p[:nbins-factor:factor]*0.
    gausspartnew = gausspart[:nbins-factor:factor,:nbins-factor:factor]*0.
    nbinsnew = len(knew)
    dlognew = dlog[:nbins-factor:factor]*0.
    for i1 in range(0,nbins-factor,factor):
        i1new = i1/factor
        print i1,i1+factor-1,nbins
        print i1new,nbinsnew
        weights = k[i1:i1+factor-1]**3
        sumweights = M.sum(weights)
        pnew[i1new] = M.sum(p[i1:i1+factor-1]*weights)/sumweights
        knew[i1new] = M.sum(k[i1:i1+factor-1]*weights)/sumweights
        dlognew[i1new] = M.sum(dlog[i1:i1+factor-1]*weights)/sumweights

    sqrtkfact = M.sqrt(k[1]/k[0])
    for i1 in range(0,nbins-factor,factor):
        i1new = i1/factor
        for i2 in range(0,nbins-factor,factor):
            i2new = i2/factor
            weights2 = M.outer(k[i1:i1+factor-1]**3,k[i2:i2+factor-1]**3)
            sumweights2 = M.sum(M.sum(weights2))
            nongausspartnew[i1new,i2new] = M.sum(M.sum(nongausspart[i1:i1+factor-1,i2:i2+factor-1]*weights2))/sumweights2
            if i1new == i2new:
                vk = (4.*M.pi/3.)*((k[i1+factor-1]*sqrtkfact)**3 - (k[i1]/sqrtkfact)**3)
                gausspartnew[i1new,i2new] = (2.*M.pi)**3 * 2.*(pnew[i1new]**2)/vk

    covarnew = gausspartnew + nongausspartnew
    prefixnew = prefix+'degrade'+str(factor)+'/'
    os.system('mkdir '+prefixnew)
    M.save(prefixnew+'pnl.dat', M.transpose([knew,pnew]), fmt='%18.16e')
    M.save(prefixnew+'covar.dat', covarnew, fmt='%18.16e')
    M.save(prefixnew+'gausspart.dat', gausspartnew, fmt='%18.16e')
    M.save(prefixnew+dlogstring, M.transpose([knew,dlognew]), fmt='%18.16e')
    M.save(prefix+'nbins.dat', M.array([[nbinsnew]]), fmt='%d')
Example 6: datagen
def datagen(N):
    """
    Produces N pairs of training data and desired output;
    each sample of training data contains -1 in its first position,
    which corresponds to interpreting the threshold as the first
    element of the weight vector.
    """
    fun1 = lambda x1,x2: -2*x1**3-x2+.5*x1**2
    fun2 = lambda x1,x2: x1**2*x2+2*x1*x2+1
    fun3 = lambda x1,x2: .5*x1*x2**2+x2**2-2*x1**2
    rarr1 = rand(1,N)
    rarr2 = rand(1,N)
    teacher = sign(rand(1,N)-.5)
    idplus = (teacher < 0)
    idminus = ~idplus
    rarr1[idplus] = rarr1[idplus]-1
    y1 = fun1(rarr1,rarr2)
    y2 = fun2(rarr1,rarr2)
    y3 = fun3(rarr1,rarr2)
    x = transpose(concatenate((-ones((1,N)), y1, y2)))
    return x, teacher[0]
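As an orientation aid (not part of the original snippet), a hypothetical call could look like the following, assuming the surrounding module did "from pylab import *" so that rand, sign, ones, concatenate and transpose are in scope:

x, teacher = datagen(5)
print(x.shape)        # (5, 3): a constant -1 bias column plus two nonlinear features
print(teacher.shape)  # (5,): labels of +1 / -1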
Example 7: f
def f(filename, theClass=1):
    fs, data = wavfile.read(filename)  # load the data
    # b=[(ele/2**8.)*2-1 for ele in data] # this is an 8-bit track, b is now normalized on [-1,1)
    print "Sample rate is: "
    print fs
    X = stft(data, fs, 256.0 / fs, 256.0 / fs)
    X = X[:, 0 : (X.shape[1] / 2)]
    shortTimeFFT = scipy.absolute(X.T)
    shortTimeFFT = scipy.log10(shortTimeFFT)
    # Plot the magnitude spectrogram.
    pylab.figure()
    pylab.imshow(shortTimeFFT, origin="lower", aspect="auto", interpolation="nearest")
    pylab.xlabel("Time")
    pylab.ylabel("Frequency")
    savefig(filename + "SFFT.png", bbox_inches="tight")
    features = mean(shortTimeFFT, axis=1)
    pylab.figure()
    pylab.plot(features, "r")
    savefig(filename + "AFFT.png", bbox_inches="tight")
    with open(filename + ".csv", "w") as fp:
        a = csv.writer(fp, delimiter=",")
        row = pylab.transpose(features)
        row = pylab.append(row, theClass)
        a.writerow(row)
Example 8: Q_calc
def Q_calc(self, X):
    """
    Calculates the Q (n_x by n_theta) matrix of the IDE model at each time step.

    Arguments
    ----------
    X: list of ndarray
        state vectors

    Returns
    ---------
    Q : list of ndarray (n_x by n_theta)
    """
    Q = []
    T = len(X)
    Psi = self.model.Gamma_inv_psi_conv_Phi
    Psi_T = pb.transpose(self.model.Gamma_inv_psi_conv_Phi, (0, 2, 1))
    for t in range(T):
        firing_rate_temp = pb.dot(X[t].T, self.model.Phi_values)
        firing_rate = self.model.act_fun.fmax/(1.+pb.exp(self.model.act_fun.varsigma*(self.model.act_fun.v0-firing_rate_temp)))
        # calculate q
        g = pb.dot(firing_rate, Psi_T)
        g *= (self.model.spacestep**2)
        q = self.model.Ts*g
        q = q.reshape(self.model.nx, self.model.n_theta)
        Q.append(q)
    return Q
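The call pb.transpose(..., (0, 2, 1)) above uses the axes argument of transpose to swap the last two axes of a 3-d array while keeping the first. A tiny standalone illustration (not from the original class):

import pylab as pb

stack = pb.zeros((4, 2, 3))               # e.g. a stack of four 2x3 matrices
swapped = pb.transpose(stack, (0, 2, 1))  # transpose each matrix in the stack
print(swapped.shape)                      # (4, 3, 2)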
Example 9: loadMNISTImages
def loadMNISTImages(filename):
    f = open(filename, 'rb')
    # Verify the magic number
    s = f.read(4)
    magic = int(s.encode('hex'), 16)
    assert(magic == 2051)
    # Get the number of images, rows and columns
    s = f.read(4)
    numImages = int(s.encode('hex'), 16)
    s = f.read(4)
    numRows = int(s.encode('hex'), 16)
    s = f.read(4)
    numCols = int(s.encode('hex'), 16)
    # Get the pixel data
    s = f.read()
    a = frombuffer(s, uint8)
    # Use 'F' to ensure that we read by column
    a = reshape(a, (numCols, numRows, numImages), order='F')
    images = transpose(a, (1, 0, 2))
    f.close()
    # Reshape to #pixels * #examples
    images = reshape(images, (shape(images)[0] * shape(images)[1], numImages),
                     order='F')
    images = double(images)/255
    return images
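For orientation (not part of the original snippet), a hypothetical call is sketched below; the file name is the standard MNIST training-set image file, and note that s.encode('hex') is Python 2 syntax, so the function as written needs Python 2 (Python 3 would use int.from_bytes(s, 'big')):

images = loadMNISTImages('train-images-idx3-ubyte')  # assumed local path to the MNIST file
print(images.shape)   # (784, 60000): one flattened 28x28 image per column
print(images.max())   # <= 1.0, since pixel values are divided by 255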
Example 10: load_data
def load_data(filename=None, datastr=None, skip_rows=False, use_cols=False):
    """Load a file with numeric column data into a numpy array.
    Automatically skips the header unless "skip_rows" is specified.
    Loads all columns unless "use_cols" is specified."""
    if filename is not None:
        textiter = open(filename, 'r')
    else:
        textiter = iter(datastr.split("\n"))
    datare = re.compile(r'\s*(-?\d+(\.\d+)?([Ee][+-]?\d+)?(\s+|$)){2,}')
    if skip_rows == False:
        skip_rows = 0
        nx = textiter.next()
        while datare.match(nx) is None:
            # print "skipping row ", skip_rows
            skip_rows += 1
            print nx
            nx = textiter.next()
        if filename is not None:
            textiter.close()
            textiter = open(filename, 'r')
        # else:
        #     textiter = iter(datastr.split("\n"))
    if use_cols:
        tmp_data = load(textiter, datastr, skiprows=skip_rows, usecols=use_cols)
    else:
        tmp_data = load(textiter, datastr, skiprows=skip_rows)
    if filename is not None:
        textiter.close()
    return transpose(tmp_data)
Example 11: Global_Stiffness
def Global_Stiffness(self):
    '''
    Generates the global stiffness matrix for the plane structure.
    '''
    elem = self.element
    B = py.zeros((6,6))
    for i in range(0, py.size(elem, 0)):
        # for each element find the stiffness matrix
        K = py.zeros((self.n_nodes*2, self.n_nodes*2))
        el = elem[i]
        # nodes formatted for input
        [node1, node2, node3] = el
        node1x = 2*(node1-1); node2x = 2*(node2-1); node3x = 2*(node3-1)
        node1y = 2*(node1-1)+1; node2y = 2*(node2-1)+1; node3y = 2*(node3-1)+1
        # Area, strain matrix and E matrix multiplied to get the element stiffness
        [J, B] = self.B(el)
        local_k = 0.5*abs(J)*py.dot(py.transpose(B), py.dot(self.E_matrix, B))
        if self.debug:
            print 'K for elem', el, '\n', local_k
        # Element K-matrix converted into global K-matrix format
        K[py.ix_([node1x,node1y,node2x,node2y,node3x,node3y],[node1x,node1y,node2x,node2y,node3x,node3y])] = \
            K[py.ix_([node1x,node1y,node2x,node2y,node3x,node3y],[node1x,node1y,node2x,node2y,node3x,node3y])] + local_k
        # Add the contribution into the global stiffness
        self.k_global = self.k_global + K
        if self.debug:
            print 'Global Stiffness', '\n', self.k_global
Example 12: read_all_csc
def read_all_csc(data_folder, dtype='int16', assume_same_fs=True, memmap=False,
                 memmap_folder=None, save_for_spikedetekt=False,
                 channels_to_save=None, return_sliced_data=False):
    if sys.version_info[0] > 2:
        mode = 'br'
    else:
        mode = 'r'

    os_name = platform.system()
    if os_name == 'Windows':
        sep = '\\'
    elif os_name == 'Linux':
        sep = r'/'

    files = [os.path.join(data_folder, f) for f in os.listdir(data_folder) if f.endswith('.ncs')]
    order = [int(file.split('.')[0].split('CSC')[1]) for file in files]
    sort_order = sorted(range(len(order)), key=order.__getitem__)
    ordered_files = [files[i] for i in sort_order]

    if memmap:
        if not memmap_folder:
            raise NameError("A memmap_folder should be defined for memmapped data")
        out_filename = data_folder.split(sep)[-1]+'.dat'
        out_full_filename = os.path.join(memmap_folder, out_filename)

    data = None
    i = 0
    for file in ordered_files:
        fin = open(file, mode=mode)
        x = read_single_csc(fin, assume_same_fs=assume_same_fs, memmap=memmap)
        if not assume_same_fs or memmap:
            channel_data = x['packets']['samp'].ravel()
            if data is None:
                data = pylab.memmap(out_full_filename, dtype=dtype, mode='w+',
                                    shape=(pylab.size(files), channel_data.size))
            data[i, :] = channel_data
            data.flush()
            i = i+1
            print(i)
        else:
            channel_data = x['trace']
            if data is None:
                data = pylab.zeros(shape=(pylab.size(files), channel_data.size), dtype=dtype)
            data[i, :] = channel_data
            i = i+1
            print(i)
    data_to_return = data

    if save_for_spikedetekt:
        if channels_to_save:
            data2 = data[channels_to_save, :]
            if return_sliced_data:
                data_to_return = data2
        else:
            data2 = data
        data2 = pylab.transpose(data2)
        data2.reshape(data2.size)
        filename = os.path.join(memmap_folder, 'spikedetekt_'+out_filename)
        data2.astype(dtype).tofile(filename)

    return data_to_return
Example 13: getInfoCurve
def getInfoCurve():
    """
    Various functions to calculate example parameter error bars as in
    Neyrinck & Szapudi 2007, MNRAS 375, L51
    """
    c = pt.Camb(hubble=70., ombh2=0.05*(0.7)**2, omch2=0.25*(0.7)**2)
    c.run()
    c.kextend(-10, 60)  # necessary to make the sigma(m) integral converge well
    pt.normalizePk(c, 0.8)  # sigma_8

    outputdir = 'example/'

    # Sheth-Tormen
    h = halo.HaloModel(c, st_big_a=0., st_little_a=0.707, stq=0.3,
                       k=10.**M.arange(-2, 1.01, 0.25), massdivsperdex=5)
    # For final calculations, use more massdivsperdex, e.g. 20 (maybe 10 is ok);
    # also, k is really coarse, as you'll see if you run this.

    # Get the covariance matrix from the halo-model trispectrum (saves it in the
    # 'prefix' directory); it also automatically runs halo.getHaloPknl.
    halo.getHaloCov(outputdir, c, h)

    # Power spectrum at h.k (range of k at which halo model quantities are evaluated)
    M.loglog(h.k, h.pnl)
    M.show()

    # Get derivatives w.r.t. ln A and the tilt
    h.dloga = halo.getdlogPnldCosmoParam(c, h, 'scalar_amp', linlog='log')
    h.dtilt = halo.getdlogPnldCosmoParam(c, h, 'scalar_spectral_index', linlog='lin')
    M.loglog(h.k, h.dloga**2, label='ln A')
    M.loglog(h.k, h.dtilt**2, label='tilt')
    M.legend()
    M.show()
    M.save(outputdir+'dlogpnldloga.dat', M.transpose([h.k, h.dloga]), fmt='%6.5e')
    M.save(outputdir+'dlogpnldtilt.dat', M.transpose([h.k, h.dtilt]), fmt='%6.5e')

    # Get the parameter covariance matrix (just a function of k, since there's only one variable)
    k, covmat = info.getParamCovMat(outputdir, dlogfilenames=['dlogpnldloga.dat', 'dlogpnldtilt.dat'])

    # Plot the unmarginalized error bars in ln A and the tilt,
    # if the matter power spectrum is analyzed from k = k[0] to k.
    M.loglog(k, M.sqrt(covmat[0, 0, :]), label='ln A')
    M.loglog(k, M.sqrt(covmat[1, 1, :]), label='tilt')
    M.legend()
    M.show()
Example 14: plot_simple
def plot_simple(in_file):
    try:
        data = pylab.loadtxt(in_file)
    except Exception:
        raise IOError("Can't read %s." % in_file)
    for i in pylab.transpose(data):
        pylab.plot(i)
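A hypothetical invocation (not in the original snippet), assuming a plain whitespace-separated numeric text file:

plot_simple('data.txt')   # 'data.txt' is a made-up file name
pylab.show()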
Example 15: read_poscar
def read_poscar(self, filename):
    """Parses a POSCAR file"""
    f = open(filename)
    poscar = f.readlines()
    f.close()

    # The first line should contain the atom names, e.g. "Ag Ge", in the same
    # order as later in the file (and in the POTCAR for the full vasp run).
    atomNames = poscar[0].split()

    self.lattice_constant = float(poscar[1])

    # Now the lattice vectors
    a = []
    for vector in poscar[2:5]:
        s = vector.split()
        floatvect = float(s[0]), float(s[1]), float(s[2])
        a.append(floatvect)
    # Transpose to make the natural ordering for linear algebra
    self.basis_vectors = m.transpose(m.array(a))

    # Number of atoms of each species. Again this must be in the same order as
    # in the first line and in the POTCAR file.
    numofatoms = poscar[5].split()
    for i in xrange(len(numofatoms)):
        numofatoms[i] = int(numofatoms[i])
        if (len(atomNames) < i + 1):
            atomNames.append("Unknown")
        [self.atom_symbols.append(atomNames[i]) for n in xrange(numofatoms[i])]

    # Check if selective dynamics is switched on
    sdyn = poscar[6]
    add = 0
    if sdyn[0] == "S" or sdyn[0] == "s":
        add = 1
        self.selective_dynamics = True

    # Check if atom coordinates are cartesian or direct
    acType = poscar[6+add]
    if acType[0] == "C" or acType[0] == "c" or acType[0] == "K" or acType[0] == "k":
        self.cartesian = 1
    else:
        self.cartesian = 0

    offset = add+7
    tot_natoms = sum(numofatoms)
    self.atoms = m.zeros((tot_natoms, 3))
    self.selective_flags = []
    for atom in xrange(tot_natoms):
        ac = poscar[atom+offset].split()
        self.atoms[atom] = (float(ac[0]), float(ac[1]), float(ac[2]))
        if self.selective_dynamics:
            self.selective_flags.append((ac[3], ac[4], ac[5]))
    if self.cartesian:
        self.atoms *= self.lattice_constant
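For reference, an illustration (not from the original source) of the kind of input this parser expects: a minimal VASP 4-style POSCAR, where the atom counts follow the lattice vectors directly, without the extra element-symbol line introduced in VASP 5. All numbers below are made up.

poscar_text = """Ag Ge
1.0
4.09 0.00 0.00
0.00 4.09 0.00
0.00 0.00 4.09
1 1
Selective dynamics
Direct
0.0 0.0 0.0 T T T
0.5 0.5 0.5 F F F
"""
with open('POSCAR_example', 'w') as f:
    f.write(poscar_text)
# poscar_obj.read_poscar('POSCAR_example')   # poscar_obj: a hypothetical instance of the class above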