This page collects typical usage examples of the Python function pylab.concatenate. If you are wondering what pylab.concatenate does, how to call it, or want to see it used in real code, the hand-picked examples below should help.
The page shows 15 code examples of the concatenate function, ordered by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Python code samples.
Example 1: gp_plot_prediction
def gp_plot_prediction(predict_x, mean, variance=None):
    """
    Plot a GP's prediction using pylab, including error bars if variance is specified.
    Error bars are 2 * standard_deviation, as in the GP for ML book.
    """
    import numpy as np
    from pylab import plot, concatenate, fill
    if variance is not None:
        # check variances are just about +ve - could signify a bug if not
        # assert variance.all() > -1e-10
        data = [
            (x, y, max(v, 0.0))
            for x, y, v
            in zip(predict_x, mean.flat, variance)
        ]
    else:
        data = [
            (x, y)
            for x, y
            in zip(predict_x, mean)
        ]
    data.sort(key=lambda d: d[0])  # sort on X axis
    predict_x = [d[0] for d in data]
    predict_y = np.array([d[1] for d in data])
    plot(predict_x, predict_y, color='k', linestyle=':')
    if variance is not None:
        sd = np.sqrt(np.array([d[2] for d in data]))
        var_x = concatenate((predict_x, predict_x[::-1]))
        var_y = concatenate((predict_y + 2.0 * sd, (predict_y - 2.0 * sd)[::-1]))
        p = fill(var_x, var_y, edgecolor='w', facecolor='#d3d3d3')
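A quick way to try it is with synthetic predictions; the values below are made up purely for illustration and are not from the original project:

import numpy as np
import pylab

predict_x = np.linspace(0.0, 10.0, 100)
mean = np.sin(predict_x)                    # pretend GP posterior mean
variance = 0.05 * np.ones_like(predict_x)   # pretend GP posterior variance
gp_plot_prediction(predict_x, mean, variance)
pylab.show()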
Example 2: homog2D
def homog2D(xPrime, x):
    """
    Compute the 3x3 homography matrix mapping a set of N 2D homogeneous
    points (3xN) to another set (3xN)
    """
    numPoints = xPrime.shape[1]
    assert numPoints >= 4
    A = None
    for i in range(0, numPoints):
        xiPrime = xPrime[:, i]
        xi = x[:, i]
        Ai_row0 = pl.concatenate((pl.zeros(3), -xiPrime[2] * xi, xiPrime[1] * xi))
        Ai_row1 = pl.concatenate((xiPrime[2] * xi, pl.zeros(3), -xiPrime[0] * xi))
        Ai = pl.row_stack((Ai_row0, Ai_row1))
        if A is None:
            A = Ai
        else:
            A = pl.vstack((A, Ai))
    U, S, V = pl.svd(A)
    V = V.T
    h = V[:, -1]
    H = pl.reshape(h, (3, 3))
    return H
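A sanity check, assuming `pl` is pylab imported as in the source project: build correspondences from a known homography and verify that it is recovered up to scale (synthetic data only):

import pylab as pl

H_true = pl.array([[1.0, 0.2, 5.0],
                   [0.1, 0.9, -3.0],
                   [0.001, 0.002, 1.0]])
x = pl.vstack((pl.rand(2, 8), pl.ones((1, 8))))   # eight homogeneous 2D points
xPrime = pl.dot(H_true, x)                        # exact correspondences
H_est = homog2D(xPrime, x)
print(pl.allclose(H_est / H_est[2, 2], H_true, atol=1e-6))   # expect True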
Example 3: example
def example():
    from pylab import rand, ones, concatenate
    import matplotlib.pyplot as plt
    # EXAMPLE data code from:
    # http://matplotlib.sourceforge.net/pyplots/boxplot_demo.py
    # fake up some data
    spread = rand(50) * 100
    center = ones(25) * 50
    flier_high = rand(10) * 100 + 100
    flier_low = rand(10) * -100
    data = concatenate((spread, center, flier_high, flier_low), 0)
    # fake up some more data
    spread = rand(50) * 100
    center = ones(25) * 40
    flier_high = rand(10) * 100 + 100
    flier_low = rand(10) * -100
    d2 = concatenate((spread, center, flier_high, flier_low), 0)
    data.shape = (-1, 1)
    d2.shape = (-1, 1)
    # data = [data, d2, d2[::2, 0]]
    data = [data, d2]
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.set_xlim(0, 4)
    percentile_box_plot(ax, data, [2, 3])
    plt.show()
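percentile_box_plot is defined elsewhere in that project and is not shown on this page; to run the data-generation part on its own, matplotlib's built-in boxplot can stand in for it (a sketch, not the original plotting routine):

from pylab import rand, ones, concatenate
import matplotlib.pyplot as plt

data = concatenate((rand(50) * 100, ones(25) * 50,
                    rand(10) * 100 + 100, rand(10) * -100), 0)
d2 = concatenate((rand(50) * 100, ones(25) * 40,
                  rand(10) * 100 + 100, rand(10) * -100), 0)
fig, ax = plt.subplots()
ax.boxplot([data, d2])
plt.show()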
Example 4: homog3D
def homog3D(points2d, points3d):
    """
    Compute a matrix relating homogeneous 3D points (4xN) to homogeneous
    2D points (3xN).
    Not sure why anyone would do this. Note that the returned transformation
    is *NOT* an isometry. But it's here... so deal with it.
    """
    numPoints = points2d.shape[1]
    assert numPoints >= 4
    A = None
    for i in range(0, numPoints):
        xiPrime = points2d[:, i]
        xi = points3d[:, i]
        Ai_row0 = pl.concatenate((pl.zeros(4), -xiPrime[2] * xi, xiPrime[1] * xi))
        Ai_row1 = pl.concatenate((xiPrime[2] * xi, pl.zeros(4), -xiPrime[0] * xi))
        Ai = pl.row_stack((Ai_row0, Ai_row1))
        if A is None:
            A = Ai
        else:
            A = pl.vstack((A, Ai))
    U, S, V = pl.svd(A)
    V = V.T
    h = V[:, -1]
    P = pl.reshape(h, (3, 4))
    return P
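As with homog2D, a round-trip check with synthetic data (note that six correspondences are needed to pin down the 11 degrees of freedom of P, even though the assert only requires four):

import pylab as pl

P_true = pl.concatenate((pl.eye(3), pl.array([[0.1], [0.2], [1.0]])), axis=1)
X = pl.vstack((pl.rand(3, 6), pl.ones((1, 6))))   # six homogeneous 3D points
x = pl.dot(P_true, X)                             # their exact 2D projections
P_est = homog3D(x, X)
print(pl.allclose(P_est / P_est[2, 3], P_true, atol=1e-6))   # expect True (up to scale)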
Example 5: drawBetween
def drawBetween(x, yl, yh, col, lw, alpha=1, plot=pylab.plot):
    fx = pylab.concatenate((x, x[::-1]))
    fy = pylab.concatenate((yh, yl[::-1]))
    # probably does not work with log??
    p = pylab.fill(fx, fy, facecolor=col, lw=0, alpha=alpha)
    if lw:
        plot(x, yl, x, yh, aa=1, alpha=alpha, lw=lw, color='k')
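For example, shading a ±0.2 band around a sine curve (illustrative values only):

import pylab

x = pylab.linspace(0, 2 * pylab.pi, 200)
y = pylab.sin(x)
drawBetween(x, y - 0.2, y + 0.2, col='0.8', lw=1, alpha=0.5)
pylab.plot(x, y, 'k')
pylab.show()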
Example 6: density_plot
def density_plot(x, D):
    """Plot the density D along with a confidence region"""
    # TODO: pass parameters through (e.g. color, axes, ...)
    fx = D(x)
    x_ = pl.concatenate((x, x[::-1]))
    fx_ = pl.clip(pl.concatenate((fx + D.c, fx[::-1] - D.c)), 0, pl.inf)
    pl.fill(x_, fx_, edgecolor=[.5] * 3, facecolor=[.8] * 3)
    pl.plot(x, fx, color=[0] * 3)
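It expects D to be callable on x and to expose a confidence half-width in the attribute c; a minimal stand-in object (purely hypothetical) looks like this:

import pylab as pl

class GaussDensity(object):
    c = 0.05                                        # half-width of the confidence band
    def __call__(self, x):
        return pl.exp(-0.5 * x ** 2) / pl.sqrt(2 * pl.pi)

x = pl.linspace(-4, 4, 200)
density_plot(x, GaussDensity())
pl.show()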
Example 7: shadowing
def shadowing(self):
    """Select the shadowed antennas from the FLAG column and return the indices of the
    shadowed measurements together with the percentage of shadowing."""
    indexFlag = pl.concatenate((pl.where(self.f == 1)[0],
                                pl.where(self.ff[0, 0])[0],
                                pl.where(self.ff[1, 0])[0]))
    indexNoFlag = pl.concatenate((pl.where(self.f == 0)[0],
                                  pl.where(self.ff[0, 0] == False)[0],
                                  pl.where(self.ff[1, 0] == False)[0]))
    Ntot = len(indexFlag) + len(indexNoFlag)
    fractionShadow = 100. * len(indexFlag) / Ntot
    return (indexFlag, fractionShadow)
Example 8: int_peak
def int_peak(self, fitrange=None, intrange=None, normalize=False, plot=False, npoints=10):
    """
    Fit a linear background, subtract it, and integrate. Intended for integrating peaks.

    The wavelengths (self._wavelen, in nm, sorted low-to-high or high-to-low) and the
    luminescence values (self._lum) are taken from the instance.

    fitrange : 2-element list, optional
        Defaults to the span of the data. Input: [low nm, high nm]
    intrange : 2-element list, optional
        Defaults to the span of the data or fitrange (if given). Input: [low nm, high nm]
    normalize : boolean, optional
        Default is False
    plot : boolean, optional
        Default is False. Plots the original data, the linear background, and the data
        with the background subtracted
    npoints : int
        Default is 10. Number of points above and below the given fitrange point to
        average over.
    """
    if fitrange is None:
        fitindex = [0 + npoints // 2, len(self._wavelen) - 1 - npoints // 2]
    else:
        fitindex = [0, 0]
        fitindex[0] = py.where(self._wavelen > fitrange[0])[0][0]
        fitindex[1] = py.where(self._wavelen > fitrange[1])[0][0]
    wavelenfit = py.concatenate((self._wavelen[fitindex[0] - npoints // 2:fitindex[0] + npoints // 2],
                                 self._wavelen[fitindex[1] - npoints // 2:fitindex[1] + npoints // 2]))
    lumfit = py.concatenate((self._lum[fitindex[0] - npoints // 2:fitindex[0] + npoints // 2],
                             self._lum[fitindex[1] - npoints // 2:fitindex[1] + npoints // 2]))
    linearfit = py.polyfit(wavelenfit, lumfit, 1)
    linear_bg = py.polyval(linearfit, self._wavelen[fitindex[0]:fitindex[1] + 1])
    wavelen_bg = self._wavelen[fitindex[0]:fitindex[1] + 1].copy()
    lum_bg = self._lum[fitindex[0]:fitindex[1] + 1].copy()
    lum_bg -= linear_bg
    if plot is True:
        py.plot(self._wavelen, self._lum, 'k')
        py.plot(wavelen_bg, linear_bg, 'k:')
        py.plot(wavelen_bg, lum_bg, 'r')
        py.show()
    intindex = [0, 0]
    if intrange is None:
        wavelen_int = wavelen_bg
        lum_int = lum_bg
    else:
        intindex[0] = py.where(wavelen_bg > intrange[0])[0][0]
        intindex[1] = py.where(wavelen_bg > intrange[1])[0][0]
        wavelen_int = wavelen_bg[intindex[0]:intindex[1] + 1]
        lum_int = lum_bg[intindex[0]:intindex[1] + 1]
    peak_area = py.trapz(lum_int, x=wavelen_int)
    return peak_area
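The same background-subtract-and-integrate idea as a standalone sketch, with the instance attributes replaced by synthetic arrays (names and numbers here are made up, and py is assumed to be pylab):

import pylab as py

wavelen = py.linspace(400, 700, 600)                                  # nm
lum = py.exp(-0.5 * ((wavelen - 550) / 10) ** 2) + 0.001 * wavelen    # peak on a sloped background
edge_wl = py.concatenate((wavelen[:20], wavelen[-20:]))               # points on either side of the peak
edge_lum = py.concatenate((lum[:20], lum[-20:]))
background = py.polyval(py.polyfit(edge_wl, edge_lum, 1), wavelen)
peak_area = py.trapz(lum - background, x=wavelen)
print(peak_area)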
Example 9: set_pdf
def set_pdf(self, x, p, Nrl=1000):
    """Generate the lookup tables.
    x is the value of the random variate
    pdf is its probability density
    cdf is the cumulative pdf
    inversecdf is the inverse look up table
    """
    self.x = x
    self.pdf = p / p.sum()  # normalize it
    self.cdf = self.pdf.cumsum()
    self.inversecdfbins = Nrl
    self.Nrl = Nrl
    y = pylab.arange(Nrl) / float(Nrl)
    delta = 1.0 / Nrl
    self.inversecdf = pylab.zeros(Nrl)
    self.inversecdf[0] = self.x[0]
    cdf_idx = 0
    for n in range(1, self.inversecdfbins):
        while self.cdf[cdf_idx] < y[n] and cdf_idx < Nrl:
            cdf_idx += 1
        self.inversecdf[n] = self.x[cdf_idx - 1] + (self.x[cdf_idx] - self.x[cdf_idx - 1]) * (y[n] - self.cdf[cdf_idx - 1]) / (self.cdf[cdf_idx] - self.cdf[cdf_idx - 1])
        if cdf_idx >= Nrl:
            break
    self.delta_inversecdf = pylab.concatenate((pylab.diff(self.inversecdf), [0]))
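The same inverse-CDF lookup idea can be sketched compactly with pylab.interp; this is not the class's own sampling code, just the concept it implements:

import pylab

x = pylab.linspace(-5, 5, 1000)
p = pylab.exp(-0.5 * x ** 2)                        # unnormalized Gaussian pdf
cdf = (p / p.sum()).cumsum()
samples = pylab.interp(pylab.rand(10000), cdf, x)   # inverse-transform sampling
print(samples.mean(), samples.std())                # roughly 0 and 1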
Example 10: fixation_box_samples
def fixation_box_samples(all_x, all_y, fix_w, dwell_times, f_samp=200.0):
    """Collect all x and ys for all trials for when the eye is within the fixation
    box."""
    n_trials = len(all_x)
    in_fix_box_x = pylab.array([], dtype=float)
    in_fix_box_y = pylab.array([], dtype=float)
    for tr in range(n_trials):
        if dwell_times[tr, 0] >= 0:
            # We got a fixation
            start_idx = int(f_samp * dwell_times[tr, 0] / 1000.0)
            end_idx = -1
            if dwell_times[tr, 1] >= 0:
                end_idx = int(f_samp * dwell_times[tr, 1] / 1000.0) - 5
            in_fix_box_x = pylab.concatenate((in_fix_box_x, all_x[tr][start_idx:end_idx]))
            in_fix_box_y = pylab.concatenate((in_fix_box_y, all_y[tr][start_idx:end_idx]))
    return in_fix_box_x, in_fix_box_y
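A minimal call with made-up eye traces (two trials sampled at 200 Hz; the second trial has no fixation, and fix_w is not used by the function):

import pylab

all_x = [pylab.rand(400), pylab.rand(400)]
all_y = [pylab.rand(400), pylab.rand(400)]
dwell_times = pylab.array([[100.0, 900.0],    # fixation acquired / broken, in ms
                           [-1.0, -1.0]])     # no fixation on this trial
fx, fy = fixation_box_samples(all_x, all_y, fix_w=None, dwell_times=dwell_times)
print(fx.size, fy.size)   # samples collected from the first trial only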
Example 11: getCloneReplicates
def getCloneReplicates(self, clone, w, applyFilter=False):
    '''Retrieve all growth curves for a clone+well'''
    # Check if any other replicates should be returned
    # retArray is a 2xN multidimensional numpy array
    retArray = py.array([])
    first = True
    for rep in self.replicates[clone]:
        # Get replicate
        filterMe = self.dataHash[clone][rep][w]['filter']
        currCurve = self.dataHash[clone][rep][w]['od']
        # Check if filter is enabled and curve should be filtered
        if applyFilter and filterMe:
            continue
        # Create multidimensional array if first
        elif first:
            retArray = py.array([currCurve])
            first = False
        # Append to multidimensional array if not first
        else:
            retArray = py.concatenate((retArray, py.array([currCurve])))
    return retArray
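The accumulation pattern used above, shown in isolation with toy curves (assuming py is pylab):

import pylab as py

curves = [py.rand(24), py.rand(24), py.rand(24)]   # three replicate growth curves
retArray = py.array([curves[0]])
for c in curves[1:]:
    retArray = py.concatenate((retArray, py.array([c])))
print(retArray.shape)   # (3, 24)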
Example 12: getCloneReplicates
def getCloneReplicates(self, clone, source, condition, applyFilter=False):
    '''Retrieve all growth curves for a clone+source+condition'''
    # Check if any other replicates should be returned
    # retArray is a 2xN multidimensional numpy array
    retArray = py.array([])
    first = True
    for i in range(1, self.numReplicates[clone] + 1):
        # Get replicate
        filterMe = self.dataHash[clone][i][source][condition]['filter']
        currCurve = self.dataHash[clone][i][source][condition]['od']
        # Check if filter is enabled and curve should be filtered
        if applyFilter and filterMe:
            continue
        # Create multidimensional array if first
        elif first:
            retArray = py.array([currCurve])
            first = False
        # Append to multidimensional array if not first
        else:
            retArray = py.concatenate((retArray, py.array([currCurve])))
    return retArray
Example 13: old_spike_psth
def old_spike_psth(data, t1_ms=-250., t2_ms=0., bin_ms=10):
    """Uses data format returned by get_spikes"""
    spike_time_ms = data['spike times ms']
    N_trials = data['trials']
    t2_ms = pylab.ceil((t2_ms - t1_ms) / bin_ms) * bin_ms + t1_ms
    N_bins = int((t2_ms - t1_ms) / bin_ms)
    if N_trials > 0:
        all_spikes_ms = pylab.array([], dtype=float)
        for trial in range(len(spike_time_ms)):
            if spike_time_ms[trial] is None:
                continue
            idx = pylab.where((spike_time_ms[trial] >= t1_ms) &
                              (spike_time_ms[trial] <= t2_ms))[0]
            all_spikes_ms = \
                pylab.concatenate((all_spikes_ms, spike_time_ms[trial][idx]))
        spike_n_bin, bin_edges = \
            pylab.histogram(all_spikes_ms, bins=N_bins, range=(t1_ms, t2_ms))
        spikes_per_trial_in_bin = spike_n_bin / float(N_trials)
        spike_rate = 1000 * spikes_per_trial_in_bin / bin_ms
    else:
        spike_rate = pylab.nan
        # keep bin_edges defined so the bin centres can still be returned
        bin_edges = pylab.linspace(t1_ms, t2_ms, N_bins + 1)
    bin_center_ms = (bin_edges[1:] + bin_edges[:-1]) / 2.0
    return spike_rate, bin_center_ms
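A hypothetical input in the format the docstring refers to (a dict holding per-trial spike times in ms and the trial count; get_spikes itself is not shown on this page):

import pylab

data = {'spike times ms': [pylab.rand(30) * 250.0 - 250.0,    # spikes in [-250, 0) ms
                           None,                              # trial with no spikes
                           pylab.rand(20) * 250.0 - 250.0],
        'trials': 3}
spike_rate, bin_center_ms = old_spike_psth(data, t1_ms=-250., t2_ms=0., bin_ms=10)
print(spike_rate.shape, bin_center_ms.shape)   # 25 bins of 10 ms each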
Example 14: datagen
def datagen(N):
    """
    Produces N pairs of training data and desired output;
    each sample of training data contains -1 in its first position,
    this corresponds to the interpretation of the threshold as first
    element of the weight vector
    """
    fun1 = lambda x1, x2: -2 * x1 ** 3 - x2 + .5 * x1 ** 2
    fun2 = lambda x1, x2: x1 ** 2 * x2 + 2 * x1 * x2 + 1
    fun3 = lambda x1, x2: .5 * x1 * x2 ** 2 + x2 ** 2 - 2 * x1 ** 2
    rarr1 = rand(1, N)
    rarr2 = rand(1, N)
    teacher = sign(rand(1, N) - .5)
    idplus = (teacher < 0)
    idminus = ~idplus
    rarr1[idplus] = rarr1[idplus] - 1
    y1 = fun1(rarr1, rarr2)
    y2 = fun2(rarr1, rarr2)
    y3 = fun3(rarr1, rarr2)
    x = transpose(concatenate((-ones((1, N)), y1, y2)))
    return x, teacher[0]
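The snippet relies on names such as rand, sign and concatenate being available at module level (presumably via a star import from pylab in the original project); with that assumption, a quick shape check:

from pylab import *   # provides rand, sign, ones, transpose, concatenate used by datagen

x, teacher = datagen(100)
print(x.shape, teacher.shape)   # expect (100, 3) and (100,)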
Example 15: px_smooth
def px_smooth(idx, e, x, idx_table, N_HE0, N_US, N_US_HE, WC):
    """Over sample, smooth and undersample photoionization cross-sections
    """
    i, nmin, ntot, m, l, p, pos = idx_table[idx]
    try:
        # case of TOPBASE data
        nmin.index(".")
        nmin = pl.nan
    except ValueError:
        nmin = int(nmin)
    # Keep sampling for high energy values where the variation follows Kramer's law
    if isinstance(int(ntot) - nmin, int):
        N_HE = int(ntot) - nmin
    else:
        N_HE = N_HE0
    if N_HE >= e.size:
        N_HE = -e.size
        print("Warning: N_HE is larger than photoionization table, select all the table.")
    e_sel = e[:-N_HE]
    e_sel_log = pl.log10(e_sel)
    x_sel = x[:-N_HE]
    # Interpolate and smooth data
    # e_i = pl.linspace(min(e_sel), max(e_sel), 10000)
    e_i_log = pl.linspace(min(e_sel_log), max(e_sel_log), 10000)
    e_i = 10 ** e_i_log
    x_i = pl.interp(e_i, e_sel, x_sel)
    x_is = smooth(x_i, WC)
    e_us = pl.concatenate([e_i[0:10], e_i[::N_US], e[int(ntot) - N_HE::N_US_HE]])
    x_us = pl.concatenate([x_is[0:10], x_is[::N_US], x[int(ntot) - N_HE::N_US_HE]])
    if x_us.any() == 0.0:
        print("x_us = 0")
        quit(1)
    # Conservation of area
    # area = pl.trapz(x_Mb, e_eV)    # total
    # area = pl.trapz(e_sel, x_sel)  # selected
    area_i = pl.trapz(x_i, e_i)    # selected, interpolated
    area_is = pl.trapz(x_is, e_i)  # selected, interpolated and smoothed
    # area_us = pl.trapz(x_us, e_us)
    return e_us, x_us, area_i, area_is