This article collects typical usage examples of the Python function numpy.amax. If you have been wondering exactly what amax does, how to call it, or what it looks like in real code, the curated examples below should help.
The following 15 code examples of amax are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
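Before the project-level examples, here is a minimal refresher (not taken from any of the projects below) on what numpy.amax itself does; the array a and its values are made up purely for illustration.

import numpy as np

a = np.array([[1, 5, 3],
              [4, 2, 6]])
print(np.amax(a))                   # 6: maximum over the flattened array
print(np.amax(a, axis=0))           # [4 5 6]: column-wise maxima
print(np.amax(a, axis=1))           # [5 6]: row-wise maxima
print(np.amax(np.absolute(a - 4)))  # 3: peak absolute value, a recurring pattern below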
Example 1: get_peaks_cf
def get_peaks_cf(data, win_size):
    """
    data: audio as numpy array to be analyzed
    win_size: value in samples to create the blocks for analysis
    Used in calc_crest_factor, this function returns an array of peak levels
    for each window.
    return: array of peak audio levels
    """
    if len(data) == 2:
        # Separate left and right channels
        data_l = data[0, :]
        data_r = data[1, :]
        # Buffer up the data (frame_length/hop_length as keywords for newer librosa)
        data_matrix_l = librosa.util.frame(data_l, frame_length=win_size, hop_length=win_size)
        data_matrix_r = librosa.util.frame(data_r, frame_length=win_size, hop_length=win_size)
        # Get peaks for left and right channels
        peaks_l = np.amax(np.absolute(data_matrix_l), axis=0)
        peaks_r = np.amax(np.absolute(data_matrix_r), axis=0)
        return np.maximum(peaks_l, peaks_r)
    else:
        data_matrix = librosa.util.frame(data, frame_length=win_size, hop_length=win_size)
        return np.amax(np.absolute(data_matrix), axis=0)
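For context, a hypothetical call site for get_peaks_cf might look like the sketch below; the file name and the 50 ms window size are made up for illustration, and a stereo file loaded with mono=False arrives as a (2, n_samples) array.

import numpy as np
import librosa

audio, sr = librosa.load("example.wav", sr=None, mono=False)  # hypothetical input file
peaks = get_peaks_cf(audio, win_size=int(0.05 * sr))          # 50 ms analysis windows
print(np.amax(peaks))                                         # overall peak across windows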
Example 2: test_known_parametrization
def test_known_parametrization():
    R = 1
    P = 1
    toll = 2.e-3
    n = 10
    ii = np.linspace(0, 1, n + 1)
    control_points_3d = np.asarray(np.zeros([n + 1, 3]))  # [np.array([R*np.cos(5*i * np.pi / (n + 1)), R*np.sin(5*i * np.pi / (n + 1)), P * i]) for i in range(0, n+1)]
    print(control_points_3d.shape)
    control_points_3d[:, 0] = np.array([R * np.cos(5 * i * np.pi / (n + 1)) for i in ii])
    control_points_3d[:, 1] = np.array([R * np.sin(5 * i * np.pi / (n + 1)) for i in ii])
    control_points_3d[:, 2] = np.array([P * i for i in range(n + 1)])
    vsl = AffineVectorSpace(UniformLagrangeVectorSpace(n + 1), 0, 1)
    arky = ArcLengthParametrizer(vsl, control_points_3d)
    new_control_points_3d = arky.reparametrize()
    #new_arky = ArcLengthParametrizer(vsl, new_control_points_3d)
    #new_new_control_points_3d = arky.reparametrize()
    tt = np.linspace(0, 1, 128)
    vals = vsl.element(control_points_3d)(tt)
    #print(vals)
    new_vals = vsl.element(new_control_points_3d)(tt)
    #print(vals.shape, new_vals.shape)
    print(np.amax(np.abs(vals - new_vals)))
    assert np.amax(np.abs(control_points_3d - new_control_points_3d)) / P < toll
Example 3: Seuil_var
def Seuil_var(img):
    """
    This function computes a threshold value. First the image histogram is
    computed; the threshold is then set to the first histogram index which
    satisfies the following criteria: DH > 0, DH(i)/H(i) > 0.1, H(i) < 0.01% of the Norm.
    In : img : IplImage : image to be processed
    Out: seuil : int : value of the threshold
    """
    dim = 255
    MaxValue = np.amax(np.asarray(img[:]))
    Norm = np.asarray(img[:]).shape[0] * np.asarray(img[:]).shape[1]
    scale = MaxValue / dim
    Wdim = dim * scale
    MaxValue = np.amax(np.asarray(img[:]))
    bins = [float(x) for x in range(dim)]
    hist, bin_edges = np.histogram(np.asarray(img[:]), bins)
    Norm = Norm - hist[0]
    median = np.median(hist)
    mean = 0
    var = 0
    i = 1
    som = 0
    while som < 0.8 * Norm and i < len(hist) - 1:
        som = som + hist[i]
        i = i + 1
    while (hist[i] - hist[i-1] < 0 or (hist[i] - hist[i-1]) / hist[i-1] > 0.1 or hist[i] > 0.01 * Norm) and i < len(hist) - 1:
        i = i + 1
    if i == len(hist) - 1:
        seuil = 0
    seuil = i
    var = 0
    return seuil
Example 4: run_sim
def run_sim(R_star, transit_duration, bodies):
    """Run 3-body sim and convert results to TTV + TDV values in [minutes]"""
    # Note: orbit_duration, ttv_list, tdv_list, planet, gravity_firstmoon and
    # gravity_secondmoon are defined elsewhere in the original script.
    # Run 3-body sim for one full orbit of the outermost moon
    loop(bodies, orbit_duration)
    # Move resulting data from lists to numpy arrays
    ttv_array = numpy.array([])
    ttv_array = ttv_list
    tdv_array = numpy.array([])
    tdv_array = tdv_list
    # Zeropoint correction
    middle_point = numpy.amin(ttv_array) + numpy.amax(ttv_array)
    ttv_array = numpy.subtract(ttv_array, 0.5 * middle_point)
    ttv_array = numpy.divide(ttv_array, 1000)  # km/s
    # Compensate for barycenter offset of planet at start of simulation:
    planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
    stretch_factor = 1 / ((planet.px / 1000) / numpy.amax(ttv_array))
    ttv_array = numpy.divide(ttv_array, stretch_factor)
    # Convert to time units, TTV
    ttv_array = numpy.divide(ttv_array, R_star)
    ttv_array = numpy.multiply(ttv_array, transit_duration * 60 * 24)  # minutes
    # Convert to time units, TDV
    oldspeed = (2 * R_star / transit_duration) * 1000 / 24 / 60 / 60  # m/sec
    newspeed = oldspeed - numpy.amax(tdv_array)
    difference = (transit_duration - (transit_duration * newspeed / oldspeed)) * 24 * 60
    conversion_factor = difference / numpy.amax(tdv_array)
    tdv_array = numpy.multiply(tdv_array, conversion_factor)
    return ttv_array, tdv_array
Example 5: CAOSpy_run
def CAOSpy_run(tstart, tstop, mc, pdyn, particles, leftover, drained):
    timenow = tstart
    # loop through time
    while timenow < tstop:
        print('time:', timenow)
        [thS, npart] = pdyn.gridupdate_thS(particles.lat, particles.z, mc)
        # define dt by the Courant criterion
        dt_D = (mc.mgrid.vertfac.values[0])**2 / (2 * np.amax(mc.D[np.amax(thS), :]))
        dt_ku = -mc.mgrid.vertfac.values[0] / np.amax(mc.ku[np.amax(thS), :])
        dt = np.amin([dt_D, dt_ku])
        # INFILT
        p_inf = cinf.pmx_infilt(timenow, precTS, mc, dt, leftover)
        #print(timenow)
        #print(p_inf)
        particlesnow = pd.concat([particles, p_inf])
        #p_backup = particlesnow.copy()
        # DIFFUSION
        [particlesnow, thS, npart, phi_mx] = pdyn.part_diffusion_split(particlesnow, npart, thS, mc, dt, True, 10)
        # ADVECTION
        particlesnow = pdyn.mac_advection(particlesnow, mc, thS, dt)
        # drained particles
        drained = drained.append(particlesnow[particlesnow.flag == len(mc.maccols) + 1])
        particlesnow = particlesnow[particlesnow.flag != len(mc.maccols) + 1]
        # MX-MAC-INTERACTION
        pdyn.mx_mp_interact(particlesnow, npart, thS, mc, dt)
        pondparts = (particlesnow.z < 0.)
        leftover = np.count_nonzero(~pondparts)  # ~ replaces the old unary minus on a boolean mask
        particles = particlesnow[pondparts]
        timenow = timenow + dt
    return (particles, npart, thS, leftover, drained, timenow)
Example 6: create_histogram
def create_histogram(mu, sigma, weights, bin_size, low_spec, high_spec, cu1_accepted, t1_failure_pos):
    p1 = figure(title="Normal Distribution", tools="pan,box_select,box_zoom,xwheel_zoom,reset,save,resize", background_fill="#E8DDCB")
    measured = np.random.normal(mu, sigma, 1000)
    hist, edges = np.histogram(weights, density=True, bins=bin_size)
    x = np.linspace(np.amin(weights), np.amax(weights), 1000)
    pdf = 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(x - mu)**2 / (2 * sigma**2))
    cdf = (1 + scipy.special.erf((x - mu) / np.sqrt(2 * sigma**2))) / 2
    p1.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],
            fill_color="#036564", line_color="#033649")
    sort_weights = sorted(weights)
    cu1_yield = round(float(len(cu1_accepted)) / (float(len(cu1_accepted)) + float(len(t1_failure_pos))), 2)
    p1.line(x, pdf, line_color="#D95B43", line_width=8, alpha=0.7, legend="PDF")
    p1.line(low_spec, y=[0, np.amax(hist)], line_dash=[4, 4], line_color="orange", line_width=3, alpha=.5)
    p1.line(high_spec, y=[0, np.amax(hist)], line_dash=[4, 4], line_color="orange", line_width=3, alpha=.5)
    p1.line(weights[0], 0, line_width=1, legend='Mean = ' + str(round(mu, 3)))  # daily rejected
    p1.line(weights[0], 0, line_width=1, legend='2*Std (Std = ' + str(round(sigma, 3)) + ")")  # daily accepted
    p1.line(weights[0], 0, line_width=1, legend='Yield: ' + str(cu1_yield))  # daily rejected
    p1.line(weights[0], 0, line_width=1, legend='Accepted: ' + str(len(cu1_accepted)))  # daily accepted
    p1.line(weights[0], 0, line_width=1, legend='Rejected: ' + str(len(t1_failure_pos)))  # daily rejected
    p1.xaxis.bounds = (np.amin(weights), np.amax(weights))
    p1.legend.orientation = "top_left"
    p1.xaxis.axis_label = 'Weight (g)'
    p1.yaxis.axis_label = 'Pr(x)'
    return p1
Example 7: iff_filter
def iff_filter(sig, scale, plot_show=0):
    # Note: med_sig and rfft are defined/imported elsewhere in the original module.
    order = max(sig.size * scale, 90)
    #order = 80
    # Extend signal on both sides for removing boundary effect in convolution
    sig_extend = np.ones(sig.size + int(order/2) * 2)
    sig_extend[int(order/2):(sig.size + int(order/2))] = sig
    sig_extend[0:int(order/2)] = sig[(sig.size - int(order/2)):sig.size]
    sig_extend[(sig.size + int(order/2)):sig_extend.size] = sig[0:int(order/2)]
    # Convolve with a Hamming window and normalize
    smooth_sig = np.convolve(sig_extend, np.hamming(order), 'same')
    smooth_sig = smooth_sig[int(order/2):(sig.size + int(order/2))]
    smooth_sig = np.amax(sig) / np.amax(smooth_sig) * smooth_sig
    # Plot signal for debug
    if plot_show == 1:
        fig, ax = plt.subplots(ncols=2)
        ax[0].plot(sig)
        ax[0].plot(smooth_sig, '-r')
        ax[0].plot(med_sig, 'black')
        ax[1].loglog(rfft(sig))
        ax[1].loglog(rfft(smooth_sig), '-r')
        ax[1].loglog(rfft(med_sig), 'black')
        plt.show()
    return smooth_sig
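A hypothetical usage sketch for iff_filter (with invented data) could look like this; with a scale of 0.05 and 1000 samples the smoothing order falls back to the minimum of 90, and plot_show stays at 0 so the debug plotting branch never runs.

import numpy as np

t = np.linspace(0, 2 * np.pi, 1000)
noisy = np.sin(t) + 0.1 * np.random.randn(1000)  # invented test signal
smooth = iff_filter(noisy, 0.05)
print(np.amax(np.absolute(noisy - smooth)))      # largest deviation introduced by smoothing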
Example 8: main
def main(X, Xtest, time):
    global cut
    global count
    cut = 0
    count = 0
    root = node()
    root.trainData = X
    root.testData = Xtest
    print("shape of xtest in main: ", np.shape(Xtest))
    x1 = min(np.amin(X[:, [0]]), np.amin(Xtest[:, [0]])) - .05
    x2 = max(np.amax(X[:, [0]]), np.amax(Xtest[:, [0]])) + .1
    y1 = min(np.amin(X[:, [1]]), np.amin(Xtest[:, [1]])) - .05
    y2 = max(np.amax(X[:, [1]]), np.amax(Xtest[:, [1]])) + .1
    plt.figure()
    plt.axis([x1, x2, y1, y2])
    print("x1 x2 y1 y2: ", x1, x2, y1, y2)
    root.coordinates.append([x1, x2])
    root.coordinates.append([y1, y2])
    leaves = []
    MP(root, time, leaves)
    point_index = {}
    train_key = list(map(tuple, X))
    test_key = list(map(tuple, Xtest))
    x_shape = np.shape(X)
    for i in range(x_shape[0]):
        point_index[train_key[i]] = i
    Xtest_shape = np.shape(Xtest)
    for i in range(0, Xtest_shape[0]):
        point_index[test_key[i]] = i
    # plt.show()
    plt.close()
    return feature(leaves, point_index)
Example 9: mamPlot
def mamPlot(funct, args):
    pl = args[0]
    x = np.array([])
    ymin = np.array([])
    yavg = np.array([])
    ymax = np.array([])
    f = np.array([])
    x = np.append(x, funct.rmsSet[:, 0])
    ymin = np.append(ymin, funct.rmsSet[:, 1])
    ymax = np.append(ymax, funct.rmsSet[:, 2])
    t1 = funct.rmsSet[:, 3]
    t2 = funct.rmsSet[:, 5]
    yavg = np.append(yavg, t1 / t2)
    f = np.append(f, funct.rmsSet[:, 5])
    if centroidP(x, yavg):
        pl.set_yscale('log')
        pl.set_xscale('log')
    else:
        pl.ticklabel_format(axis='both', style='sci', scilimits=(-2, 5), pad=5, direction="bottom")
        pl.axis([0, np.amax(x) + (2 * np.amax(x) / 100), 0, np.amax(ymax) + (2 * np.amax(ymax) / 100)])
    pl.set_xlabel('read memory size', fontsize=8)
    pl.set_ylabel("cost", fontsize=8)
    pl.grid(True)
    pl.set_title("Min/Avg/Max Cost", fontsize=14)
    pl.tick_params(axis='x', labelsize=7)
    pl.tick_params(axis='y', labelsize=7)
    sc = pl.scatter(x, ymax, s=7, c='r', marker='o', lw=0.0)
    sc1 = pl.scatter(x, yavg, s=5.5, c='g', marker='o', lw=0.0)
    sc2 = pl.scatter(x, ymin, s=4, c='b', marker='o', lw=0.0)
    pl.legend((sc2, sc1, sc), ("Min", "Avg", "Max"), scatterpoints=1, ncol=3, bbox_to_anchor=[0.5, mamAdjust], loc="lower center", fontsize=8)
    pylab.close()
Example 10: checkcl
def checkcl(cluster_run, verbose=False):
    """Ensure that a cluster labelling is in a valid format.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster IDs for each of the samples selected for a given
        round of clustering. The samples not selected are labelled with NaN.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    cluster_run : array of shape (n_samples,)
        The input vector is modified in place, such that invalid values are
        either rejected or altered. In particular, the labelling of cluster IDs
        starts at zero and increases by 1 without any gap left.
    """
    cluster_run = np.asanyarray(cluster_run)
    if cluster_run.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "empty vector provided as input.\n")
    elif reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: checkl: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.\n")
    elif np.where(np.isnan(cluster_run))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkl: vector of cluster "
                         "labellings provided as input contains at least one 'NaN'.\n")
    else:
        min_label = np.amin(cluster_run)
        if min_label < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
                      "as cluster labellings.")
            cluster_run -= min_label
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: "
                      "offset to a minimum value of '0'.")
        x = one_to_max(cluster_run)
        if np.amax(cluster_run) != np.amax(x):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
                      "labellings provided is not a dense integer mapping.")
            cluster_run = x
            if verbose:
                print("INFO: Cluster_Ensembles: checkcl: brought modification "
                      "to this vector so that its labels range "
                      "from 0 to {0}, included.\n".format(np.amax(cluster_run)))
    return cluster_run
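The dense relabelling that checkcl enforces can be illustrated with plain numpy; this is an independent sketch, not the library's one_to_max helper, and the label values are invented.

import numpy as np

labels = np.array([7, 7, 2, 9, 2])
_, dense = np.unique(labels, return_inverse=True)
print(dense)           # [1 1 0 2 0]: labels now run from 0 upward with no gaps
print(np.amax(dense))  # 2: the largest label after the remapping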
Example 11: test_make_tone_regular_at_caldb
def test_make_tone_regular_at_caldb():
    fq = 15000
    db = 100
    fs = 100000
    dur = 1
    risefall = 0.002
    calv = 0.1
    caldb = 100
    npts = fs * dur
    tone, timevals = tools.make_tone(fq, db, dur, risefall, fs, caldb, calv)
    assert len(tone) == npts
    assert len(timevals) == npts
    spectrum = np.fft.rfft(tone)
    peak_idx = (abs(spectrum - max(spectrum))).argmin()
    freq_idx = np.around(fq * (float(npts) / fs))
    assert peak_idx == freq_idx
    if tools.USE_RMS is True:
        print('tone max', np.around(np.amax(tone), 5), calv * np.sqrt(2))
        assert np.around(np.amax(tone), 5) == np.around(calv * np.sqrt(2), 5)
    else:
        assert np.around(np.amax(tone), 5) == calv
    assert timevals[-1] == dur - (1. / fs)
Example 12: _write_data
def _write_data(lock, im, index, outfile, outshape, outtype, rescale_factor, logfilename, cputime, itime):
    lock.acquire()
    try:
        t0 = time()
        f_out = getHDF5(outfile, 'a')
        f_out_dset = f_out.require_dataset('exchange/data', outshape, outtype, chunks=tdf.get_dset_chunks(outshape[0]))
        im = im * rescale_factor
        tdf.write_tomo(f_out_dset, index, im.astype(outtype))
        # Set minimum and maximum:
        if amin(im[:]) < float(f_out_dset.attrs['min']):
            f_out_dset.attrs['min'] = str(amin(im[:]))
        if amax(im[:]) > float(f_out_dset.attrs['max']):
            f_out_dset.attrs['max'] = str(amax(im[:]))
        f_out.close()
        t1 = time()
        # Print out execution time:
        log = open(logfilename, "a")
        log.write(linesep + "\ttomo_%s processed (CPU: %0.3f sec - I/O: %0.3f sec)." % (str(index).zfill(4), cputime, t1 - t0 + itime))
        log.close()
    finally:
        lock.release()
Example 13: basemap_raster_mercator
def basemap_raster_mercator(lon, lat, grid, cmap=None):
    """
    Render a raster in mercator projection. Locations with no values are
    rendered transparent.
    """
    # longitude/latitude extent
    lons = (np.amin(lon), np.amax(lon))
    lats = (np.amin(lat), np.amax(lat))
    if cmap is None:
        cmap = mpl.cm.jet
        cmap.set_bad('w', 1.0)
    # construct spherical mercator projection for region of interest
    m = Basemap(projection='merc', llcrnrlat=lats[0], urcrnrlat=lats[1],
                llcrnrlon=lons[0], urcrnrlon=lons[1])
    vmin, vmax = np.nanmin(grid), np.nanmax(grid)
    masked_grid = np.ma.array(grid, mask=np.isnan(grid))
    fig = plt.figure(frameon=False)
    plt.axis('off')
    m.pcolormesh(lon, lat, masked_grid, latlon=True, cmap=cmap, vmin=vmin, vmax=vmax)
    # Note: StringIO here is the Python 2 module; on Python 3, io.BytesIO would be
    # needed to capture the PNG bytes.
    str_io = StringIO.StringIO()
    plt.savefig(str_io, bbox_inches='tight', format='png', pad_inches=0, transparent=True)
    bounds = [(lons[0], lats[0]), (lons[1], lats[0]), (lons[1], lats[1]), (lons[0], lats[1])]
    return str_io.getvalue(), bounds
Example 14: statprint
def statprint(host_per_pg, pg_per_host):
    val = list(pg_per_host.values())  # sets val to a list of the values in pg_per_host
    mean = numpy.mean(val)
    maxvalue = numpy.amax(val)
    minvalue = numpy.amin(val)
    std = numpy.std(val)
    median = numpy.median(val)
    variance = numpy.var(val)
    print("for placement groups on hosts: ")
    print("the mean is: ", mean)
    print("the max value is: ", maxvalue)
    print("the min value is: ", minvalue)
    print("the standard deviation is: ", std)
    print("the median is: ", median)
    print("the variance is: ", variance)
    # print statements for stats
    host_mean = numpy.mean(host_per_pg)
    host_max = numpy.amax(host_per_pg)
    host_min = numpy.amin(host_per_pg)
    host_std = numpy.std(host_per_pg)
    host_median = numpy.median(host_per_pg)
    host_variance = numpy.var(host_per_pg)
    # these are the variables for hosts/pgs
    print("hosts per placement group: ")
    print("the mean is: ", host_mean)
    print("the max value is: ", host_max)
    print("the min value is: ", host_min)
    print("the standard deviation is: ", host_std)
    print("the median is: ", host_median)
    print("the variance is: ", host_variance)
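A hypothetical call could look like the following; the host names and counts are invented, and the argument shapes (a sequence of hosts-per-PG counts and a dict of PG counts keyed by host) are inferred from how the function uses them.

host_per_pg = [3, 3, 2, 3, 4]                            # hosts serving each placement group
pg_per_host = {"host-a": 12, "host-b": 9, "host-c": 15}  # placement groups on each host
statprint(host_per_pg, pg_per_host)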
Example 15: testRotMatOfExpMap
def testRotMatOfExpMap(numPts):
    """Test rotation matrix from axial vector"""
    print('* checking case of 1D vector input')
    map = numpy.zeros(3)
    rmat_1 = rotMatOfExpMap_orig(map)
    rmat_2 = rotMatOfExpMap_opt(map)
    print('resulting shapes: ', rmat_1.shape, rmat_2.shape)
    #
    #
    map = numpy.random.rand(3, numPts)
    map = numpy.zeros([3, numPts])
    map[0, :] = numpy.linspace(0, numpy.pi, numPts)
    #
    print('* testing rotMatOfExpMap with %d random points' % numPts)
    #
    t0 = time.clock()
    rmat_1 = rotMatOfExpMap_orig(map)
    et1 = time.clock() - t0
    #
    t0 = time.clock()
    rmat_2 = rotMatOfExpMap_opt(map)
    et2 = time.clock() - t0
    #
    print(' timings:\n ... original ', et1)
    print(' ... optimized', et2)
    #
    drmat = numpy.absolute(rmat_2 - rmat_1)
    print('maximum difference between results')
    print(numpy.amax(drmat, 0))
    return