This page collects typical usage examples of the scipy.mean function in Python. If you have been wondering what scipy.mean does, how to call it, or how it is used in practice, the curated code examples below should help.
Fifteen code examples of the mean function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
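Before the examples, a minimal sketch of the call itself. Note that scipy.mean was simply a re-export of numpy.mean; it has since been deprecated and dropped from recent SciPy releases, so new code should call numpy.mean directly (the examples below keep the scipy.mean spelling of their era):

import numpy as np  # scipy.mean was an alias of numpy.mean

data = [1.0, 2.0, 3.0, 4.0]
print(np.mean(data))                       # 2.5 -- mean over all elements
print(np.mean([[1, 2], [3, 4]], axis=0))   # column means: [2. 3.]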
Example 1: PrintValues
def PrintValues(outfile, values, options, prefix="", titles=None):
    if options.flat or options.aggregate_column:
        if options.add_header:
            if prefix: outfile.write("prefix\t")
            if titles: outfile.write("column\t")
            outfile.write("\t".join(("nval", "min", "max", "mean", "median", "stddev", "sum", "q1", "q3")) + "\n")
        for x in range(len(values)):
            vals = values[x]
            if len(vals) == 0:
                # Emit a row of "na" placeholders for empty columns.
                if options.output_empty:
                    if titles: outfile.write(titles[x] + "\t")
                    if prefix: outfile.write(prefix + "\t")
                    outfile.write("0" + "\tna" * 8 + "\n")
                continue
            if titles: outfile.write(titles[x] + "\t")
            if prefix: outfile.write(prefix + "\t")
            vals.sort()
            # Quartiles by index into the sorted values; with fewer than
            # five observations, fall back to the extremes.
            if len(vals) > 4:
                q1 = options.value_format % vals[len(vals) // 4]
                q3 = options.value_format % vals[len(vals) * 3 // 4]
            else:
                q1 = options.value_format % vals[0]
                q3 = options.value_format % vals[-1]
            outfile.write("\t".join(("%i" % len(vals),
                                     options.value_format % float(min(vals)),
                                     options.value_format % float(max(vals)),
                                     options.value_format % scipy.mean(vals),
                                     options.value_format % scipy.median(vals),
                                     options.value_format % scipy.std(vals),
                                     options.value_format % sum(vals),
                                     q1, q3,
                                     )) + "\n")
    else:
        # Tabular output: one row per statistic, one column per data set.
        if titles:
            print("category\t%s" % "\t".join(titles))
        print("count\t%s" % "\t".join("%i" % len(v) for v in values))
        print("min\t%s" % "\t".join(options.value_format % min(v) for v in values))
        print("max\t%s" % "\t".join(options.value_format % max(v) for v in values))
        print("mean\t%s" % "\t".join(options.value_format % scipy.mean(v) for v in values))
        print("median\t%s" % "\t".join(options.value_format % scipy.median(v) for v in values))
        print("stddev\t%s" % "\t".join(options.value_format % scipy.std(v) for v in values))
        print("sum\t%s" % "\t".join(options.value_format % sum(v) for v in values))
        print("q1\t%s" % "\t".join(options.value_format % scipy.stats.scoreatpercentile(v, per=25) for v in values))
        print("q3\t%s" % "\t".join(options.value_format % scipy.stats.scoreatpercentile(v, per=75) for v in values))
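Note that Example 1 computes quartiles two different ways: by indexing into the sorted values in the per-row branch, and with scipy.stats.scoreatpercentile in the tabular branch. A small sketch (with made-up numbers) comparing the two conventions, plus the equivalent numpy.percentile call:

import numpy as np
from scipy import stats

vals = sorted([4.0, 1.0, 3.0, 2.0, 5.0, 6.0, 8.0, 7.0])
q1_index = vals[len(vals) // 4]                   # index lookup: 3.0
q1_score = stats.scoreatpercentile(vals, per=25)  # interpolated: 2.75
q1_numpy = np.percentile(vals, 25)                # matches scoreatpercentile here
print(q1_index, q1_score, q1_numpy)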
Example 2: makeinputh5
def makeinputh5(Iono, basedir):
    """This will make a h5 file for the IonoContainer that can be used as starting
    points for the fitter. The ionocontainer taken will be averaged over the x and y
    dimensions of space to make an average value of the parameters for each altitude.

    Inputs
    Iono - An instance of the IonoContainer class that will be averaged over so it can
        be used for fitter starting points.
    basedir - A pathlib.Path for the directory that the file will be saved to.
    """
    # Get the parameters from the original data
    Param_List = Iono.Param_List
    dataloc = Iono.Cart_Coords
    times = Iono.Time_Vector
    velocity = Iono.Velocity
    zlist, idx = sp.unique(dataloc[:, 2], return_inverse=True)
    siz = list(Param_List.shape[1:])
    vsiz = list(velocity.shape[1:])
    datalocsave = sp.column_stack((sp.zeros_like(zlist), sp.zeros_like(zlist), zlist))
    outdata = sp.zeros([len(zlist)] + siz)
    outvel = sp.zeros([len(zlist)] + vsiz)
    # Do the averaging across space
    for izn, iz in enumerate(zlist):
        arr = sp.argwhere(idx == izn)
        outdata[izn] = sp.mean(Param_List[arr], axis=0)
        outvel[izn] = sp.mean(velocity[arr], axis=0)
    Ionoout = IonoContainer(datalocsave, outdata, times, Iono.Sensor_loc, ver=0,
                            paramnames=Iono.Param_Names, species=Iono.Species, velocity=outvel)
    Ionoout.saveh5(str(basedir / 'startdata.h5'))
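The averaging in this example hinges on sp.unique(..., return_inverse=True): the inverse index maps every sample back to its altitude bin. A self-contained sketch of that grouping pattern on synthetic data (no IonoContainer required):

import numpy as np

z = np.array([100., 100., 200., 200., 200.])     # altitude of each sample
param = np.array([1.0, 3.0, 2.0, 4.0, 6.0])      # one parameter per sample
zlist, idx = np.unique(z, return_inverse=True)   # idx[i] is the bin of sample i
means = np.array([param[idx == i].mean() for i in range(len(zlist))])
print(zlist)   # [100. 200.]
print(means)   # [2. 4.]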
Example 3: compactDistance
def compactDistance(self, target, candidates):
    # Compare the candidates to the target according to some measure:
    # here, the squared distance between per-image mean vectors.
    targetarr = target.reshape((self.totalSize, 3))
    candidatesarr = candidates.reshape((candidates.shape[0], self.totalSize, 3))
    target_avg = scipy.mean(targetarr, axis=0)          # mean over the pixel axis
    candidates_avg = scipy.mean(candidatesarr, axis=1)  # mean over pixels, per candidate
    return scipy.sum((target_avg - candidates_avg) ** 2, axis=1)
Example 4: makeinputh5
def makeinputh5(Iono, basedir):
    """Save an altitude-averaged copy of an IonoContainer as fitter start data."""
    basedir = Path(basedir).expanduser()
    Param_List = Iono.Param_List
    dataloc = Iono.Cart_Coords
    times = Iono.Time_Vector
    velocity = Iono.Velocity
    # Group the Cartesian coordinates by unique altitude.
    zlist, idx = sp.unique(dataloc[:, 2], return_inverse=True)
    siz = list(Param_List.shape[1:])
    vsiz = list(velocity.shape[1:])
    datalocsave = sp.column_stack((sp.zeros_like(zlist), sp.zeros_like(zlist), zlist))
    outdata = sp.zeros([len(zlist)] + siz)
    outvel = sp.zeros([len(zlist)] + vsiz)
    # Average the parameters and velocities within each altitude bin.
    for izn, iz in enumerate(zlist):
        arr = sp.argwhere(idx == izn)
        outdata[izn] = sp.mean(Param_List[arr], axis=0)
        outvel[izn] = sp.mean(velocity[arr], axis=0)
    Ionoout = IonoContainer(datalocsave, outdata, times, Iono.Sensor_loc, ver=0,
                            paramnames=Iono.Param_Names, species=Iono.Species, velocity=outvel)
    ofn = basedir / 'startdata.h5'
    print('writing {}'.format(ofn))
    Ionoout.saveh5(str(ofn))
Example 5: signalToNoiseRatio
def signalToNoiseRatio(self, xs):
    """ What is the one-sample signal-to-noise ratio. """
    rxs = repmat(xs, self.ESamples, 1).T
    gs = self._df(rxs)
    g2s = mean(gs ** 2, axis=1)  # E[g^2] over the sample axis
    gs = mean(gs, axis=1)        # E[g]
    return gs ** 2 / g2s         # SNR = E[g]^2 / E[g^2]
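The returned quantity is a Monte Carlo estimate of E[g]^2 / E[g^2] along the sample axis. A standalone sketch of the same estimator for a hypothetical noisy gradient g = mu + noise (not the class above), where the analytic value is mu^2 / (mu^2 + sigma^2):

import numpy as np

rng = np.random.default_rng(0)
mu, sigma, n_samples = 1.5, 2.0, 100000
gs = mu + sigma * rng.standard_normal(n_samples)   # noisy gradient draws
snr = np.mean(gs) ** 2 / np.mean(gs ** 2)
print(snr)   # close to 2.25 / 6.25 = 0.36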
Example 6: plotmap
def plotmap(self, fig, ax):
    """ This function will plot the map of Alaska. The data will be plotted
    over it and will use the basemap class to position everything.

    Input
    fig - The figure handle for the plots.
    ax - The axes handle that the map will be plotted over.
    Output
    m - This is the handle for the basemap object.
    """
    latlim2 = self.params['latbounds']
    lonlim2 = self.params['lonbounds']
    m = Basemap(projection='merc', lon_0=sp.mean(lonlim2), lat_0=sp.mean(latlim2),
                lat_ts=sp.mean(latlim2), llcrnrlat=latlim2[0], urcrnrlat=latlim2[1],
                llcrnrlon=lonlim2[0], urcrnrlon=lonlim2[1],
                rsphere=6371200., resolution='i', ax=ax)
    # draw coastlines, state and country boundaries, edge of map.
    #m.drawcoastlines()
    #m.drawstates()
    #m.drawcountries()
    m.readshapefile('st99_d00', 'states', drawbounds=True)
    # Spread roughly five meridians and five parallels across the map.
    merstep = sp.round_((lonlim2[1] - lonlim2[0]) / 5.)
    parstep = sp.round_((latlim2[1] - latlim2[0]) / 5.)
    meridians = sp.arange(lonlim2[0], lonlim2[1], merstep)
    parallels = sp.arange(latlim2[0], latlim2[1], parstep)
    m.drawparallels(parallels, labels=[1, 0, 0, 0], fontsize=10)
    m.drawmeridians(meridians, labels=[0, 0, 0, 1], fontsize=10)
    # plt.hold(True)  # only needed on pre-3.0 Matplotlib; the call was removed in 3.0
    return m
Example 7: plot_optimal_tau_for_mean_uncertainty_reduction
def plot_optimal_tau_for_mean_uncertainty_reduction(
        results_for_exp, results_for_exp_inftau):
    """ Plot the optimal tau for the mean of uncertainty reduction.

    :param results_for_exp: The results of one experiment as 4-D array of the
        shape (metrics, z-values, tau-values, experimental repetitions).
    :type results_for_exp: 4-D array
    :param results_for_exp_inftau: The results of one experiment for `tau = inf`
        as 3-D array of the shape (metrics, z-values, experimental repetitions).
    :type results_for_exp_inftau: 3-D array.
    """
    values = sp.empty((results_for_exp.shape[0], results_for_exp.shape[1]))
    err = sp.empty((results_for_exp.shape[0], results_for_exp.shape[1], 2, 1))
    mark = sp.empty((results_for_exp.shape[0], results_for_exp.shape[1]))
    for m, metric in enumerate(cfg['metrics']):
        for z in range(len(cfg['zs'])):
            # Average over experimental repetitions for each tau.
            r = sp.mean(results_for_exp[m, z], axis=1)
            mark[m, z] = r.max()
            values[m, z] = sp.mean(cfg['time_scales'][r == r.max()]).magnitude
            # Error bars span the time scales within 80% of the optimum.
            r = cfg['time_scales'][r > 0.8 * r.max()]
            err[m, z, 0] = values[m, z] - min(r).magnitude
            err[m, z, 1] = max(r).magnitude - values[m, z]
    plot_param_per_metric_and_z(values, err)
    plot_bool_indicator_per_metric_and_z(
        sp.mean(results_for_exp_inftau, axis=2) >= mark)
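At its core the loop is an argmax-with-ties pattern: average over repetitions, then average all tau values that attain the maximum. A minimal sketch with plain NumPy arrays (none of the cfg / quantities machinery from the example):

import numpy as np

time_scales = np.array([0.1, 0.5, 1.0, 5.0, 10.0])
# fake (tau-values x repetitions) results for one metric/z combination
results = np.array([[0.25, 0.25], [0.75, 0.75], [0.5, 1.0], [1.0, 0.5], [0.25, 0.25]])
r = results.mean(axis=1)                         # [0.25, 0.75, 0.75, 0.75, 0.25]
optimal_tau = time_scales[r == r.max()].mean()   # ties averaged: (0.5+1+5)/3
within_80 = time_scales[r > 0.8 * r.max()]       # span used for the error bars
print(optimal_tau, within_80.min(), within_80.max())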
Example 8: estimate_performance_xgboost
def estimate_performance_xgboost(X, labels, param, num_round, folds):
    '''
    Cross validation for XGBoost performance
    '''
    f = open("summary_bst_scan.txt", "a")
    start = np.random.randint(1, 1001)  # random seed (was time.time())
    # Cross validate (scikit-learn's pre-0.18 cross_validation.KFold API).
    kf = cv.KFold(labels.size, n_folds=folds, random_state=start)
    # List to store the RMSE of every fold
    all_rmse = []
    for train_indices, test_indices in kf:
        X_train, X_test = X.loc[train_indices], X.loc[test_indices]
        y_train, y_test = labels[train_indices], labels[test_indices]
        xgmat = xgb.DMatrix(X_train, label=y_train)
        plst = list(param.items())  # + [('eval_metric', ...)]
        watchlist = []  # [(xgmat, 'train')]
        bst = xgb.train(plst, xgmat, num_round, watchlist)
        xgmat_test = xgb.DMatrix(X_test)
        y_out = bst.predict(xgmat_test)
        num = y_test.shape[0]
        y_test = np.reshape(y_test, num)
        rmse_score = rmse(y_out, y_test)
        print('rmse={}'.format(rmse_score))
        f.write('rmse={}\n'.format(rmse_score))
        all_rmse.append(rmse_score)
    print("------------------------------------------------------")
    print("mean rmse ={} with std={}".format(sp.mean(all_rmse), sp.std(all_rmse)))
    f.write("mean rmse ={} with std={}\n".format(sp.mean(all_rmse), sp.std(all_rmse)))
    f.close()
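The cv.KFold call above is scikit-learn's pre-0.18 cross_validation API. For reference, a hedged sketch of the same fold loop against the current model_selection API, scoring a dummy constant predictor so the snippet runs on its own:

import numpy as np
from sklearn.model_selection import KFold

X = np.random.rand(100, 5)
y = np.random.rand(100)
kf = KFold(n_splits=5, shuffle=True, random_state=0)
scores = []
for train_idx, test_idx in kf.split(X):
    y_train, y_test = y[train_idx], y[test_idx]
    # a real model would be trained on X[train_idx] here
    pred = np.full(len(test_idx), y_train.mean())
    scores.append(np.sqrt(np.mean((pred - y_test) ** 2)))   # RMSE per fold
print("mean rmse = {} with std = {}".format(np.mean(scores), np.std(scores)))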
Example 9: execute
def execute(self):
    self.power_mat, self.thermal_expectation = self.full_calculation()
    n_chan = self.power_mat.shape[1]
    n_freq = self.power_mat.shape[0]
    # Calculate the mean channel correlations at low frequencies.
    low_f_mat = sp.mean(self.power_mat[1:4 * n_chan + 1, :, :], 0).real
    # Factorize it into principal components.
    e, v = linalg.eigh(low_f_mat)
    self.low_f_mode_values = e
    # Make sure the eigenvalues are sorted.
    if sp.any(sp.diff(e) < 0):
        raise RuntimeError("Eigenvalues not sorted.")
    self.low_f_modes = v
    # Now subtract out the noisiest channel modes and see what is left.
    n_modes_subtract = 10
    mode_subtracted_power_mat = sp.copy(self.power_mat.real)
    mode_subtracted_auto_power = sp.empty((n_modes_subtract, n_freq))
    for ii in range(n_modes_subtract):
        mode = v[:, -ii]
        amp = sp.sum(mode[:, None] * mode_subtracted_power_mat, 1)
        amp = sp.sum(amp * mode, 1)
        to_subtract = amp[:, None, None] * mode[:, None] * mode
        mode_subtracted_power_mat -= to_subtract
        auto_power = mode_subtracted_power_mat.view()
        auto_power.shape = (n_freq, n_chan ** 2)
        auto_power = auto_power[:, ::n_chan + 1]  # stride picks out the diagonal (auto-power) terms
        mode_subtracted_auto_power[ii, :] = sp.mean(auto_power, -1)
    self.subtracted_auto_power = mode_subtracted_auto_power
Example 10: plot_pairwise_velocities_r
def plot_pairwise_velocities_r(case, color, all_radial_distances, all_radial_velocities):
    dr = 0.3  # Mpc/h
    rmin, rmax = sp.amin(all_radial_distances), sp.amax(all_radial_distances)
    rrange = rmax - rmin
    N = int(sp.ceil(rrange / dr))
    rs = sp.linspace(rmin, rmax, N)
    v12_of_r = [[] for index in range(N)]
    # Bin each pairwise velocity by its radial separation; clamp the top
    # edge so r == rmax falls into the last bin.
    for r, v12 in zip(all_radial_distances, all_radial_velocities):
        index = min(int(sp.floor((r - rmin) / dr)), N - 1)
        v12_of_r[index].append(v12)
    sigma_12s = sp.zeros(N)
    v12_means = sp.zeros(N)
    for index in range(len(sigma_12s)):
        v12_of_r_index = sp.array(v12_of_r[index])
        print("number of counts in the", index, "th bin:", len(v12_of_r_index))
        sigma_12 = sp.sqrt(sp.mean(v12_of_r_index ** 2))  # rms pairwise velocity
        v12_mean = -sp.mean(v12_of_r_index)
        sigma_12s[index] = sigma_12
        v12_means[index] = v12_mean
    plt.plot(rs, sigma_12s, color=color, label=r'$\sigma_{12}$')
    plt.plot(rs, v12_means, color=color, label=r'$|v_{12}|$')
    plt.xlabel('r [Mpc/h]')
    plt.ylabel('[km/s]')
    plt.xscale('log')
    plt.axis([0.5, 100, 0, 600])
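The manual binning loop can also be written with scipy.stats.binned_statistic, which returns the per-bin statistic directly. A sketch computing the same rms velocity per separation bin on synthetic pairs:

import numpy as np
from scipy.stats import binned_statistic

rng = np.random.default_rng(1)
r = rng.uniform(0.5, 30.0, 5000)         # pair separations [Mpc/h]
v12 = rng.normal(-100.0, 300.0, 5000)    # pairwise velocities [km/s]
# rms per bin = sqrt of the binned mean of v12**2
ms, edges, _ = binned_statistic(r, v12 ** 2, statistic='mean', bins=30)
sigma_12 = np.sqrt(ms)
centers = 0.5 * (edges[1:] + edges[:-1])
print(centers[:3], sigma_12[:3])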
Example 11: plot_temporal_average
def plot_temporal_average(self,
                          color='g',
                          plot_std=True,
                          t_start=None,
                          label=None,
                          **kargs):
    if 'ax' in kargs:
        ax = kargs['ax']
    else:
        from matplotlib import pyplot
        fig = pyplot.figure()
        ax = fig.add_subplot(1, 1, 1)
    allpixel = self.selectAndPreprocess(**kargs)
    m = mean(allpixel, axis=1)
    if t_start is None:
        t = self.t()
    else:
        t = self.t() - self.t()[0] + t_start
    ax.plot(t, m, color=color, linewidth=2, label=label)
    if plot_std:
        # Shade one standard deviation around the mean.
        s = std(allpixel, axis=1)
        ax.fill_between(t, m + s, m - s, color=color, alpha=.3)
Example 12: printy
def printy(s):
    if ((s._num_updates * s.batch_size < 100
            and s._num_updates % (20 // s.batch_size) == 0)
            or s._num_updates % (100 // s.batch_size) == 0):
        print(s._num_updates * s.batch_size, end=' ')  # s.bestParameters,
        s.provider.nextSamples(4)
        print(mean(s.provider.currentLosses(s.bestParameters)))
Example 13: _read_sky_logfile
def _read_sky_logfile(self):
    # TODO: expand to read errors, msgs etc.
    # Read in the whole sky log file; it shouldn't be big.
    with open(self.skylogfile) as f:
        lines = f.readlines()
    dust = [line.split()[1:] for line in lines if line.startswith('dtau_dust')]
    line = [line.split()[1:] for line in lines if line.startswith('dtau_line')]
    dust = _sp.array(dust, dtype='float')
    line = _sp.array(line, dtype='float')
    transitions = _sp.unique(dust[:, 0])
    shells = _sp.unique(dust[:, 1])
    dtau_dust = dict()
    dtau_line = dict()
    dtau_tot = dict()
    for t in transitions:
        d = []
        l = []
        for s in shells:
            # Average dtau over all entries for this transition/shell pair.
            d.append(_sp.mean([i[2] for i in dust if ((i[0] == t) * (i[1] == s))]))
            l.append(_sp.mean([i[2] for i in line if ((i[0] == t) * (i[1] == s))]))
        dtau_dust[t] = _sp.copy(d)
        dtau_line[t] = _sp.copy(l)
        dtau_tot[t] = _sp.array(d) + _sp.array(l)
    # create object to store in main class
    class Tau(object):
        pass
    Tau.dtau_dust = dtau_dust
    Tau.dtau_line = dtau_line
    Tau.dtau_tot = dtau_tot
    Tau.transitions = transitions
    Tau.shells = shells
    self.Tau = Tau
Example 14: pForest_vs_flann_20Trials
def pForest_vs_flann_20Trials(numTrees=10):
    print("Comparing FLANN to Proximity Forest on 500 Random 2D Points")
    flann_scores = []
    pf_scores = []
    discrepancies = []
    for i in range(20):
        print("==============================================")
        print("TRIAL: %d" % (i + 1))
        print("==============================================")
        (nd, sum_flann, sum_pf) = pForest_vs_flann(numTrees=numTrees, verbose=False)
        flann_scores.append(sum_flann)
        pf_scores.append(sum_pf)
        discrepancies.append(nd)
        print("==============================================")
        print("Discrepancies: %d, Cost per Discrepancy: %3.2f" % (nd, (sum_flann - sum_pf) * 1.0 / nd))
        print("==============================================")
    print("==============================================")
    print("20 TRIAL SUMMARY")
    print("Average Discrepancies: %3.2f" % (1.0 * sum(discrepancies) / len(discrepancies)))
    flann_scores = scipy.array(flann_scores)
    pf_scores = scipy.array(pf_scores)
    avg_delta_score = (sum(flann_scores) - sum(pf_scores)) * 1.0 / len(discrepancies)
    print("Average Cost Per Discrepancy: %3.2f" % avg_delta_score)
    print("Average FLANN Distance: %3.2f, StdDev: %3.2f" % (scipy.mean(flann_scores), scipy.std(flann_scores)))
    print("Average Proximity Forest Distance: %3.2f, StdDev: %3.2f" % (scipy.mean(pf_scores), scipy.std(pf_scores)))
    print("==============================================")
    return (discrepancies, flann_scores, pf_scores)
Example 15: test_psd_normalization
def test_psd_normalization():
    ''' This function tests the normalization of the function psd. Mock data is
    one second of normal, mean-zero, std = 2 data sampled at
    1kHz. Since this is white noise, the white-noise level of the PSD times
    the root of the bandwidth should give the rms amplitude of the
    data (in this case rt(2)).

    The normalization for a hanning window is also tested. Windowing
    the data removes power from the time stream. The data must be
    recalibrated in order to recover the best estimate of the white
    noise level. For a hanning window the time stream must be multiplied by
    root(8/3) before the PSD is taken.
    '''
    # make fake data, window, window and rescale
    x = sp.random.normal(0, 2, 10000)
    wrx = window(x, 'hanning', 1)
    ms_x = sp.mean(x ** 2)
    ms_wrx = sp.mean(np.array(wrx) ** 2)
    ratio = ms_x / ms_wrx
    print('MSA of timestream = %.4f\nMSA of windowed timestream = %.4f\nratio = %.4f' % (ms_x, ms_wrx, ratio))
    # take PSDs
    x_psd = psd(x, 381.47)
    wrx_psd = psd(wrx, 381.47)
    pylab.subplot(2, 1, 1)
    pylab.title('Test psd normalization')
    pylab.xlabel('Sample')
    pylab.ylabel('Cnts')
    pylab.plot(x, 'bo', wrx, 'ro')
    pylab.subplot(2, 1, 2)
    pylab.title('PSD')
    pylab.xlabel('Frequency [Hz]')
    pylab.ylabel('Cnts/rtHz')
    pylab.loglog(x_psd[0], x_psd[1], 'b-', wrx_psd[0], wrx_psd[1], 'r-')
    pylab.show()
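The normalization claim in the docstring (white-noise PSD level times the root of the bandwidth recovers the rms amplitude) can be checked independently with scipy.signal.welch, which uses the same one-sided density convention; a minimal sketch:

import numpy as np
from scipy import signal

fs = 1000.0
x = np.random.normal(0, 2, 100000)               # white noise, std = 2
f, Pxx = signal.welch(x, fs=fs, nperseg=1024)    # one-sided PSD, units**2/Hz
# Integrating the PSD over frequency recovers the variance,
# so the rms amplitude is sqrt(sum(Pxx) * df).
df = f[1] - f[0]
print(np.sqrt(np.sum(Pxx) * df))                 # ~2.0, the std of the time stream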