This article collects typical usage examples of the scipy.median function in Python. If you have been wondering exactly how to use scipy.median, what it does, or what it looks like in real code, the hand-picked examples below should help.
The following 15 code examples of the median function are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
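Before the examples, a minimal sketch of what the function computes. scipy.median has historically been a re-export of NumPy's median (recent SciPy releases deprecate or drop these re-exports, so numpy.median is the usual drop-in replacement):

import numpy as np  # scipy.median is (was) a re-export of np.median

print(np.median([3.0, 1.0, 4.0, 1.0, 5.0]))         # 3.0 -- the middle value after sorting
print(np.median([[1.0, 2.0], [3.0, 4.0]], axis=0))  # [2. 3.] -- per-column medians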
Example 1: get_cluster_distribution
def get_cluster_distribution(g, method='average'):
    """
    The clustering-coefficient distribution grouped by degree. Like a histogram,
    this pairs each possible degree k with the average/median clustering
    coefficient of the nodes of degree k in graph g.

    Parameters:
    -----------
    g: NetworkX Graph
    method: str, ('average', 'median'), (default = 'average')

    Returns:
    --------
    xdata, ydata, a 2-tuple of arrays, (k, avg_cc(V_k)), where V_k are the nodes with degree k
    """
    g = to_undirected(g)
    k = nx.clustering(g)
    d = g.degree()
    ck = defaultdict(list)
    for n in g.nodes_iter():
        ck[d[n]].append(k[n])
    xdata, ydata = list(), list()
    if method == 'average':
        for x, y in ifilter(lambda x: x[0] > 1 and average(x[1]) > 0, ck.iteritems()):
            xdata.append(x)
            ydata.append(average(y))
    elif method == 'median':
        for x, y in ifilter(lambda x: x[0] > 1 and median(x[1]) > 0, ck.iteritems()):
            xdata.append(x)
            ydata.append(median(y))
    else:
        raise NameError("method should be 'average' or 'median'")
    xdata = array(xdata)
    ydata = array(ydata)
    return (xdata, ydata)
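A minimal sketch of exercising this function (hypothetical setup: the bare names to_undirected, average, median, array and ifilter suggest star-imports from scipy and itertools in the original module, and nodes_iter/iteritems pin it to Python 2 with NetworkX 1.x):

import networkx as nx

g = nx.barabasi_albert_graph(200, 3)                 # scale-free test graph
degrees, med_cc = get_cluster_distribution(g, method='median')
for k, cc in zip(degrees, med_cc):
    print(k, cc)                                     # clustering typically falls as k grows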
Example 2: plot_collated
def plot_collated(r_set="truth", infl_set="varinfl-0.25", subplots=True, save=False):
    d = cl("%s/output-2013/sim3-results_r-%s_%s" % (DATA_DIR, r_set, infl_set))
    coverages = SP.array(range(20,200,20) + range(200,1001,100)) #range(200,500,50) + range(500,1001,100))
    if r_set == "truth": coverages = SP.array(range(20,200,20) + range(200,500,50) + range(500,1001,100))
    afs = map(lambda x: "%.2f" % x, [0.7, 0.85, 0.99])
    models = ['sQTL', 'Smooth', 'ML', 'MP']
    p = 0
    colors = 'bgry'
    if subplots: PL.figure(figsize=(14,10))
    for feature in 'FX':
        for af in afs:
            if subplots: PL.subplot(2,3,p+1)
            else: PL.figure()
            p += 1
            lines = []
            for i, model in enumerate(models):
                I = SP.where(d[af][model][feature].var(axis=0) > 1e-10)[0]
                err = d[af][model][feature][:,I].var(axis=1)**0.5
                lines.append(PL.plot(coverages + 2*i, SP.median(d[af][model][feature][:,I], axis=1), "-o", linewidth=3, markersize=9, color=colors[i])[0])
                PL.errorbar(coverages + 2*i, SP.median(d[af][model][feature][:,I], axis=1), yerr=err, fmt="-o", linewidth=1, markersize=9, color=colors[i])
            PL.xticks(coverages)
            #PL.xlim(min(coverages),max(coverages))
            PL.title("%s %s - %s" % (infl_set, feature, af))
            PL.xlim(15, 220)
            if feature == "X": PL.ylim(0, 8)
            if p == 1: PL.legend(lines, models)
            if save: PL.savefig("/Users/leopold/doc/write/manuscripts/2011_X_sQTL/figures/figure2013-3_2%s.pdf" % ("ABCDEF"[p-1:p]))
    PL.show()
Example 3: PrintValues
def PrintValues(outfile, values, options, prefix="", titles=None):
    if options.flat or options.aggregate_column:
        if options.add_header:
            if prefix: outfile.write("prefix\t")
            if titles: outfile.write("column\t")
            print "\t".join(("nval", "min", "max", "mean", "median", "stddev", "sum", "q1", "q3"))
        for x in range(len(values)):
            vals = values[x]
            if len(vals) == 0:
                if options.output_empty:
                    if titles: outfile.write(titles[x] + "\t")
                    if prefix: outfile.write(prefix + "\t")
                    outfile.write("0" + "\tna" * 8 + "\n")
                continue
            if titles: outfile.write(titles[x] + "\t")
            if prefix: outfile.write(prefix + "\t")
            vals.sort()
            if len(vals) > 4:
                q1 = options.value_format % vals[len(vals) // 4]
                q3 = options.value_format % vals[len(vals) * 3 // 4]
            else:
                q1 = options.value_format % vals[0]
                q3 = options.value_format % vals[-1]
            outfile.write("\t".join(("%i" % len(vals),
                                     options.value_format % float(min(vals)),
                                     options.value_format % float(max(vals)),
                                     options.value_format % scipy.mean(vals),
                                     options.value_format % scipy.median(vals),
                                     options.value_format % scipy.std(vals),
                                     options.value_format % reduce(lambda x, y: x + y, vals),
                                     q1, q3,
                                     )) + "\n")
    else:
        if titles:
            print "category\t%s" % string.join(titles, "\t")
        print "count\t%s" % (string.join(map(lambda v: "%i" % len(v), values), "\t"))
        print "min\t%s" % (string.join(map(lambda v: options.value_format % min(v), values), "\t"))
        print "max\t%s" % (string.join(map(lambda v: options.value_format % max(v), values), "\t"))
        print "mean\t%s" % (string.join(map(lambda v: options.value_format % scipy.mean(v), values), "\t"))
        print "median\t%s" % (string.join(map(lambda v: options.value_format % scipy.median(v), values), "\t"))
        print "stddev\t%s" % (string.join(map(lambda v: options.value_format % scipy.std(v), values), "\t"))
        print "sum\t%s" % (string.join(map(lambda v: options.value_format % reduce(lambda x, y: x + y, v), values), "\t"))
        print "q1\t%s" % (string.join(map(lambda v: options.value_format % scipy.stats.scoreatpercentile(v, per=25), values), "\t"))
        print "q3\t%s" % (string.join(map(lambda v: options.value_format % scipy.stats.scoreatpercentile(v, per=75), values), "\t"))
Example 4: subtract_overscan
def subtract_overscan(data, x, y):
    """This function finds the median values in each of the four overscan
    regions and subtracts them from the appropriate regions of the
    input data array. It then converts the results back to electrons
    rather than ADU."""
    # Define bias region limits
    bx1 = slice(0, 15, 1)
    bx2 = slice(2065, 2080, 1)
    y1 = slice(0, 1024, 1)
    y2 = slice(1024, 2048, 1)
    # Define limits of regions associated with the four amps
    x1 = slice(16, 1040)
    x2 = slice(1040, 2064)
    # Define median values of overscan regions from appropriate data regions
    newdata = data.astype(scipy.float32)
    overscan = scipy.zeros((4, 1))
    overscan[0] = scipy.median(newdata[y1, bx1].ravel())
    overscan[1] = scipy.median(newdata[y2, bx1].ravel())
    overscan[2] = scipy.median(newdata[y1, bx2].ravel())
    overscan[3] = scipy.median(newdata[y2, bx2].ravel())
    # Subtract overscan
    newdata[y1, x1] = newdata[y1, x1] - overscan[0]
    newdata[y2, x1] = newdata[y2, x1] - overscan[1]
    newdata[y1, x2] = newdata[y1, x2] - overscan[2]
    newdata[y2, x2] = newdata[y2, x2] - overscan[3]
    newdata = newdata[y, x]
    return newdata
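A quick way to exercise this snippet on synthetic data (hypothetical shapes: a 2048 x 2080 frame whose columns 0-15 and 2065-2079 are overscan, as the slices above imply; scipy.float32/scipy.zeros/scipy.median require a SciPy version that still re-exports NumPy names):

import numpy as np

frame = np.random.normal(loc=1000.0, scale=5.0, size=(2048, 2080))  # fake ~1000 ADU bias level
trimmed = subtract_overscan(frame, slice(16, 2064), slice(0, 2048)) # keep the science region
print(trimmed.shape)       # (2048, 2048)
print(np.median(trimmed))  # close to 0 once the bias level is removed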
Example 5: WriteRadius
def WriteRadius(mali, identifiers, prefix="", gap_char="-"):
    """write percent identities in pairwise comparisons both for nucleic acids and amino acids."""
    pides_na = []
    seq_aa = []
    for x in range(0, len(identifiers)):
        seq_aa.append(Genomics.TranslateDNA2Protein(mali[identifiers[x]]))
        for y in range(x + 1, len(identifiers)):
            if x == y:
                continue
            pides_na.append(MaliIO.getPercentIdentity(
                mali[identifiers[x]], mali[identifiers[y]], gap_char))
    pides_aa = []
    for x in range(0, len(identifiers) - 1):
        for y in range(x + 1, len(identifiers)):
            pides_aa.append(
                MaliIO.getPercentIdentity(seq_aa[x], seq_aa[y], gap_char))
    print "%s\tpide\t%i\t" % (prefix, len(pides_na)) +\
        string.join(map(lambda x: "%.2f" % x, (min(pides_na),
                                               max(pides_na),
                                               scipy.mean(pides_na),
                                               scipy.median(pides_na),
                                               numpy.std(pides_na))), "\t") + "\t" +\
        string.join(map(lambda x: "%.2f" % x, (min(pides_aa),
                                               max(pides_aa),
                                               scipy.mean(pides_aa),
                                               scipy.median(pides_aa),
                                               numpy.std(pides_aa))), "\t")
Example 6: lossTraces
def lossTraces(fwrap, aclass, dim, maxsteps, storesteps=None, x0=None,
               initNoise=0., minLoss=1e-10, algoparams={}):
    """ Compute a number of loss curves, for the provided settings,
    stored at specific storestep points. """
    if not storesteps:
        storesteps = range(maxsteps + 1)
    # initial points, potentially noisy
    if x0 is None:
        x0 = ones(dim) + randn(dim) * initNoise
    # tracking progress by callback
    paramtraces = {'index': -1}
    def storer(a):
        lastseen = paramtraces['index']
        for ts in [x for x in storesteps if x > lastseen and x <= a._num_updates]:
            paramtraces[ts] = a.bestParameters.copy()
        paramtraces['index'] = a._num_updates
    # initialization
    algo = aclass(fwrap, x0, callback=storer, **algoparams)
    print algo, fwrap, dim, maxsteps,
    # store initial step
    algo.callback(algo)
    algo.run(maxsteps)
    # process learning curve
    del paramtraces['index']
    paramtraces = array([x for _, x in sorted(paramtraces.items())])
    oloss = mean(fwrap.stochfun.expectedLoss(ones(100) * fwrap.stochfun.optimum))
    ls = abs(fwrap.stochfun.expectedLoss(ravel(paramtraces)) - oloss) + minLoss
    ls = reshape(ls, paramtraces.shape)
    print median(ls[-1])
    return ls
Example 7: plotAllCombinations
def plotAllCombinations(aclasses, avariants,
                        fclasses, fvariants,
                        trials, maxsteps, maxbatchsize=10):
    fundic = {}
    ploti = 1
    rows = sum([len(avariants[ac]) for ac in aclasses]) + len(aclasses) - 1
    cols = len(fvariants) * len(fclasses) + len(fclasses) - 1
    f_mid = int(median(range(len(fvariants))))
    for ac_id, aclass in enumerate(aclasses):
        a_mid = int(median(range(len(avariants[aclass]))))
        for as_id, aparams in enumerate(avariants[aclass]):
            if as_id == 0 and ac_id > 0:
                ploti += cols
            for fc_id, fclass in enumerate(fclasses):
                if fc_id not in fundic:
                    # shared samples across all uses of one function
                    fun = fclass()
                    fwrap = FunctionWrapper(trials, fun, record_samples=True)
                    fwrap.nextSamples(maxbatchsize * (maxsteps + 10))
                    fundic[fc_id] = fwrap._seen
                data = fundic[fc_id]
                for fs_id, fsettings in enumerate(fvariants):
                    if fs_id == 0 and fc_id > 0:
                        ploti += 1
                    fun = fclass(**fsettings)
                    provider = DataFunctionWrapper(data, fun, shuffling=False)
                    pylab.subplot(rows, cols, ploti)
                    ploti += 1
                    plotHeatmap(provider, aclass, aparams, trials, maxsteps)
                    if ac_id == 0 and as_id == 0 and fs_id == f_mid:
                        pylab.title(fclass.__name__[5:])
                    if fs_id == 0 and as_id == a_mid:
                        pylab.ylabel(aclass.__name__[:6])
    pylab.subplots_adjust(left=0.1, bottom=0.01, right=0.99, top=0.9, wspace=0.05, hspace=0.05)
Example 8: plot_hist_compare
def plot_hist_compare(self, which_case):
    plt.ylabel('Percentage of points')
    plt.xlabel('Percentage RMS relative error')

    def yto_percent(y, x):
        s = str(sp.around((y / (len(self.REL_ERR) * 1.0) * 100), 2))
        if matplotlib.rcParams['text.usetex'] is True:
            return s + r'$\%$'
        else:
            return s + '%'

    def xto_percent(y, x):
        s = str(y * 100)
        if matplotlib.rcParams['text.usetex'] is True:
            return s + r'$\%$'
        else:
            return s + '%'

    thermo1, thermo2 = self.select[which_case]
    # Plot the SU2 error
    i = 0
    self.REL_ERR = 0
    for v in self.variables[sp.where(
            (self.variables != thermo1) * (self.variables != thermo2))]:
        i = i + 1
        self.REL_ERR = self.REL_ERR + \
            ((getattr(self.SU2[which_case], v) - getattr(self.RandomSamples, v)) /
             (getattr(self.RandomSamples, v))) ** 2
    self.REL_ERR = sp.sqrt(self.REL_ERR) / i
    plt.hist(self.REL_ERR, bins=25, color='k', alpha=0.3, label='SU2')
    print 'Error max SU2', max(self.REL_ERR)
    setattr(self.SU2[which_case], "median_ERR", sp.median(self.REL_ERR))
    # Plot the SciPy error
    i = 0
    self.REL_ERR = 0
    for v in self.variables[sp.where(
            (self.variables != thermo1) * (self.variables != thermo2))]:
        i = i + 1
        self.REL_ERR = self.REL_ERR + \
            ((getattr(self.SciPy[which_case], v) - getattr(self.RandomSamples, v)) /
             (getattr(self.RandomSamples, v))) ** 2
    self.REL_ERR = sp.sqrt(self.REL_ERR) / i
    plt.hist(self.REL_ERR, bins=25, color='c', alpha=0.5, label='SciPy')
    print 'Error max SciPy', max(self.REL_ERR)
    setattr(self.SciPy[which_case], "median_ERR", sp.median(self.REL_ERR))
    formatter_y = FuncFormatter(yto_percent)
    formatter_x = FuncFormatter(xto_percent)
    plt.gca().yaxis.set_major_formatter(formatter_y)
    plt.gca().xaxis.set_major_formatter(formatter_x)
    plt.grid(which='both')
    plt.legend()
    return
Example 9: _printStuff
def _printStuff(self):
    print self._num_updates,
    for n, a in self._print_quantities:
        #print n, type(a)
        if abs(median(a)) > 1e4 or abs(median(a)) < 1e-3:
            print n, median(a), '\t',
        else:
            print n, round(median(a), 4), '\t',
    print
Example 10: calculate_varPrior
def calculate_varPrior(disp_raw, disp_fitted, idx, varLogDispSamp):
    logRes = sp.log(disp_raw[idx]) - sp.log(disp_fitted[idx])
    stdLogRes = sp.median(abs(logRes - sp.median(logRes))) * 1.4826
    varLogRes = stdLogRes ** 2
    varPrior = varLogRes - varLogDispSamp
    return max(varPrior, 0.1)
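The 1.4826 factor rescales the median absolute deviation (MAD) into a consistent estimate of the standard deviation for normally distributed residuals (1.4826 is approximately 1/Phi^-1(3/4)). A minimal sketch with synthetic dispersions (hypothetical inputs; sp is the scipy module as in the snippet, so sp.log/sp.median need a SciPy version that still re-exports NumPy functions):

import numpy as np
import scipy as sp

disp_raw = np.random.lognormal(mean=0.0, sigma=0.5, size=200)
disp_fitted = np.ones(200)     # pretend the fitted trend is flat
idx = np.arange(200)           # use every feature
print(calculate_varPrior(disp_raw, disp_fitted, idx, varLogDispSamp=0.05))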
Example 11: __call__
def __call__(self, x):
    res = median([self.f(x) for _ in range(int(self.resample_over))])
    if self.num_evals % self.batchsize == 0 and self.num_evals > 0:
        alt_res = median([self.f(x) for _ in range(int(self.resample_over))])
        self._adaptResampling(res, alt_res)
        res = 0.5 * res + 0.5 * alt_res
    self.recents[self.num_evals % self.batchsize] = res
    self.num_evals += 1
    return res
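The idea behind the median here: for a noisy objective, the median of repeated evaluations suppresses heavy-tailed outliers far better than the mean would. A standalone toy illustration (hypothetical noisy function, unrelated to the class above):

import numpy as np

def noisy_f(x):
    return x**2 + np.random.standard_cauchy()    # Cauchy noise: the sample mean never converges

estimates = [np.median([noisy_f(3.0) for _ in range(25)]) for _ in range(100)]
print(np.mean(estimates), np.std(estimates))     # tightly clustered around the true value 9.0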
Example 12: MAD
def MAD(a, c=0.6745):
    """
    Median Absolute Deviation along first axis of an array:
    median(abs(a - median(a))) / c
    """
    a = N.asarray(a, N.float64)
    d = N.multiply.outer(median(a), N.ones(a.shape[1:]))
    return median(N.fabs(a - d) / c)
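Dividing by c = 0.6745 makes the MAD comparable to a standard deviation for Gaussian data (0.6745 is approximately Phi^-1(3/4), the reciprocal of the 1.4826 factor used elsewhere on this page). A usage sketch on 1-D data, where any median implementation behaves the same (N is assumed to be numpy, as the snippet implies):

import numpy as np

flux = np.array([1.2, 0.9, 1.1, 1.0, 9.7])  # one gross outlier
print(MAD(flux))     # ~0.15: a robust scale estimate, barely moved by the 9.7
print(np.std(flux))  # ~3.46: the classical estimate is dominated by the outlier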
Example 13: mad_clipping
def mad_clipping(input_data, sigma_clip_level, return_length=False):
    medval = median(input_data)
    sigma = 1.4826 * median(abs(medval - input_data))
    high_sigma_clip_limit = medval + sigma_clip_level * sigma
    low_sigma_clip_limit = medval - sigma_clip_level * sigma
    clipped_data = input_data[(input_data > low_sigma_clip_limit) &
                              (input_data < high_sigma_clip_limit)]
    new_medval = median(clipped_data)
    new_sigma = 1.4826 * median(abs(medval - clipped_data))
    if return_length:
        return new_medval, new_sigma, len(clipped_data)
    else:
        return new_medval, new_sigma
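A minimal sketch of 3-sigma MAD clipping on synthetic data (assumes input_data is a NumPy array, as the boolean indexing requires, and that the bare median resolves to scipy.median or numpy.median):

import numpy as np
from numpy import median   # stand-in for the snippet's median

rng = np.random.default_rng(0)
data = np.concatenate([rng.normal(10.0, 1.0, 1000), [55.0, -40.0]])  # two gross outliers
medval, sigma, n_kept = mad_clipping(data, sigma_clip_level=3.0, return_length=True)
print(medval, sigma, n_kept)   # median near 10, sigma near 1, outliers rejected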
Example 14: mad_clipping
def mad_clipping(input_data, sigma_clip_level):
    medval = median(input_data)
    sigma = 1.48 * median(abs(medval - input_data))
    high_sigma_clip_limit = medval + sigma_clip_level * sigma
    low_sigma_clip_limit = medval - sigma_clip_level * sigma
    clipped_data = []
    for value in input_data:
        if (value > low_sigma_clip_limit) and (value < high_sigma_clip_limit):
            clipped_data.append(value)
    clipped_data_array = array(clipped_data)
    new_medval = median(clipped_data_array)
    new_sigma = 1.48 * median(abs(medval - clipped_data_array))
    return clipped_data_array, new_medval, new_sigma
Example 15: __amp_detect
def __amp_detect(self, x):
    ref = int(np.floor(self.min_ref_per * self.sr / 1000.0))
    # BAND-PASS FILTER OF THE DATA
    (b, a) = signal.ellip(2, 0.1, 40, [self.fmin_detect*2.0/self.sr, self.fmax_detect*2.0/self.sr], btype='bandpass', analog=0, output='ba')
    xf_detect = signal.filtfilt(b, a, x)
    (b, a) = signal.ellip(2, 0.1, 40, [self.fmin_sort*2.0/self.sr, self.fmax_sort*2.0/self.sr], btype='bandpass', analog=0, output='ba')
    xf = signal.filtfilt(b, a, x)
    noise_std_detect = scipy.median(np.abs(xf_detect)) / 0.6745
    noise_std_sorted = scipy.median(np.abs(xf)) / 0.6745
    thr = self.stdmin * noise_std_detect     # thr for detection is based on the detection settings
    thrmax = self.stdmax * noise_std_sorted  # thrmax for artifact removal is based on the sorting settings
    # LOCATE SPIKE TIMES
    nspk = 0
    xaux = np.argwhere(xf_detect[self.w_pre+1:len(xf_detect)-self.w_post-1-1] > thr) + self.w_pre + 1
    xaux = np.resize(xaux, len(xaux))
    xaux0 = 0
    index = []
    for i in range(len(xaux)):
        if xaux[i] >= (xaux0 + ref):
            # after finding a peak, the search resumes ref samples past the last accepted spike
            iaux = xf[xaux[i]:xaux[i] + int(np.floor(ref/2.0))].argmax(0)  # introduces alignment
            nspk = nspk + 1
            index.append(iaux + xaux[i])
            xaux0 = index[nspk-1]
    # SPIKE STORING (with or without interpolation)
    ls = self.w_pre + self.w_post
    spikes = np.zeros([nspk, ls+4])
    xf = np.concatenate((xf, np.zeros(self.w_post)), axis=0)
    for i in range(nspk):  # Eliminates artifacts
        if np.max(np.abs(xf[index[i]-self.w_pre:index[i]+self.w_post])) < thrmax:
            spikes[i, :] = xf[index[i]-self.w_pre-1:index[i]+self.w_post+3]
    aux = np.argwhere(spikes[:, self.w_pre] == 0)  # erases indexes that were artifacts
    if len(aux) != 0:
        aux = aux.reshape((1, len(aux)))[0]
        spikes = np.delete(spikes, aux, axis=0)
        index = np.delete(index, aux)
    if self.interpolation == 'y':
        # Does interpolation
        spikes = self.__int_spikes(spikes)
    return spikes, thr, index
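The noise estimate at the top of this example is the classic robust trick from spike sorting: for roughly zero-mean data, median(|x|)/0.6745 approximates the noise standard deviation while staying insensitive to the spikes themselves. A self-contained illustration (synthetic trace; numpy.median stands in for scipy.median):

import numpy as np

rng = np.random.default_rng(1)
trace = rng.normal(0.0, 2.0, 30000)          # background noise, true sigma = 2.0
trace[::300] += 40.0                         # sprinkle in large spike-like events
print(np.std(trace))                         # ~3: inflated by the spikes
print(np.median(np.abs(trace)) / 0.6745)     # ~2: robust to the spikes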