This article collects typical usage examples of the numpy.median function in Python. If you have been wondering what numpy.median does, how to call it, or what real-world usage looks like, the curated examples below should help.
Fifteen code examples of the median function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
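Before diving into the examples, here is a minimal refresher sketch of numpy.median itself; this is standard NumPy behavior:

import numpy as np

a = np.array([[1.0, 3.0, 5.0],
              [2.0, 4.0, 6.0]])
print(np.median(a))          # 3.5: median of the flattened array
print(np.median(a, axis=0))  # [1.5 3.5 5.5]: column-wise medians
print(np.median(a, axis=1))  # [3. 4.]: row-wise medians
# For data containing NaNs, np.nanmedian ignores them instead of propagating nan.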
Example 1: _getTotalDuration
def _getTotalDuration(self, actStream):
    # For bed-toilet transitions the margin is 1 hr
    totDuration = 0
    count = 0
    durlist = []
    for i in range(0, len(actStream) - 2):
        firstLine = actStream[i].split(" ")
        secondLine = actStream[i + 1].split(" ")
        # Build datetimes from the date and time fields of consecutive events
        d1 = self._get_datetime(firstLine[0], firstLine[1])
        d2 = self._get_datetime(secondLine[0], secondLine[1])
        td = d2 - d1
        duration = td.total_seconds()
        margin = self._calculateMargin(d1, d2)
        if duration > 60 * margin:
            # Gap exceeds the margin; other activities likely occurred in between
            count = count + 1
            continue
        durlist.append(duration)
        totDuration = duration + totDuration
    try:
        return (round(numpy.median(durlist) / 60, 5), count)
    except ValueError:
        # durlist was empty; the original repeated the failing median call here
        return (0, count)
Example 2: work
def work(self):
    self.worked = True
    kwargs = dict(
        weights=self.weights,
        mus=self.mus,
        sigmas=self.sigmas,
        low=self.low,
        high=self.high,
        q=self.q,
    )
    samples = GMM1(rng=self.rng,
                   size=(self.n_samples,),
                   **kwargs)
    samples = np.sort(samples)
    edges = samples[::self.samples_per_bin]
    pdf = np.exp(GMM1_lpdf(edges[:-1], **kwargs))
    dx = edges[1:] - edges[:-1]
    y = 1 / dx / len(dx)
    if self.show:
        plt.scatter(edges[:-1], y)
        plt.plot(edges[:-1], pdf)
        plt.show()
    err = (pdf - y) ** 2
    print(np.max(err))
    print(np.mean(err))
    print(np.median(err))
    if not self.show:
        assert np.max(err) < .1
        assert np.mean(err) < .01
        assert np.median(err) < .01
Example 3: compute_ks_by_contained
def compute_ks_by_contained(contigs_by_lib_name, sinks, sources):
    # Compute the median of maxmin as well as the KS p-value of contained maxmin
    for lib_snk in contigs_by_lib_name:
        # For a fixed lib_snk, handle all source libs together.
        # contained_ctg: contig names of all source libraries, keyed by source library name
        contained_ctg = collections.defaultdict(set)
        for snkCtg in contigs_by_lib_name[lib_snk].values():
            for srcCtg in snkCtg.contained_in:
                contained_ctg[srcCtg.lib].add(srcCtg.name)
        for lib_src in contigs_by_lib_name:
            if lib_src in contained_ctg:
                contained = []
                not_contained = []
                for ctg in contigs_by_lib_name[lib_src]:
                    if ctg in contained_ctg[lib_src]:
                        contained.append(contigs_by_lib_name[lib_src][ctg].maxmin)
                    else:
                        not_contained.append(contigs_by_lib_name[lib_src][ctg].maxmin)
                ks_pvalue = stats.ks_2samp(contained, not_contained)[1]
                print(lib_src, lib_snk, ks_pvalue,
                      sum(contained) / len(contained),
                      sum(not_contained) / len(not_contained))
                if ks_pvalue < 0.05 and np.median(contained) > np.median(not_contained):
                    sources[lib_snk] |= {lib_src}
                    sinks[lib_src] |= {lib_snk}
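For context, a stripped-down, self-contained sketch of the statistical check this example performs; the arrays and distributions below are invented for illustration, with SciPy assumed available:

import numpy as np
from scipy import stats

contained = np.random.normal(1.0, 0.5, 200)      # scores of contained contigs
not_contained = np.random.normal(0.0, 0.5, 200)  # scores of the rest
p = stats.ks_2samp(contained, not_contained)[1]
if p < 0.05 and np.median(contained) > np.median(not_contained):
    print("contained contigs score significantly higher")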
Example 4: getStripStatistics
def getStripStatistics(self, yKey='vPhi', nMin=10):
    """For each of the strips, get the strip statistics"""
    if np.size(self.stripsFeH) < 1:
        self.buildStripsFeH()
    # may as well loop through!!
    # View of what we're using for our vertical quantity
    x = self.tSim['FeHObs']
    y = self.tSim[yKey]
    nStrips = np.size(self.stripsFeH) - 1
    self.stripCounts = np.zeros(nStrips, dtype='int')
    self.stripMeans = np.zeros(nStrips)
    self.stripMedns = np.zeros(nStrips)
    self.stripStdds = np.zeros(nStrips)
    self.stripFeHs = np.zeros(nStrips)  # central point for sample
    for iStrip in range(nStrips):
        xLo = self.stripsFeH[iStrip]
        xHi = self.stripsFeH[iStrip + 1]
        bStrip = (self.bSel) & (x >= xLo) & (x < xHi)
        self.stripCounts[iStrip] = np.sum(bStrip)
        if self.stripCounts[iStrip] < nMin:
            continue
        self.stripMeans[iStrip] = np.mean(y[bStrip])
        self.stripMedns[iStrip] = np.median(y[bStrip])
        self.stripStdds[iStrip] = np.std(y[bStrip])
        self.stripFeHs[iStrip] = np.median(x[bStrip])
Example 5: start_requests
def start_requests(self):
    summary_utc = datetime.utcnow() - timedelta(days=1)
    db_engine = create_engine(self.settings.get('SQLALCHEMY_DATABASE_URI'))
    db_session = sessionmaker(bind=db_engine)()
    db_query = db_session.query(
            LiveTVSite.id.label('site_id'), LiveTVRoom.id.label('room_id'),
            LiveTVRoom.url.label('room_url'),
            LiveTVRoomPresent.crawl_date_format.label('summary_date'),
            func.array_agg(LiveTVRoomPresent.online).label('online_list')) \
        .join(LiveTVSite, LiveTVRoom, LiveTVRoomPresent) \
        .filter(LiveTVRoomPresent.crawl_date_format == summary_utc.strftime(DAILY_DATE_FORMAT)) \
        .group_by(LiveTVSite.id, LiveTVRoom.id, LiveTVRoom.url, LiveTVRoomPresent.crawl_date_format)
    for group_row in db_query:
        meta_info = {
            'site_id': group_row.site_id,
            'room_id': group_row.room_id,
            'summary_date': group_row.summary_date,
            'online': numpy.median(group_row.online_list)
        }
        room = self.session.query(LiveTVRoom).filter_by(id=meta_info['room_id']).one_or_none()
        if room:
            yield DailyItem(site_id=group_row.site_id, room_id=group_row.room_id,
                            summary_date=group_row.summary_date, online=numpy.median(group_row.online_list),
                            followers=room.followers, description=room.description,
                            announcement=room.announcement, fallback=False)
    db_session.close()
Example 6: allclose_with_out
def allclose_with_out(x, y, atol=0.0, rtol=1.0e-5):
    # Run np.allclose on x and y; if it fails, print some stats before returning.
    ac = np.allclose(x, y, rtol=rtol, atol=atol)
    if not ac:
        dd = np.abs(x - y)
        neon_logger.display('abs errors: %e [%e, %e] Abs Thresh = %e'
                            % (np.median(dd), np.min(dd), np.max(dd), atol))
        amax = np.argmax(dd)
        if np.isscalar(x):
            neon_logger.display('worst case: %e %e' % (x, y.flat[amax]))
        elif np.isscalar(y):
            neon_logger.display('worst case: %e %e' % (x.flat[amax], y))
        else:
            neon_logger.display('worst case: %e %e' % (x.flat[amax], y.flat[amax]))
        dd = np.abs(dd - atol) / np.abs(y)
        neon_logger.display('rel errors: %e [%e, %e] Rel Thresh = %e'
                            % (np.median(dd), np.min(dd), np.max(dd), rtol))
        amax = np.argmax(dd)
        if np.isscalar(x):
            neon_logger.display('worst case: %e %e' % (x, y.flat[amax]))
        elif np.isscalar(y):
            neon_logger.display('worst case: %e %e' % (x.flat[amax], y))
        else:
            neon_logger.display('worst case: %e %e' % (x.flat[amax], y.flat[amax]))
    return ac
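A quick, hypothetical usage sketch; the arrays here are invented, and the diagnostics above only print (through neon's logger) when the comparison fails:

import numpy as np

x = np.linspace(0.0, 1.0, 100)
y = x + 1e-6 * np.random.randn(100)  # tiny perturbation, well within tolerance
assert allclose_with_out(x, y, atol=1e-4)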
Example 7: q1
def q1():
    # generate random clusters
    clusters = []
    sizes = range(2, 201)
    for size in sizes:
        clusters.append(gen_random_clusters(size))
    # get running times
    random.seed(912)
    # run 10 trials, and take the median time for each n to smooth the data
    slow_trials = np.zeros((10, 199))
    fast_trials = np.zeros((10, 199))
    for i in range(10):
        slow_trials[i, :] = timer(slow_closest_pair, clusters)
        fast_trials[i, :] = timer(fast_closest_pair, clusters)
    # median times across trials
    slow_times = np.median(slow_trials, 0)
    fast_times = np.median(fast_trials, 0)
    # plot
    plt.figure()
    plt.plot(sizes, slow_times, 'c-', label='slow_closest_pair')
    plt.plot(sizes, fast_times, 'm-', label='fast_closest_pair')
    plt.legend(loc='upper left')
    plt.xlabel('Size of Cluster List')
    plt.ylabel('Median Running Time (s), 10 Trials')
    plt.title('Comparison of Running Times on Desktop Python')
    plt.show()
    return None
Example 8: remaining_time
def remaining_time(self):
    """Return our best estimate of the remaining duration, or None
    if we have no basis for guessing."""
    if self.end_times is None:
        return None  # We have not started the first module yet
    else:
        module_index = self.current_module.module_num - 1
        index = self.image_set_index * self.num_modules + module_index
        durations = (self.end_times[1:] - self.end_times[:-1]).reshape(self.num_image_sets, self.num_modules)
        per_module_estimates = np.zeros(self.num_modules)
        per_module_estimates[:module_index] = np.median(durations[:self.image_set_index + 1, :module_index], 0)
        current_module_so_far = self.adjusted_time() - self.end_times[1 + index - 1]
        if self.image_set_index > 0:
            per_module_estimates[module_index:] = np.median(durations[:self.image_set_index, module_index:], 0)
            per_module_estimates[module_index] = max(per_module_estimates[module_index], current_module_so_far)
        else:
            # Guess that the modules that haven't finished yet are
            # as slow as the slowest one we've seen so far.
            per_module_estimates[module_index] = current_module_so_far
            per_module_estimates[module_index:] = per_module_estimates[:module_index + 1].max()
        if False:
            # Debugging output, disabled by default
            print("current_module_so_far =", current_module_so_far,
                  "; adjusted_time =", self.adjusted_time(),
                  "; end_times =", self.end_times)
            print("durations:")
            print(durations)
            print("per_module_estimates:")
            print(per_module_estimates)
        per_module_estimates[:module_index] *= self.num_image_sets - self.image_set_index - 1
        per_module_estimates[module_index:] *= self.num_image_sets - self.image_set_index
        per_module_estimates[module_index] -= current_module_so_far
        return per_module_estimates.sum()
Example 9: lonlat2xy
def lonlat2xy(lon, lat, lon_0=None, lat_0=None):
    """Convert pairs of (lon, lat) into (x, y).
    Input:
        lon [deg]
        lat [deg]
        lon_0 [deg] => longitude of the origin of the Cartesian system
        lat_0 [deg] => latitude of the origin of the Cartesian system
    Output:
        x [m]
        y [m]
    The projection deforms away from the center. Since latitudes do not
    deform, y is estimated first; then, for each point, the distance to
    the reference meridian (lon_0) is estimated at the latitude of the
    measurement.
    """
    from fluid.common.distance import distance
    if (lat_0 is None) or (lon_0 is None):
        lat_0 = numpy.median(lat)
        lon_0 = numpy.median(lon)
    y = distance(lat, 0, lat_0, 0)
    y[lat < lat_0] = -1 * y[lat < lat_0]
    x = distance(lat, lon, lat, lon_0)
    x[lon < lon_0] = -1 * x[lon < lon_0]
    return x, y
Example 10: is_outlier
def is_outlier(points, threshold=3.5):
    """
    Return a boolean array that is True where a point is an outlier and
    False otherwise. Data points with a modified z-score greater than
    `threshold` are classified as outliers.
    """
    # reshape 1-D input into a column vector
    if len(points.shape) == 1:
        points = points[:, None]
    # compute the median value
    median = np.median(points, axis=0)
    # compute Euclidean distances from the median along the last axis
    diff = np.sum((points - median) ** 2, axis=-1)
    diff = np.sqrt(diff)
    # compute the median absolute deviation (MAD)
    med_abs_deviation = np.median(diff)
    # compute the modified z-score
    # http://www.itl.nist.gov/div898/handbook/eda/section4/eda43.htm#Iglewicz
    modified_z_score = 0.6745 * diff / med_abs_deviation
    # return a mask marking each outlier
    return modified_z_score > threshold
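A short, hypothetical check of is_outlier on synthetic data (values invented for illustration):

import numpy as np

data = np.append(np.random.normal(0, 1, 100), [12.0, -15.0])
mask = is_outlier(data, threshold=3.5)
print(data[mask])  # should flag the two injected extremes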
Example 11: _idealize_uncert
def _idealize_uncert(dds):
    for action in dds.actions:
        field = action.diffeo.d
        field_inv = action.diffeo_inv.d
        I = np.zeros(field.shape)
        Y, X = np.meshgrid(range(field.shape[1]), range(field.shape[0]))
        I[:, :, 0] = X
        I[:, :, 1] = Y
        D = field - I
        v = (np.median(D[:, :, 0]), np.median(D[:, :, 1]))
        D_inv = field_inv - I
        v_inv = (np.median(D_inv[:, :, 0]), np.median(D_inv[:, :, 1]))
        print('v = ' + str(v))
        print('v_inv = ' + str(v_inv))
        for c in itertools.product(range(X.shape[0]), range(X.shape[1])):
            if defined_cell(c, X.shape, v):
                action.diffeo.variance[c] = 1.0
            else:
                action.diffeo.variance[c] = 0.0
            if defined_cell(c, X.shape, v_inv):
                action.diffeo_inv.variance[c] = 1.0
            else:
                action.diffeo_inv.variance[c] = 0.0
    return dds
Example 12: denoise
def denoise(self, data, wavelet):
    # Assumes: from numpy import median, absolute, floor, log, sqrt; import pywt
    # Robust noise estimate: MAD / 0.6745 approximates sigma for Gaussian noise
    noiseSigma = median(absolute(data - median(data))) / 0.6745
    levels = int(floor(log(len(data))))
    WC = pywt.wavedec(data, wavelet, level=levels)
    # Universal (Donoho-Johnstone) threshold
    threshold = noiseSigma * sqrt(2 * log(len(data)))
    # pywt.thresholding.hard was removed in modern PyWavelets; pywt.threshold
    # with mode='hard' is the equivalent, and waverec needs a list, not a lazy map
    NWC = [pywt.threshold(c, threshold, mode='hard') for c in WC]
    return pywt.waverec(NWC, wavelet)
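A hypothetical usage sketch, assuming PyWavelets is installed; the Denoiser wrapper is made up purely so the method above can be called:

import numpy as np
from numpy import median, absolute, floor, log, sqrt
import pywt

class Denoiser:          # hypothetical host class for the method above
    denoise = denoise    # reuse the function defined in this example

t = np.linspace(0, 1, 1024)
noisy = np.sin(2 * np.pi * 5 * t) + 0.3 * np.random.randn(t.size)
denoised = Denoiser().denoise(noisy, 'db4')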
Example 13: __init__
def __init__(self, f, label, color="k", linestyle="-"):
    d = np.load(f)
    self.data = d
    self.mass = d["mass"]
    self.ul_med = []
    self.ul68_lo = []
    self.ul68_hi = []
    self.ul95_lo = []
    self.ul95_hi = []
    self.label = label
    self.color = color
    self.linestyle = linestyle
    for i in range(len(d["mass"])):
        ul = np.sort(d["ul"][:, i])
        ul = ul[ul > 0]
        n = len(ul)
        # Indices must be integers; the original float arithmetic fails on Python 3
        self.ul68_lo.append(ul[int(max(0, n / 2.0 - n * 0.34))])
        self.ul68_hi.append(ul[int(min(n - 1, n / 2.0 + n * 0.34))])
        self.ul95_lo.append(ul[int(max(0, n / 2.0 - n * 0.95 / 2.0))])
        self.ul95_hi.append(ul[int(min(n - 1, n / 2.0 + n * 0.95 / 2.0))])
        self.ul_med.append(np.median(ul))
Example 14: make_lick_individual
def make_lick_individual(targetSN, w1, w2):
    """ Make maps for the kinematics. """
    filename = "lick_corr_sn{0}.tsv".format(targetSN)
    binimg = pf.getdata("voronoi_sn{0}_w{1}_{2}.fits".format(targetSN, w1, w2))
    intens = "collapsed_w{0}_{1}.fits".format(w1, w2)
    extent = calc_extent(intens)
    bins = np.loadtxt(filename, usecols=(0,), dtype=str).tolist()
    bins = np.array([x.split("bin")[1] for x in bins]).astype(int)
    data = np.loadtxt(filename, usecols=np.arange(25) + 1).T
    labels = [r'Hd$_A$', r'Hd$_F$', r'CN$_1$', r'CN$_2$', r'Ca4227', r'G4300',
              r'Hg$_A$', r'Hg$_F$', r'Fe4383', r'Ca4455', r'Fe4531', r'C4668',
              r'H$_\beta$', r'Fe5015', r'Mg$_1$', r'Mg$_2$', r'Mg$_b$', r'Fe5270',
              r'Fe5335', r'Fe5406', r'Fe5709', r'Fe5782', r'Na$_D$', r'TiO$_1$',
              r'TiO$_2$']
    mag = "[mag]"
    ang = r"[\AA]"
    units = [ang, ang, mag, mag, ang, ang,
             ang, ang, ang, ang, ang, ang,
             ang, ang, mag, mag, ang, ang,
             ang, ang, ang, ang, ang, mag,
             mag]
    # One [vmin, vmax] pair per index; None means "derive limits from the data"
    lims = [[None, None]] * 25
    pdf = PdfPages("figs/lick_sn{0}.pdf".format(targetSN))
    fig = plt.figure(1, figsize=(6.25, 5))
    plt.subplots_adjust(bottom=0.12, right=0.97, left=0.09, top=0.96)
    plt.minorticks_on()
    ax = plt.subplot(111)
    ax.minorticks_on()
    plot_indices = np.arange(12, 22)
    for i, vector in enumerate(data):
        if i not in plot_indices:
            continue
        print("Making plot for {0}...".format(labels[i]))
        kmap = np.zeros_like(binimg)
        kmap[:] = np.nan
        for bin, v in zip(bins, vector):
            idx = np.where(binimg == bin)
            kmap[idx] = v
        vmin = lims[i][0] if lims[i][0] else np.median(vector) - 2 * vector.std()
        vmax = lims[i][1] if lims[i][1] else np.median(vector) + 2 * vector.std()
        # origin must be "lower" (not "bottom") for matplotlib's imshow
        m = plt.imshow(kmap, cmap="inferno", origin="lower", vmin=vmin,
                       vmax=vmax, extent=extent, aspect="equal")
        make_contours()
        plt.minorticks_on()
        plt.xlabel("X [kpc]")
        plt.ylabel("Y [kpc]")
        plt.xlim(extent[0], extent[1])
        plt.ylim(extent[2], extent[3])
        cbar = plt.colorbar(m)
        cbar.set_label("{0} {1}".format(labels[i], units[i]))
        pdf.savefig()
        plt.clf()
    pdf.close()
    return
Example 15: test_compare_cache_benchmark
def test_compare_cache_benchmark(self, varying_param, analytics_data, plt):
    stats = pytest.importorskip('scipy.stats')
    d1, d2 = analytics_data
    assert np.all(d1[varying_param] == d2[varying_param]), (
        'Cannot compare different parametrizations')
    axis_label = self.param_to_axis_label[varying_param]
    print("Cache, varying {0}:".format(axis_label))
    for label, key in zip(self.labels, self.keys):
        clean_d1 = [self.reject_outliers(d) for d in d1[key]]
        clean_d2 = [self.reject_outliers(d) for d in d2[key]]
        diff = [np.median(b) - np.median(a)
                for a, b in zip(clean_d1, clean_d2)]
        p_values = np.array([2. * stats.mannwhitneyu(a, b)[1]
                             for a, b in zip(clean_d1, clean_d2)])
        overall_p = 1. - np.prod(1. - p_values)
        if overall_p < .05:
            print("  {label}: Significant change (p <= {p:.3f}). See plots"
                  " for details.".format(
                      label=label, p=np.ceil(overall_p * 1000.) / 1000.))
        else:
            print("  {label}: No significant change.".format(label=label))
        plt.plot(d1[varying_param], diff, label=label)
    plt.xlabel("Number of %s" % axis_label)
    plt.ylabel("Difference in build time (s)")
    plt.legend(loc='best')
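For reference, a stripped-down sketch of the median-difference plus Mann-Whitney comparison used above; the arrays are invented, and the 2x factor in the original reflects older SciPy's one-sided mannwhitneyu default, whereas modern SciPy can return a two-sided p-value directly:

import numpy as np
from scipy import stats

a = np.random.normal(1.0, 0.2, 50)  # build times before a change
b = np.random.normal(0.8, 0.2, 50)  # build times after
print(np.median(b) - np.median(a))  # negative means the builds got faster
print(stats.mannwhitneyu(a, b, alternative='two-sided')[1])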