This article collects typical usage examples of the Python function pylab.bar. If you have been wondering what the bar function does, how to call it, or what real-world usage looks like, the curated code examples here should help.
The following presents 15 code examples of the bar function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
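Before the collected examples, here is a minimal orientation sketch of the core call that most of them build on: bar(positions, heights, width, yerr=...). The data below is made up purely for illustration and does not come from any of the examples that follow:

import numpy as np
import pylab

positions = np.arange(3)      # x locations of the bars
heights = [4.0, 2.5, 3.2]     # bar heights (illustrative values)
errors = [0.3, 0.2, 0.4]      # symmetric error bars

pylab.bar(positions, heights, width=0.8, yerr=errors, align='center')
pylab.xticks(positions, ['A', 'B', 'C'])
pylab.ylabel('Value')
pylab.show()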
Example 1: reaction_times_second_step
def reaction_times_second_step(sessions, fig_no=1):
    'Reaction times for second step pokes as a function of common / rare transition.'
    # Assumes module-level imports: numpy as np, pylab as p,
    # scipy.stats.ttest_rel, and a project utility module ut.
    sec_step_IDs = ut.get_IDs(sessions[0].IDs, ['right_active', 'left_active'])
    median_RTs_common = np.zeros(len(sessions))
    median_RTs_rare = np.zeros(len(sessions))
    for i, session in enumerate(sessions):
        event_times = ut.get_event_times(session.time_stamps, session.event_codes, session.IDs)
        left_active_times = event_times['left_active']
        right_active_times = event_times['right_active']
        left_reaction_times = _latencies(left_active_times, event_times['left_poke'])
        right_reaction_times = _latencies(right_active_times, event_times['right_poke'])
        order = np.argsort(np.hstack((left_active_times, right_active_times)))
        ordered_reaction_times = np.hstack((left_reaction_times, right_reaction_times))[order]
        transitions = session.blocks['trial_trans_state'] == session.CTSO['transitions']  # Common vs rare.
        median_RTs_common[i] = np.median(ordered_reaction_times[ transitions])
        median_RTs_rare[i] = np.median(ordered_reaction_times[~transitions])
    mean_RT_common = 1000 * np.mean(median_RTs_common)
    mean_RT_rare = 1000 * np.mean(median_RTs_rare)
    SEM_RT_common = 1000 * np.sqrt(np.var(median_RTs_common) / len(sessions))
    SEM_RT_rare = 1000 * np.sqrt(np.var(median_RTs_rare) / len(sessions))
    p.figure(fig_no)
    p.bar([1, 2], [mean_RT_common, mean_RT_rare], yerr=[SEM_RT_common, SEM_RT_rare])
    p.xlim(0.8, 3)
    p.ylim(mean_RT_common * 0.8, mean_RT_rare * 1.1)
    p.xticks([1.4, 2.4], ['Common', 'Rare'])
    p.title('Second step reaction times')
    p.ylabel('Reaction time (ms)')
    print('Paired t-test P value: {}'.format(ttest_rel(median_RTs_common, median_RTs_rare)[1]))
Example 2: plot_importances
def plot_importances(imp, clfName, obj):
    # Assumes module-level: numpy as np, pylab as pl, a featureNames list,
    # and a 'paired' colour palette.
    imp = np.vstack(imp)
    print(imp)
    mean_importance = np.mean(imp, axis=0)
    std_importance = np.std(imp, axis=0)
    indices = np.argsort(mean_importance)[::-1]
    print(indices)
    print(featureNames)
    featureList = []
    num_features = len(featureNames)
    print("Feature ranking:")
    for f in range(num_features):
        featureList.append(featureNames[indices[f]])
        print("%d. feature %s (%.2f)" % (f, featureNames[indices[f]], mean_importance[indices[f]]))
    fig = pl.figure(figsize=(8, 6), dpi=150)
    pl.title("Feature importances", fontsize=30)
    pl.bar(range(num_features), mean_importance[indices],
           yerr=std_importance[indices], color=paired[0], align="center",
           edgecolor=paired[0], ecolor=paired[1])
    pl.xticks(range(num_features), featureList, size=15, rotation=90)
    pl.ylabel("Importance", size=30)
    pl.yticks(size=20)
    pl.xlim([-1, num_features])
    # fix_axes()
    pl.tight_layout()
    save_path = 'plots/' + obj + '/' + clfName + '_feature_importances.pdf'
    fig.savefig(save_path)
Example 3: command
def command(args):
    from pylab import bar, yticks, subplots_adjust, show
    from numpy import arange
    import sr.tools.bom.bom as bom
    import sr.tools.bom.parts_db as parts_db
    db = parts_db.get_db()
    m = bom.MultiBoardBom(db)
    m.load_boards_args(args.arg)
    m.prime_cache()
    prices = []
    for srcode, pg in m.items():
        if srcode == "sr-nothing":
            continue
        prices.append((srcode, pg.get_price()))
    prices.sort(key=lambda x: x[1])
    bar(0, 0.8, bottom=range(0, len(prices)), width=[x[1] for x in prices],
        orientation='horizontal')
    yticks(arange(0, len(prices)) + 0.4, [x[0] for x in prices])
    subplots_adjust(left=0.35)
    show()
Example 4: check_distribution
def check_distribution():
    # Assumes module-level: math, pi, array, mean, var (e.g. from numpy),
    # plus project helpers l2norm, interval, sig_m and hitting_value_distribuion.
    f = lambda x: math.atan2(x[0], x[1])
    region = lambda x: l2norm(x) <= 1.0
    drift = lambda t: array((0.7, 0.5))
    n_samples = 10000
    n_measure_sets = 16
    measure_sets = [interval(2.*pi*k/n_measure_sets - pi, 2.*pi*(k+1)/n_measure_sets - pi)
                    for k in range(n_measure_sets)]
    x = (0.3, 0.)
    dt = 0.02
    distribution, distribution_nocom = hitting_value_distribuion(n_samples, measure_sets, x, f, dt, drift, region)
    for k in distribution:
        print(k)
    print('sum:', sum(distribution))
    print('\n', mean(distribution), var(distribution), sig_m(distribution))
    import pylab as p
    print('\n\n')
    left = [(2.*k/n_measure_sets - 1)*pi for k in range(n_measure_sets)]
    p.bar(left, distribution, 2.*pi/n_measure_sets)
    p.plot(left, distribution_nocom, 'ro')
    p.show()
Example 5: plot_question
def plot_question(fname, question_text, data):
    import pylab
    import numpy as np
    from matplotlib.font_manager import FontProperties
    from matplotlib.text import Text
    pylab.figure().clear()
    pylab.title(question_text)
    #pylab.xlabel("Verteilung")
    #pylab.subplot(101)
    if True or len(data) < 3:
        width = 0.95
        pylab.bar(range(len(data)), [max(y, 0.01) for x, y in data], width, color="g")
        pylab.xticks([i + 0.5*width for i in range(len(data))], [x for x, y in data])
        pylab.yticks([0, 10, 20, 30, 40, 50])
        #ind = np.arange(len(data))
        #pylab.bar(ind, [y for x, y in data], 0.95, color="g")
        #pylab.ylabel("#")
        #pylab.ylim(ymax=45)
        #pylab.ylabel("Antworten")
        #pylab.xticks(ind+0.5, histo.get_ticks())
        #pylab.legend(loc=3, prop=FontProperties(size="smaller"))
        ##pylab.grid(True)
    else:
        pylab.pie([max(y, 0.1) for x, y in data], labels=[x for x, y in data], autopct="%.0f%%")
    pylab.savefig(fname, format="png", dpi=75)
Example 6: plotSpectrum
def plotSpectrum(self, spectrum, title):
    fig = plt.figure(figsize=self.figsize, dpi=self.dpi)
    plt.ioff()
    index, bar_width = spectrum.index.values, 0.2
    for i in range(spectrum.shape[1]):
        plt.bar(index + i*bar_width, spectrum.icol(i).values, bar_width,
                color=mpl.cm.jet(1.*i/spectrum.shape[1]), label=spectrum.columns[i])
    plt.xlabel('Allele')
    plt.xticks(index + 3*bar_width, index)
    plt.legend()
    plt.title('Figure {}. {}'.format(self.fignumber, title), fontsize=self.titleSize)
    self.pdf.savefig(fig)
    self.fignumber += 1
Example 7: bar_plot_1
def bar_plot_1(data):
    """Generates a bar plot from data."""
    x_labels = [a for (a, b) in data]
    y_data = [b for (a, b) in data]
    # Create chart
    pos = range(1, len(x_labels) + 1)
    P.figure(1, figsize=(11, 7))
    P.bar(left=pos, height=y_data, log=True, width=.6, color="lightgrey", edgecolor="#8094B6")
    pos2 = [a + .3 for a in pos]
    P.xticks(pos2, x_labels)
    P.title("Evolution of network data size over time", fontsize="x-large")
    P.xlabel("Network data sets (year published)", fontsize="large")
    P.ylabel("Number of vertices [log(N)]", fontsize="large")
    text_color = "black"
    # Place each value label above its bar; the y-axis is logarithmic,
    # so the offsets grow with the bar heights.
    for i in range(len(y_data)):
        if i < 2:
            P.text(pos[i] + 0.01, y_data[i] + 5, int_to_scinot(y_data[i]), color=text_color)
        elif i == 2:
            P.text(pos[i] + 0.01, y_data[i] + 100, int_to_scinot(y_data[i]), color=text_color)
        elif i == 3:
            P.text(pos[i] + 0.01, y_data[i] + 1000, int_to_scinot(y_data[i]), color=text_color)
        elif i == 4:
            P.text(pos[i] + 0.01, y_data[i] + 100000, int_to_scinot(y_data[i]), color=text_color)
        else:
            P.text(pos[i] + 0.01, y_data[i] + 1000000, int_to_scinot(y_data[i]), color=text_color)
    P.savefig("../../images/figures/net_size_evo.png", dpi=100, format="png")
Example 8: plot_stable_features
def plot_stable_features(X_train, y_train, featnames, **kwargs):
    from sklearn.linear_model import LassoLarsCV, RandomizedLasso
    n_resampling = kwargs.pop('n_resampling', 200)
    n_jobs = kwargs.pop('n_jobs', -1)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', UserWarning)
        # Estimate alphas via cross-validation.
        lars_cv = LassoLarsCV(cv=6, n_jobs=n_jobs).fit(X_train, y_train)
        alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
        clf = RandomizedLasso(alpha=alphas, random_state=42, n_jobs=n_jobs,
                              n_resampling=n_resampling)
        clf.fit(X_train, y_train)
    importances = clf.scores_
    indices = np.argsort(importances)[::-1]
    pl.bar(range(len(featnames)), importances[indices],
           color="r", align="center")
    pl.xticks(np.arange(len(featnames)) + 0.5, featnames[indices],
              rotation=45, horizontalalignment='right')
    pl.xlim(-0.5, len(featnames) - 0.5)
    pl.subplots_adjust(bottom=0.2)
    pl.ylim(0, np.max(importances) * 1.01)
    pl.ylabel('Selection frequency (%) for %d resamplings' % n_resampling)
    pl.title("Stability Selection: Selection Frequencies")
Example 9: plot_importances
def plot_importances(clf, featnames, outfile, **kwargs):
    pl.figure(figsize=(16, 4))
    featnames = np.array(featnames)
    importances = clf.feature_importances_
    imp_std = np.std([tree.feature_importances_ for tree in clf.estimators_],
                     axis=0)
    indices = np.argsort(importances)[::-1]
    #for featname in featnames[indices]:
    #    print featname
    # Keep only the 24 most important features.
    trunc_featnames = featnames[indices][0:24]
    trunc_importances = importances[indices][0:24]
    trunc_imp_std = imp_std[indices][0:24]
    pl.bar(range(len(trunc_featnames)), trunc_importances,
           color="r", yerr=trunc_imp_std, align="center")
    pl.xticks(np.arange(len(trunc_featnames)) + 0.5, trunc_featnames, rotation=45,
              horizontalalignment='right')
    pl.xlim(-0.5, len(trunc_featnames) - 0.5)
    pl.ylim(0, np.max(trunc_importances + trunc_imp_std) * 1.01)
    # pl.bar(range(len(featnames)), importances[indices],
    #        color="r", yerr=imp_std[indices], align="center")
    # pl.xticks(np.arange(len(featnames))+0.5, featnames[indices], rotation=45,
    #           horizontalalignment='right')
    # pl.xlim(-0.5, len(featnames)-0.5)
    # pl.ylim(0, np.max(importances+imp_std)*1.01)
    pl.subplots_adjust(bottom=0.2)
    pl.show()
Example 10: plot
def plot(xdata, ydata, std, title, xlabel, ylabel, label, color, alpha, miny, maxy, num=1):
    import matplotlib
    # matplotlib.use('Agg')
    import pylab
    import matplotlib.font_manager
    # All output goes to figure num.
    pylab.figure(num=num, figsize=(9.5, 9))
    pylab.gca().set_position([0.10, 0.20, 0.85, 0.60])
    # Let the plot have a fixed y-axis scale.
    ywindow = maxy - miny
    # pylab.gca().set_ylim(miny, maxy+ywindow/5.0)
    pylab.gca().set_ylim(miny, maxy)
    # pylab.plot(xdata, ydata, 'b.', label=label, color=color)
    # pylab.plot(xdata, ydata, 'b-', label='_nolegend_', color=color)
    pylab.bar(xdata, ydata, 0.9, label=label, color=color, alpha=alpha)
    t = pylab.title(title)
    # http://old.nabble.com/More-space-between-title-and-secondary-x-axis-td31722298.html
    t.set_y(1.05)
    pylab.xlabel(xlabel)
    pylab.ylabel(ylabel)
    prop = matplotlib.font_manager.FontProperties(size=12)
    leg = pylab.legend(loc="upper right", fancybox=True, prop=prop)
    leg.get_frame().set_alpha(0.5)
Example 11: _make_var_histogram
def _make_var_histogram(values, logp, nbins, ci, weights):
    # Produce a histogram.
    hist, bins = np.histogram(values, bins=nbins, range=ci,
                              #new=True,
                              normed=True, weights=weights)
    # Find the max likelihood for values in each bin.
    edges = np.searchsorted(values, bins)
    histbest = [np.max(logp[edges[i]:edges[i+1]])
                if edges[i] < edges[i+1] else -inf
                for i in range(nbins)]
    # Scale to marginalized probability with peak the same height as hist.
    histbest = np.exp(np.asarray(histbest) - max(logp)) * np.max(hist)
    import pylab
    # Plot the histogram.
    pylab.bar(bins[:-1], hist, width=bins[1]-bins[0])
    # Plot the kernel density estimate.
    #density = KDE1D(values)
    #x = linspace(bins[0], bins[-1], 100)
    #pylab.plot(x, density(x), '-k', hold=True)
    # Plot the marginal maximum likelihood.
    centers = (bins[:-1] + bins[1:])/2
    pylab.plot(centers, histbest, '-g', hold=True)
Example 12: show_char_use
def show_char_use(uchars, ucount):
    """
    Plot spread of characters used in different sets:
    - digits
    - lowercase
    - uppercase
    - symbols (not alphanumeric)
    """
    # Symbols are all printable characters minus alphanumerics.
    charsymbols = "".join(set.difference(set(string.printable), set(string.digits + string.ascii_letters)))
    charsets = [string.digits, string.ascii_lowercase, string.ascii_uppercase, charsymbols]
    charsetnames = ['digits', 'lowercase', 'uppercase', 'symbols']
    for idx, (cs, csn) in enumerate(zip(charsets, charsetnames)):
        # Select charset subset
        thischars = [i for i in uchars if i in cs]
        thiscount = [c for i, c in zip(uchars, ucount) if i in cs]
        thiscountn = [t/(1.0*sum(thiscount)) for t in thiscount]
        if HAVE_PYLAB:
            pylab.figure(100 + idx)
            pylab.title("Spread of %s" % csn)
            thisidx = numpy.arange(len(thiscount))
            pylab.bar(thisidx - 0.4, thiscountn)
            pylab.xticks(thisidx, thischars)
        else:
            print("Spread of %s" % csn)
            for c, n in zip(thischars, thiscountn):
                # There are N=len(thischars) characters in this set,
                # so on average each occurs 1/N times. A terminal window
                # is 80 chars wide, which we equate to 4/N.
                bar = "=" * int(round(70.0/4.0 * n * len(thischars)))
                print(" %s %2.0f %s" % (c, n*100, bar))
Example 13: plotHousing
def plotHousing(impression):
    """Assumes impression is a string that must be 'flat', 'volatile', or 'fair'.
    Produces a chart of housing prices over time."""
    f = open("midWestHousingPrices.txt", "r")
    # Each line of the file contains the year, quarter, and price.
    # Data are for the U.S. Midwest region.
    labels, prices = ([], [])
    for line in f:
        year, quarter, price = line.split(" ")
        label = year[2:4] + "\n Q" + quarter[1]
        labels.append(label)
        prices.append(float(price) / 1000)
    quarters = pylab.arange(len(labels))
    width = 0.8
    if impression == "flat":
        pylab.semilogy()
    pylab.bar(quarters, prices, width)
    pylab.xticks(quarters + width / 2.0, labels)
    pylab.title("Housing Prices in U.S. Midwest")
    pylab.xlabel("Quarter")
    pylab.ylabel("Average Price ($1,000's)")
    if impression == "flat":
        pylab.ylim(10, 10 ** 3)
    elif impression == "volatile":
        pylab.ylim(180, 220)
    elif impression == "fair":
        pylab.ylim(150, 250)
    else:
        raise ValueError
Example 14: reaction_times_first_step
def reaction_times_first_step(sessions):
    median_reaction_times = np.zeros([len(sessions), 4])
    all_reaction_times = []
    for i, session in enumerate(sessions):
        event_times = ut.get_event_times(session.time_stamps, session.event_codes, session.IDs)
        ITI_start_times = event_times['ITI_start']
        center_poke_times = sorted(np.hstack((event_times['high_poke'], event_times['low_poke'])))
        reaction_times = 1000 * _latencies(ITI_start_times, center_poke_times)[1:-1]
        all_reaction_times.append(reaction_times)
        transitions = (session.blocks['trial_trans_state'] == session.CTSO['transitions'])[:len(reaction_times)]  # Transitions common/rare.
        outcomes = session.CTSO['outcomes'][:len(reaction_times)].astype(bool)
        median_reaction_times[i, 0] = np.median(reaction_times[ transitions &  outcomes])  # Common transition, rewarded.
        median_reaction_times[i, 1] = np.median(reaction_times[~transitions &  outcomes])  # Rare transition, rewarded.
        median_reaction_times[i, 2] = np.median(reaction_times[ transitions & ~outcomes])  # Common transition, non-rewarded.
        median_reaction_times[i, 3] = np.median(reaction_times[~transitions & ~outcomes])  # Rare transition, non-rewarded.
    mean_RTs = np.mean(median_reaction_times, 0)
    SEM_RTs = np.sqrt(np.var(median_reaction_times, 0) / len(sessions))
    p.figure(1)
    p.clf()
    p.title('First step reaction times')
    p.bar([1, 2, 3, 4], mean_RTs, yerr=SEM_RTs)
    p.ylim(min(mean_RTs) * 0.8, max(mean_RTs) * 1.1)
    p.xticks([1.4, 2.4, 3.4, 4.4], ['Com. Rew.', 'Rare Rew.', 'Com. Non.', 'Rare Non.'])
    p.xlim(0.8, 5)
    p.ylabel('Reaction time (ms)')
    all_reaction_times = np.hstack(all_reaction_times)
    bin_edges = np.arange(0, 3001)
    rt_hist = np.histogram(all_reaction_times, bin_edges)[0]
    cum_rt_hist = np.cumsum(rt_hist) / float(len(all_reaction_times))
    p.figure(2)
    p.clf()
    p.plot(bin_edges[:-1], cum_rt_hist)
    p.ylim(0, 1)
    p.xlabel('Time from ITI start (ms)')
    p.ylabel('Cumulative fraction of first central pokes.')
Example 15: test_wald_sample
def test_wald_sample(self):
    acc = ShiftedWaldAccumulator(.2, .2, 2.0)
    nsamples = 100000
    x = np.linspace(0, 10, nsamples)
    import pylab as pl
    samp = acc.sample(nsamples)
    #dens = scipy.stats.gaussian_kde(samp[samp < 10])
    pl.hist(acc.sample(nsamples), 200, normed=True)
    h, hx = np.histogram(samp, density=True, bins=1000)
    hx = hx[:-1] + (hx[1] - hx[0])/2.
    #assert np.all(np.abs(h - acc.pdf(hx)) < 1.5)
    # The Kolmogorov-Smirnov test checks whether the samples come from the CDF.
    D, pv = scipy.stats.kstest(samp, acc.cdf)
    print(D, pv)
    assert pv > .05, "D=%f,p=%f" % (D, pv)
    if True:
        pl.clf()
        #pl.subplot(2,1,1)
        #pl.hist(samp[samp<10], 300, normed=True, alpha=.3)
        #pl.subplot(2,1,2)
        pl.bar(hx, h, alpha=.3, width=hx[1]-hx[0])
        pl.plot(x, acc.pdf(x), color='red', label='analytical')
        #pl.plot(x, dens(x), color='green', label='kde')
        pl.xlim(0, 3)
        pl.legend()
        self.savefig()