This article collects typical usage examples of the Python pylab.barh function. If you are unsure how to call barh or what it can be used for, the curated code examples below may help.
The following shows 15 code examples of the barh function, sorted by popularity by default.
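Before the examples, here is a minimal, self-contained sketch of the basic barh call (the data and labels are invented for illustration): barh takes the y positions of the bars and their widths, and yticks is usually used to label each bar.

import numpy as np
import pylab

# Illustrative data only; not taken from the examples below.
labels = ['alpha', 'beta', 'gamma']
values = [3, 7, 5]
pos = np.arange(len(values))           # one y position per bar

pylab.barh(pos, values, height=0.6, align='center', color='b')
pylab.yticks(pos, labels)              # name each bar on the y axis
pylab.xlabel('value')
pylab.show()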
Example 1: barh
def barh(x, y=None, title='', xlabel='', ylabel=''):
import pylab as P
import numpy as np
L = (tuple, list, np.ndarray)
# separate arrays
if isinstance(x,L) and isinstance(y,L):
xylist = zip(x,y)
# list of two-tuples
elif isinstance(x, L) and isinstance(x[0], L) and len(x[0]) == 2:
xylist = x
else:
        raise TypeError("x and y must both be sequences, or x must be a sequence of (label, value) pairs")
P.figure(figsize=(10, 5)) # image dimensions
P.title(title, size='medium')
P.xlabel(xlabel)
P.ylabel(ylabel)
# add bars
for i, item in enumerate(xylist):
P.barh(i + 0.25 , item[1])
    # set the x-axis limit a little beyond the longest bar
width = np.max(zip(*xylist)[1])
P.xlim(0, width*1.1)
# axis setup
P.yticks(np.arange(0.65, len(xylist)), ['%s' % x for x,y in xylist], size='medium')
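A possible call of the wrapper above (the labels and counts are invented; like the wrapper itself, this assumes Python 2, where zip() returns a list):

import pylab as P

# Hypothetical data for the Example 1 wrapper.
labels = ['cats', 'dogs', 'birds']
counts = [12, 30, 7]
barh(labels, counts, title='Pet counts', xlabel='count', ylabel='pet')
P.show()  # the wrapper builds the figure but does not display or save it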
Example 2: build_model
def build_model():
#df = get_training_data()
df = get_sampling_training()
targets = np.array(df['success'])
del df['success']
del df['name']
columns = df.columns
data = np.array(df)
model = randomforest(data, targets, tree_num=200)
    pickle.dump(model, open("data/rf.model", "wb"))  # binary mode for pickle
# feature importance
feature_importance = model.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
pl.subplot(1, 2, 2)
pl.barh(pos, feature_importance[sorted_idx], align='center')
pl.yticks(pos, columns[sorted_idx])
pl.xlabel('Relative Importance')
pl.title('Variable Importance')
pl.savefig('plots/feature_imp.jpg')
Example 3: length_stats_chart
def length_stats_chart(path, prefixes, sortby=1):
stats = []
for prefix in prefixes:
med, m,s = length_stats(prefix)
stats.append((prefix,med,m,s))
stats.sort(key=operator.itemgetter(sortby))
prefixes, med_list, mean_list, std_list = zip(*stats)
blockSize = 8
ind = p.arange(0, blockSize*len(prefixes), blockSize) # y location for groups
height = 3 # bar height
p3 = p.barh(ind, std_list, 2 * height, color = 'b', linewidth = 0)
p2 = p.barh(ind, med_list, height, color = 'g', linewidth = 0)
p1 = p.barh(ind+height, mean_list, height, color = 'r', linewidth = 0)
p.ylim(-height, len(prefixes) * blockSize)
yfontprop = FontProperties(size=4)
xfontprop = FontProperties(size='smaller')
p.xlabel('Unicode Codepoints')
p.ylabel('Language Code')
p.title('Descriptive Statistics for Document Lengths')
p.gca().yaxis.tick_left()
p.yticks(ind+height, prefixes, fontproperties = yfontprop)
xmin, xmax = p.xlim()
p.xticks( p.arange(xmin,xmax,1000),fontproperties = xfontprop)
p.gca().xaxis.grid(linestyle = '-', linewidth=0.15)
p.legend((p1[0], p2[0], p3[0]), ('Mean','Median','Standard Deviation'), prop = xfontprop, loc = 'lower right' )
p.savefig(path, dpi=300)
p.close()
p.clf()
Example 4: plot
def plot(self, gs):
unit_len = self.show_len * 1. / 5.
if self.s.now - self.show_len < 0:
return
price = self.price[0][self.s.now - self.show_len : self.s.now]
profile_range = [price.min(), price.max() + 1]
floor, ceil = profile_range[0] - 1, profile_range[1] + 1
d = self.output(3, profile_range)
ax = plt.subplot(gs)
plt.plot(price)
day_begin = np.where(self.s.history['time_in_ticks'][self.s.now - self.show_len : self.s.now] == 0)[0]
for x in day_begin:
plt.axvline(x, color='r', linestyle=':')
y = self.smoothed_pivot_profile[floor : ceil]
plt.barh(np.arange(floor, ceil) - 0.5, y * unit_len, 1.0, label=self.name,
alpha=0.2, color='r', edgecolor='none')
last_price = int(get(self.price))
support = last_price + int(round((d['S_offset']) * self.volatility))
resistance = last_price + int(round((d['R_offset']) * self.volatility))
highlighted = [support, resistance]
plt.barh(np.array(highlighted) - 0.5, self.smoothed_pivot_profile[highlighted] * unit_len, 1.0,
alpha=1.0, color='r', edgecolor='none')
ax.set_xticks(np.arange(0, self.show_len * 1.22, unit_len))
ax.xaxis.grid(b=True, linestyle='--')
ax.yaxis.grid(b=False)
plt.legend(loc='upper right')
return ax
Example 5: plot_predictions
def plot_predictions(self):
data = self.get_next_batch(train=False)[2] # get a test batch
num_classes = self.test_data_provider.get_num_classes()
NUM_ROWS = 2
NUM_COLS = 4
NUM_IMGS = NUM_ROWS * NUM_COLS
NUM_TOP_CLASSES = min(num_classes, 4) # show this many top labels
label_names = self.test_data_provider.batch_meta['label_names']
if self.only_errors:
preds = n.zeros((data[0].shape[1], num_classes), dtype=n.single)
else:
preds = n.zeros((NUM_IMGS, num_classes), dtype=n.single)
rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
print rand_idx
data[0] = n.require(data[0][:,rand_idx], requirements='C')
data[1] = n.require(data[1][:,rand_idx], requirements='C')
data += [preds]
temp = data[0]
print data
print temp.ndim,temp.shape,temp.size
# Run the model
self.libmodel.startFeatureWriter(data, self.sotmax_idx)
self.finish_batch()
fig = pl.figure(3)
fig.text(.4, .95, '%s test case predictions' % ('Mistaken' if self.only_errors else 'Random'))
if self.only_errors:
err_idx = nr.permutation(n.where(preds.argmax(axis=1) != data[1][0,:])[0])[:NUM_IMGS] # what the net got wrong
data[0], data[1], preds = data[0][:,err_idx], data[1][:,err_idx], preds[err_idx,:]
data[0] = self.test_data_provider.get_plottable_data(data[0])
for r in xrange(NUM_ROWS):
for c in xrange(NUM_COLS):
img_idx = r * NUM_COLS + c
if data[0].shape[0] <= img_idx:
break
pl.subplot(NUM_ROWS*2, NUM_COLS, r * 2 * NUM_COLS + c + 1)
pl.xticks([])
pl.yticks([])
try:
img = data[0][img_idx,:,:,:]
except IndexError:
# maybe greyscale?
img = data[0][img_idx,:,:]
pl.imshow(img, interpolation='nearest')
true_label = int(data[1][0,img_idx])
img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
pl.subplot(NUM_ROWS*2, NUM_COLS, (r * 2 + 1) * NUM_COLS + c + 1, aspect='equal')
ylocs = n.array(range(NUM_TOP_CLASSES)) + 0.5
height = 0.5
width = max(ylocs)
pl.barh(ylocs, [l[0]*width for l in img_labels], height=height, \
color=['r' if l[1] == label_names[true_label] else 'b' for l in img_labels])
pl.title(label_names[true_label])
pl.yticks(ylocs + height/2, [l[1] for l in img_labels])
pl.xticks([width/2.0, width], ['50%', ''])
pl.ylim(0, ylocs[-1] + height*2)
Example 6: plot_variable_importance
def plot_variable_importance(feature_importance, names_cols, save_name, save):
"""Show Variable importance graph."""
    # scale by the maximum importance and keep the 20 most important variables
feature_importance = feature_importance / feature_importance.max()
sorted_idx = np.argsort(feature_importance)[::-1][:20]
barPos = np.arange(sorted_idx.shape[0]) + .8
barPos = barPos[::-1]
#plot.figure(num=None, facecolor='w', edgecolor='r')
plot.figure(num=None, facecolor='w')
plot.barh(barPos, feature_importance[sorted_idx]*100, align='center')
plot.yticks(barPos, names_cols[sorted_idx])
plot.xticks(np.arange(0, 120, 20), \
['0 %', '20 %', '40 %', '60 %', '80 %', '100 %'])
plot.margins(0.02)
plot.subplots_adjust(bottom=0.15)
plot.title('Variable Importance')
if save:
plot.savefig(save_name, bbox_inches='tight', dpi = 300)
plot.close("all")
else:
plot.show()
Example 7: test_feature
def test_feature(train_path):
data = np.genfromtxt(train_path, delimiter = ',')
y = data[:,0]
X = data[:,1:]
sample_size = len(y)
train_size = int(sample_size * .95)
params = {'n_estimators': 100, 'max_depth': 2, 'random_state': 1,
'min_samples_split': 5}
params.update({'learn_rate': 0.02, 'subsample': 1.0})
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X, y)
pl.figure()
feature_names = np.array(['type', 'type', 'type', 'main', 'log_main', 'evi', 'log_evi', 'df1', 'log_df1', 'dfu8', 'log_dfu8', 'dfband', 'log_dfband'])
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)[-8:]
pos = np.arange(sorted_idx.shape[0]) + .5
pl.barh(pos, feature_importance[sorted_idx], align='center')
pl.yticks(pos, feature_names[sorted_idx])
pl.xlabel('Relative Importance')
pl.title('Variable Importance')
pl.show()
Example 8: plotNogazeDuration
def plotNogazeDuration():
plt.figure(figsize=(12,12))
for vp in range(100,120):
print vp
plt.subplot(5,4,vp-99)
plt.ion()
data=readTobii(vp,0,ETDATAPATH);
datT=[];datF=[]
for trl in data:
trl.extractBasicEvents()
miss=np.int32(np.logical_and(np.isnan(trl.gaze[:,7]),
np.isnan(trl.gaze[:,8])))
miss=removeShortEvs(miss,2*60)
miss=1-removeShortEvs(1-miss,1*60)
datT+=map(lambda x: (x[1]-x[0])/60.,tseries2eventlist(miss))
datF+=map(lambda x: (x[1]-x[0])/60.,tseries2eventlist(1-miss))
x=np.linspace(0,10,21);h=x[-1]/float(x.size-1)
a=np.histogram(datT,bins=x, normed=True)
plt.barh(x[:-1],-a[0],ec='k',fc='k',height=h,lw=0)
a=np.histogram(datF,bins=x, normed=True)
plt.barh(x[:-1],a[0],ec='g',fc='g',height=h,lw=0)
plt.xlim([-0.7,0.7]);
plt.gca().set_yticks(range(0,10,2))
plt.ylim([0,10]);
#plt.grid(False,axis='y')
        if vp==100: plt.legend(['blink','gaze'])  # legend on the first subplot only
Example 9: arbolesRegresion
def arbolesRegresion(caract):
clf = DecisionTreeRegressor(min_samples_leaf=10, min_samples_split=15, max_depth=13, compute_importances=True)
importancias = [0,0,0,0,0,0,0,0,0,0,0,0,0]
mae=mse=r2=0
kf = KFold(len(boston_Y), n_folds=10, indices=True)
for train, test in kf:
trainX, testX, trainY, testY=boston_X[train], boston_X[test], boston_Y[train], boston_Y[test]
nCar=len(caract)
train=np.zeros((len(trainX), nCar))
test=np.zeros((len(testX), nCar))
trainYNuevo=trainY
for i in range(nCar):
for j in range(len(trainX)):
train[j][i]=trainX[j][caract[i]]
for k in range(len(testX)):
test[k][i]=testX[k][caract[i]]
trainYNuevo=np.reshape(trainYNuevo, (len(trainY), -1))
clf.fit(train, trainYNuevo)
prediccion=clf.predict(test)
# clf.fit(trainX, trainY)
# prediccion=clf.predict(testX)
mae+=metrics.mean_absolute_error(testY, prediccion)
mse+=metrics.mean_squared_error(testY, prediccion)
r2+=metrics.r2_score(testY, prediccion)
feature_importance = clf.feature_importances_
feature_importance = 100.0 * (feature_importance / feature_importance.max())
for i in range(13):
importancias[i] = importancias[i] + feature_importance[i]
print 'Error abs: ', mae/len(kf), 'Error cuadratico: ', mse/len(kf), 'R cuadrado: ', r2/len(kf)
for i in range(13):
importancias[i] = importancias[i]/10
sorted_idx = np.argsort(importancias)
pos = np.arange(sorted_idx.shape[0]) + .5
importancias = np.reshape(importancias, (len(importancias), -1))
boston = datasets.load_boston()
pl.barh(pos, importancias[sorted_idx], align='center')
pl.yticks(pos, boston.feature_names[sorted_idx])
pl.xlabel('Importancia relativa')
pl.show()
import StringIO, pydot
dot_data = StringIO.StringIO()
tree.export_graphviz(clf, out_file=dot_data)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
graph.write_pdf("bostonTree.pdf")
Example 10: do_scaplots
def do_scaplots(distance_dict, after_dict, before_dict, bins, xtext, option=0):
for count, name,ylims in ((0,'m_diff', (-0.5,0.5)),(1,'n diff', (-1,0.5)),(2,'r diff', (-0.5,0.5)),(3, 'ba diff', (-0.05,0.05))):
pl.subplot(2,2,count+1)
if 0:#count ==2:
ns = np.array([after_dict[a][count]/np.max([before_dict[a][count],0.0000001])-1.0 for a in before_dict.keys()]).T
else:
ns = np.array([after_dict[a][count]-before_dict[a][count] for a in before_dict.keys()]).T
bars, edges=np.histogram(ns, bins=100,range=ylims)
bars = bars/float(ns.size)
print ns
#pl.step(bars, edges, *args, **kwargs)
pl.barh((edges[0:-1]+edges[1:])/2, bars, align='center', height = (edges[1:]-edges[0:-1]),alpha=0.4)
#pl.scatter(ns[0,:], ns[1,:], s =3, edgecolor='none', zorder = -900)
nstats = bin_stats.bin_stats(0.25*np.ones_like(ns), ns, (0.0,0.5), -1000.0, 1000.0)
nstats.lay_bounds(color='r', sigma_choice = [68,95])
nstats.plot_ebar('median','med95ci',color='r',ecolor='r',
marker='s', markersize=3, lw=2, linestyle='none')
pl.xlabel(xtext)
pl.ylabel(name)
pl.ylim(ylims)
pl.xlim(0,0.5)
#ax = pl.subplot(2,2,3)
#pl.ylim(-10,10)
pl.subplots_adjust(wspace=0.4, hspace=0.4)
return
Example 11: plot_occs_by_motif
def plot_occs_by_motif(by_motif):
"""Plot # occurrences for each motif.
"""
sizes = [
(len(occs), sum(occ.Z for occ in occs), name)
for name, occs in by_motif.iteritems()]
# expected = [(len(occs), name) for name, occs in by_motif.iteritems()]
sizes.sort()
bar_positions = numpy.arange(len(sizes))
num_occs = numpy.asarray([s for s, e, n in sizes])
total_Z = numpy.asarray([e for s, e, n in sizes])
pylab.barh(
bar_positions,
num_occs,
# left=total_Z,
height=.8,
align='center',
label='Sites',
color='blue',
)
pylab.barh(
bar_positions,
total_Z,
height=.8,
align='center',
label='Total Z',
color='blue',
edgecolor='white',
hatch='/',
)
pylab.yticks(bar_positions, [n for x, e, n in sizes])
pylab.ylim(ymin=-.5, ymax=len(sizes) - .5)
pylab.xlabel('occurrences')
pylab.legend(loc='lower right')
Example 12: wiki_sizes_chart
def wiki_sizes_chart(path, prefixes, upperlimit = None ):
prefixes, sizes = zip(*sorted( [(pr, dumpSize(pr)) for pr in prefixes]
, key = operator.itemgetter(1)
)
)
blockSize = 5
ind = p.arange(0, blockSize*len(prefixes), blockSize) # y location for groups
height = 4 # bar height
#colors = ['g','r','c','m','y']
colors = html_colors
thresholds = [5000, 2000,1000,500,200,100,50,20,10]
#colors = [str(float(i+1) / (len(thresholds)+1)) for i in xrange(len(thresholds))]
#colors.reverse()
overall = p.barh( ind
, sizes
, height
, color = 'b'
, linewidth = 0
, align='center'
)
subbars = []
for i, thresh in enumerate(thresholds) :
subbars.append( p.barh( ind
, [ docs_under_thresh(pr, thresh) for pr in prefixes]
, height
, color = colors[ i % len(colors) ]
, linewidth = 0
, align='center'
)
)
p.ylim(-height, len(prefixes) * blockSize)
if upperlimit:
p.xlim(0, upperlimit)
yfontprop = FontProperties(size=4)
xfontprop = FontProperties(size=4)
p.xlabel('Documents')
p.ylabel('Language Code')
p.title('Number of Documents Under Threshold')
p.yticks(ind, prefixes, fontproperties = yfontprop)
xmin, xmax = p.xlim()
xtick_interval = rounded_interval(xmin, xmax, 20, 2)
p.xticks( p.arange(xmin,xmax,xtick_interval),fontproperties = xfontprop)
p.gca().xaxis.grid(linestyle = '-', linewidth=0.15)
p.gca().yaxis.tick_left()
p.legend( [ b[0] for b in subbars]
, map(str,thresholds)
, prop = xfontprop
, loc = 'lower right'
)
p.savefig(path, dpi=300)
p.close()
p.clf()
Example 13: histogram
def histogram(c, plot_name="test", plot_title="", plot_xlabel=""):
import pylab
pylab.figure(1)
pos = pylab.arange(len(c))+.5
pylab.barh(pos, c, align='center')
pylab.yticks(pos, range(1, len(c)+1))
pylab.xlabel(plot_xlabel)
pylab.title(plot_title)
pylab.grid(True)
pylab.savefig(plot_name+".png")
Example 14: plot_feature_importance
def plot_feature_importance(feature_importance, feature_names):
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
pl.subplot(1, 2, 2)
pl.barh(pos, feature_importance[sorted_idx], align='center')
pl.yticks(pos, feature_names[sorted_idx])
pl.xlabel('Relative Importance')
pl.title('Variable Importance')
pl.show()
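A hedged sketch of driving plot_feature_importance() from a fitted scikit-learn model; the dataset and estimator below are illustrative and assume the helper's own imports (import pylab as pl, import numpy as np) are in scope:

import numpy as np
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier

# Any estimator exposing feature_importances_ would do here.
iris = load_iris()
clf = RandomForestClassifier(n_estimators=50, random_state=0)
clf.fit(iris.data, iris.target)
plot_feature_importance(clf.feature_importances_, np.array(iris.feature_names))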
Example 15: essay_char
def essay_char(essay):
from pylab import xlabel, ylabel, show, savefig, title,\
yticks, xlim, ylim, xticks, arange, figure, barh, grid, rcParams
from string import ascii_letters
global config
cnt = { x:0 for x in ascii_letters }
for c in essay:
if cnt.has_key(c):
cnt[c] += 1
titlestr = "Essay Char"
figure(figsize=(max(cnt.values())/4, 15), dpi=60)
rcParams['font.size'] = 17
rcParams['text.color'] = 'c'
rcParams['xtick.color'] = 'r'
rcParams['ytick.color'] = 'y'
rcParams['figure.facecolor'] = 'k'
rcParams['figure.edgecolor'] = 'b'
rcParams['savefig.facecolor'] = rcParams['figure.facecolor']
rcParams['savefig.edgecolor'] = rcParams['figure.edgecolor']
rcParams['savefig.dpi'] = rcParams['figure.dpi']
    xlim(0, max(cnt.values())*2)
ylim(0, len(cnt)*2)
kbuf = cnt.keys()
kbuf.sort()
xticks(xrange(int(xlim()[0]), int(xlim()[1]), 2), rotation=45)
yticks(xrange(int(ylim()[0]), int(ylim()[1]), 2), kbuf, rotation=-45)
vbuf = [cnt[c] for c in kbuf]
grid()
for n, w in zip(xrange(len(vbuf)+1), vbuf):
barh(n*2, w, height=1.5, left=0, align='center')
"""
bar(xrange(1, len(vbuf)+1), height=vbuf,
width=[1]*len(vbuf), bottom=[0]*len(vbuf), align='center')
# orientation='horizontal')
# hist(vbuf, bins=range(1, len(vbuf)+1), #rwidth=1, bottom=0,
# align='mid', orientation='horizontal', alpha=0.7)
"""
title(titlestr)
xlabel('Characters Count')
ylabel('Essay Characters')
# show()
savefig(config['/img']['tools.staticdir.dir'] + '/' + titlestr.replace(' ', '-').lower(), bbox_inches='tight', pad_inches=0)