This article collects typical usage examples of Python's matplotlib.cbook.flatten function. If you are wondering what exactly the flatten function does, how to call it, or what real-world uses of it look like, the hand-picked code examples below should help.
Fifteen code examples of the flatten function are shown below, sorted by popularity by default.
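Before the project snippets, here is a minimal sketch (not taken from any project below) of what cbook.flatten does: it walks an arbitrarily nested iterable and lazily yields its scalar leaves, treating strings as leaves rather than splitting them into characters.

from matplotlib.cbook import flatten

nested = [1, [2, [3, 4]], (5, 6), "seven"]
print(list(flatten(nested)))   # [1, 2, 3, 4, 5, 6, 'seven']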
Example 1: handle_returning_base_goals
def handle_returning_base_goals(self, data=None):
    # If input is received, use it. Otherwise, use data from above.
    if data != None:
        score_sheet = copy.copy(data)
    else:
        score_sheet = copy.copy(self.score_sheet)
    # I only want to output the configuration with the best score, so first I grab it from the score sheet.
    best_score_cfg = score_sheet[0, 0]
    best_score_score = score_sheet[0, 1]
    pr2_base_output = []
    configuration_output = []
    # Outputs the best location for the pr2 base and the best "other" configurations in two separate lists.
    # Format of output is:
    # [x (m), y (m), theta (radians)], [pr2_z_axis (cm), autobed_height (cm), autobed_headrest_angle (radians)]
    # The current output for the robot base location is the transformation from the goal position for the
    # robot base to the AR tag.
    # For a task with a solution of multiple configurations, each configuration will be appended to the previous list.
    # E.g. [x1, y1, th1, x2, y2, th2] where the first three entries correspond to the first configuration.
    for i in xrange(len(best_score_cfg[0])):
        origin_B_goal = np.matrix([[m.cos(best_score_cfg[2][i]), -m.sin(best_score_cfg[2][i]), 0., best_score_cfg[0][i]],
                                   [m.sin(best_score_cfg[2][i]),  m.cos(best_score_cfg[2][i]), 0., best_score_cfg[1][i]],
                                   [0., 0., 1., 0.],
                                   [0., 0., 0., 1.]])
        pr2_B_goal = self.origin_B_pr2.I * origin_B_goal
        goal_B_ar = pr2_B_goal.I * self.pr2_B_ar
        pos_goal, ori_goal = Bmat_to_pos_quat(goal_B_ar)
        pr2_base_output.append([pos_goal[0], pos_goal[1], m.acos(pr2_B_goal[0, 0])])
        configuration_output.append([best_score_cfg[3][i], 100*best_score_cfg[4][i], np.degrees(best_score_cfg[5][i])])
    print 'Base selection service is done and has completed preparing its result.'
    return list(flatten(pr2_base_output)), list(flatten(configuration_output))
Example 2: get_subset
def get_subset(df, settings, dd_name, quiet=False):
    """
    Select only those columns specified under settings.

    Parameters
    ----------
    df : DataFrame
    settings : dictionary with a "dd_to_vars" entry
    dd_name : str, used for the lookup
    quiet : bool
        If True, print (but do not raise) when some columns from
        settings are not in df's columns.

    Returns
    -------
    subset : DataFrame
    """
    cols = {x for x in flatten(settings["dd_to_vars"][dd_name].values())}
    good_cols = {x for x in flatten(settings["dd_to_vars"]["jan2013"].values())}
    all_cols = cols.union(good_cols)
    subset = df.columns.intersection(all_cols)
    if not quiet:
        print("Implicitly dropping {}".format(cols.symmetric_difference(subset)))
    return df[subset]
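For context, a small hedged sketch of the kind of nested mapping get_subset expects; the keys and variable names here are invented, but they show how flatten collapses the nested values into a flat set of column names:

from matplotlib.cbook import flatten

# hypothetical settings structure: data-dictionary name -> topic -> variable names
settings = {"dd_to_vars": {"jan2013": {"demographics": ["HRHHID", "PRTAGE"],
                                       "labor_force": ["PEMLR"]}}}
cols = {x for x in flatten(settings["dd_to_vars"]["jan2013"].values())}
print(cols)   # {'HRHHID', 'PRTAGE', 'PEMLR'} (set order may vary)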
Example 3: _data_update
def _data_update(artists, workspace):
    # errorbar with workspaces can only return a single container
    container_orig = artists[0]
    # It is not possible to simply reset the error bars so
    # we have to plot new lines but ensure we don't reorder them on the plot!
    orig_idx = self.containers.index(container_orig)
    container_orig.remove()
    # The container does not remove itself from the containers list
    # but protect this just in case matplotlib starts doing this
    try:
        self.containers.remove(container_orig)
    except ValueError:
        pass
    # this gets pushed back onto the containers list
    container_new = plotfunctions.errorbar(self, workspace, **kwargs)
    self.containers.insert(orig_idx, container_new)
    self.containers.pop()
    # update line properties to match original
    orig_flat, new_flat = cbook.flatten(container_orig), cbook.flatten(container_new)
    for artist_orig, artist_new in zip(orig_flat, new_flat):
        artist_new.update_from(artist_orig)
    # ax.relim does not support collections...
    self._update_line_limits(container_new[0])
    self.autoscale()
    return container_new
Example 4: recurse
def recurse(s, l, w):
    for ww, ll in zip(w, l):
        if type(ww) is list:
            for e in flatten(ll):
                s.Add((ww[0] == 0) <= (e == 0))
            recurse(s, ll[1:], ww[1:])
        else:
            for e in flatten(ll):
                s.Add((ww == 0) <= (e == 0))
Example 5: calc_fscore
def calc_fscore(r, p):
    """
    given recall and precision arrays, calculate the f-measure (f-score)
    """
    a = array(zip(flatten(r), flatten(p)))
    r, p = a[:, 0], a[:, 1]
    idx = where(r)
    r, p = r[idx], p[idx]
    F = (2*p*r/(p+r)).mean()
    return F
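A toy check of calc_fscore's logic with made-up numbers: flatten lets the per-song recall/precision lists be zipped point by point, zero-recall points are dropped, and the per-point F1 values are averaged.

import numpy as np
from matplotlib.cbook import flatten

r = [[0.5, 0.0], [1.0]]          # hypothetical per-song recall values
p = [[0.5, 1.0], [0.8]]          # hypothetical per-song precision values
a = np.array(list(zip(flatten(r), flatten(p))))
rr, pp = a[:, 0], a[:, 1]
idx = np.where(rr)               # drop points with zero recall
rr, pp = rr[idx], pp[idx]
print((2 * pp * rr / (pp + rr)).mean())   # ~0.694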
Example 6: evaluate_classifier
def evaluate_classifier(fname='saved_data.pickle', use_pca=True, null_clf=False, eps=finfo(float).eps, clip=-100):
    """
    Gaussian classifier for non-tuned / autotuned equal-temperament magnitudes
    """
    with open(fname, 'rb') as f:
        data = pickle.load(f)
    a0 = array([[dd['nontuned_mags'] for dd in d] for d in data[1::2]])
    a1 = array([[dd['autotuned_mags'] for dd in d] for d in data[1::2]])
    P, TP, FN, FP, TN, PR, RE = [], [], [], [], [], [], []
    T0W0, T0W1, T1W0, T1W1 = [], [], [], []
    for song in arange(len(a0)):
        # per-song precision / recall
        idx = setdiff1d(arange(len(a0)), [song])
        train0 = dB(array([a for a in flatten(a0[idx])]))
        train1 = dB(array([a for a in flatten(a1[idx])]))
        test0 = dB(array([a for a in flatten(a0[song])]))
        test1 = dB(array([a for a in flatten(a1[song])]))
        if use_pca:
            u, s, v = svd(array([train0, train1]).T, 0)
            train0 = u[:, 0]
            train1 = u[:, 1]
            test = array([test0, test1]).T
            test = dot(dot(test, v.T), diag(1./s))
            test0 = test[:, 0]
            test1 = test[:, 1]
        m0, v0 = train0.mean(), train0.var()
        m1, v1 = train1.mean(), train1.var()
        P.append(len(test0))
        t1w0, t1w1 = log(eval_gauss(test1, m0, v0)+eps), log(eval_gauss(test1, m1, v1)+eps)
        t0w0, t0w1 = log(eval_gauss(test0, m0, v0)+eps), log(eval_gauss(test0, m1, v1)+eps)
        if clip != 0:
            t1w0[t1w0 < clip] = clip
            t1w1[t1w1 < clip] = clip
            t0w0[t0w0 < clip] = clip
            t0w1[t0w1 < clip] = clip
        T0W0.append(t0w0)
        T0W1.append(t0w1)
        T1W0.append(t1w0)
        T1W1.append(t1w1)
        TP.append(sum(t1w1 > t1w0))
        FN.append(sum(t1w1 <= t1w0))
        FP.append(sum(t0w1 > t0w0))
        TN.append(sum(t0w1 <= t0w0))
        prec, rec = calc_precrec(t0w0, t0w1, t1w0, t1w1, null_clf)
        PR.append(prec)
        RE.append(rec)
    F = calc_fscore(RE, PR)
    return {'P': array(P), 'TP': array(TP), 'FN': array(FN), 'FP': array(FP), 'TN': array(TN),
            'PR': PR, 'RE': RE, 'F': F, 'T0W0': T0W0, 'T0W1': T0W1, 'T1W0': T1W0, 'T1W1': T1W1}
Example 7: coverage
def coverage(paths, xs, weights):
    """
    Computes coverage of map, using xs as features weighted by weights.
    """
    subset = list(set(cbook.flatten(paths)))
    total = (1 - np.prod(1 - xs[subset], axis=0)).dot(weights)
    return total
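flatten is used here purely to deduplicate: a list of paths (each a list of node indices) is collapsed into one iterable so set() can keep each visited node once. A toy run with invented numbers:

import numpy as np
from matplotlib import cbook

paths = [[0, 1, 2], [2, 3]]                  # two paths sharing node 2
xs = np.array([[0.9], [0.5], [0.1], [0.0]])  # one feature column per node (hypothetical)
weights = np.array([1.0])
subset = list(set(cbook.flatten(paths)))     # unique node indices
total = (1 - np.prod(1 - xs[subset], axis=0)).dot(weights)
print(total)                                 # ~0.955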
Example 8: generate_features
def generate_features(self, depth, subsample_n):
    NA_VAL = -100
    if depth < 1:
        return
    for relation in random.choice(self.relations.keys(), subsample_n, True):
        new_obs_for_rel = apply_transforms(self.relations, [relation], self.entities)
        for ob in frozenset(flatten(new_obs_for_rel)):
            self.new_features.append(lambda x, rel=relation, t=ob: 1 if is_in_relation(x, self.relations[rel], rel, t) else NA_VAL if len(is_in_relation(x, self.relations[rel], rel)) == 0 else 0)
            self.new_justify.append('is in relation %s with %s' % (relation, ob))
        if depth == 2:
            for relation2 in random.choice(self.relations.keys(), 1):
                newer_obs = apply_transforms(self.relations, [relation2], new_obs_for_rel)
                for ob in frozenset(flatten(newer_obs)):
                    self.new_features.append(lambda x, trans=[relation, relation2], t=ob: 1 if t in apply_transforms(self.relations, trans, [x])[0] else NA_VAL if len(apply_transforms(self.relations, trans, [x])[0]) == 0 else 0)
                    self.new_justify.append('is in relations %s,%s with %s' % (relation, relation2, ob))
    pass  # if depth=1, go one relation. if depth=2 go 2 relations and so on...
Example 9: remove
def remove(self):
    for c in cbook.flatten(
            self, scalarp=lambda x: isinstance(x, martist.Artist)):
        c.remove()
    if self._remove_method:
        self._remove_method(self)
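This is the one example that uses flatten's optional scalarp argument: by default flatten recurses into anything iterable, but the predicate lets you declare where recursion should stop (here, at individual Artists, so each one can be removed). A generic hedged sketch of the same idea, using dicts as the "leaf" type instead of Artists:

from matplotlib.cbook import flatten

tree = [{"id": 1}, [{"id": 2}, [{"id": 3}]]]
# without scalarp, flatten would recurse into the dicts and yield their keys;
# the predicate makes each dict a leaf instead
leaves = list(flatten(tree, scalarp=lambda x: isinstance(x, dict)))
print(leaves)   # [{'id': 1}, {'id': 2}, {'id': 3}]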
Example 10: annotations
def annotations(self, shortvarnames=None, debug=False):
    """
    Return a list of TeX-formatted labels.

    The values and errors are formatted so that only the significant digits
    are displayed. Rounding is performed using the decimal package.

    Parameters
    ----------
    shortvarnames : list
        A list of variable names (TeX is allowed) to include in the
        annotations. Defaults to self.shortvarnames.

    Examples
    --------
    >>> # Annotate a Gaussian
    >>> sp.specfit.annotate(shortvarnames=['A','\\Delta x','\\sigma'])
    """
    from decimal import Decimal  # for formatting
    svn = self.shortvarnames if shortvarnames is None else shortvarnames
    # if pars need to be replicated....
    if len(svn) < self.npeaks * self.npars:
        svn = svn * self.npeaks

    parvals = self.parinfo.values
    parerrs = self.parinfo.errors

    loop_list = [
        (
            parvals[ii + jj * self.npars + self.vheight],
            parerrs[ii + jj * self.npars + self.vheight],
            svn[ii + jj * self.npars],
            self.parinfo.fixed[ii + jj * self.npars + self.vheight],
            jj,
        )
        for jj in range(self.npeaks)
        for ii in range(self.npars)
    ]

    label_list = []
    for (value, error, varname, fixed, varnumber) in loop_list:
        log.debug(", ".join([str(x) for x in (value, error, varname, fixed, varnumber)]))
        if fixed or error == 0:
            label = "$%s(%i)$=%8s" % (
                varname,
                varnumber,
                Decimal("%g" % value).quantize(Decimal("%0.6g" % (value))),
            )
        else:
            label = "$%s(%i)$=%8s $\\pm$ %8s" % (
                varname,
                varnumber,
                Decimal("%g" % value).quantize(Decimal("%0.2g" % (min(np.abs([value, error]))))),
                Decimal("%g" % error).quantize(Decimal("%0.2g" % (error))),
            )
        label_list.append(label)

    labels = tuple(mpcb.flatten(label_list))
    return labels
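The "significant digits" trick in the docstring is Decimal.quantize: the second Decimal, built with a %0.2g format, only contributes its exponent, i.e. the decimal place to round to. A stand-alone illustration with hypothetical numbers (the method itself additionally guards with min(|value|, |error|)):

from decimal import Decimal

value, error = 1.23456789, 0.042
print(Decimal("%g" % value).quantize(Decimal("%0.2g" % error)))   # 1.235
print(Decimal("%g" % error).quantize(Decimal("%0.2g" % error)))   # 0.042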
Example 11: create_time_slots
def create_time_slots(day):
    src_slots = dcal.get_working_times(day)
    slots = [0, src_slots, 24*60]
    slots = tuple(cbook.flatten(slots))
    slots = zip(slots[:-1], slots[1:])
    # balance non working slots
    work_time = slot_sum(src_slots)
    non_work_time = sum_time - work_time
    non_slots = filter(lambda s: s not in src_slots, slots)
    non_slots = map(lambda s: (s[1] - s[0], s), non_slots)
    non_slots.sort()
    slots = []
    i = 0
    for l, s in non_slots:
        delta = non_work_time / (len(non_slots) - i)
        delta = min(l, delta)
        non_work_time -= delta
        slots.append((s[0], s[0] + delta))
        i += 1
    slots.extend(src_slots)
    slots.sort()
    return slots
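The flatten call above is what merges the day's boundaries with the working intervals: [0, src_slots, 24*60] mixes scalars with a list of (start, end) tuples, flatten turns it into one flat tuple of times, and zip re-pairs consecutive times into slots. A toy run with made-up working hours (minutes since midnight):

from matplotlib.cbook import flatten

src_slots = [(9 * 60, 12 * 60), (13 * 60, 17 * 60)]   # 09:00-12:00 and 13:00-17:00
slots = tuple(flatten([0, src_slots, 24 * 60]))
print(slots)                             # (0, 540, 720, 780, 1020, 1440)
print(list(zip(slots[:-1], slots[1:])))  # [(0, 540), (540, 720), (720, 780), (780, 1020), (1020, 1440)]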
Example 12: facet_plot
def facet_plot(dframe, facets, props, ydata, layout=None, newfig=True, figsize=None,
               legend=True, individual_legends=False, hide_additional_axes=True, zorder='default', **kwargs):
    if newfig:
        nr_facets = len(dframe.groupby(facets))
        if layout is None:
            for i in range(2, nr_facets // 2):
                if nr_facets % i == 0:
                    layout = (nr_facets // i, i)
                    break
            if layout is None:
                n = int(np.ceil(nr_facets / 2))
                layout = (n, 2)
        fig, axs = plt.subplots(
            nrows=layout[0],
            ncols=layout[1],
            sharex=True, sharey=True, figsize=figsize
        )
        if hide_additional_axes:
            for ax in fig.axes[nr_facets:]:
                ax.set_axis_off()
    else:
        fig = plt.gcf()
        axs = fig.axes

    cycl = cycle(plt.rcParams['axes.prop_cycle'])
    prop_styles = {ps: next(cycl) for ps, _ in dframe.groupby(props)}

    if zorder == 'default':
        dz = 1
        zorder = 0
    elif zorder == 'reverse':
        dz = -1
        zorder = 0
    else:
        dz = 0

    if legend:
        ax0 = fig.add_subplot(111, frame_on=False, zorder=-9999)
        ax0.set_axis_off()
        plot_kwargs = kwargs.copy()
        for k in ['logx', 'logy', 'loglog']:
            plot_kwargs.pop(k, None)
        for l, p in prop_styles.items():
            ax0.plot([], label=str(l), **p, **plot_kwargs)
        ax0.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize='x-small')

    for ax, (ps, df) in zip(flatten(axs), dframe.groupby(facets, squeeze=False)):
        for prop, df_prop in df.groupby(props):
            df_prop[ydata].plot(ax=ax, label=str(prop), zorder=zorder, **prop_styles[prop], **kwargs)
            zorder += dz
        # ax.title(0.5, 0.1, '{},{}'.format(*ps), transform=ax.transAxes, fontsize='small')
        ax.set_title('; '.join([str(x) for x in ps]) if isinstance(ps, tuple) else str(ps), fontsize='x-small')
        if individual_legends:
            ax.legend(fontsize='x-small')
        plt.sca(ax)

    rect = (0, 0, 0.85, 1) if legend else (0, 0, 1, 1)
    plt.tight_layout(rect=rect, pad=0.1)
    return fig, axs
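A hedged sketch of how facet_plot might be called; the DataFrame and column names are invented, and groupby(..., squeeze=False) assumes an older pandas. The point of flatten(axs) in the loop above is that plt.subplots returns a 2-D array of Axes for multi-row layouts, and flatten yields the Axes one by one regardless of that shape.

import numpy as np
import pandas as pd

# hypothetical tidy data: one facet per 'sample', one line per 'method'
df = pd.DataFrame({
    "sample": np.repeat(list("ABCD"), 20),
    "method": np.tile(np.repeat(["fast", "slow"], 10), 4),
    "y": np.random.rand(80),
})
fig, axs = facet_plot(df, facets="sample", props="method", ydata="y")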
Example 13: annotations
def annotations(self):
    label_list = [(
        "$A(%i)$=%6.4g $\\pm$ %6.4g" % (jj, self.mpp[0+jj*self.npars], self.mpperr[0+jj*self.npars]),
        "$x(%i)$=%6.4g $\\pm$ %6.4g" % (jj, self.mpp[1+jj*self.npars], self.mpperr[1+jj*self.npars]),
        "$\\sigma(%i)$=%6.4g $\\pm$ %6.4g" % (jj, self.mpp[2+jj*self.npars], self.mpperr[2+jj*self.npars])
    ) for jj in range(self.npeaks)]
    labels = tuple(mpcb.flatten(label_list))
    return labels
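Both annotations() variants lean on the fact that flatten treats strings as scalars (its default predicate is is_scalar_or_string), so a list of per-peak label tuples flattens to whole label strings rather than individual characters. A minimal hedged illustration with invented labels:

from matplotlib.cbook import flatten

label_list = [("$A(0)$=1.0", "$x(0)$=5.2"), ("$A(1)$=0.7", "$x(1)$=9.9")]
print(tuple(flatten(label_list)))
# ('$A(0)$=1.0', '$x(0)$=5.2', '$A(1)$=0.7', '$x(1)$=9.9')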
Example 14: linear_labels
def linear_labels(nestedlabels):
    def reclabels(labels):
        lsh = utils.list_shape(labels)
        if len(lsh) == 1 or len(lsh) == 0:
            return labels  # list of strings
        first, rest = labels[0], labels[1:]
        return [[x] + reclabels(rest) for x in first]
    nested = reclabels(nestedlabels)
    return list(flatten(nested))
Example 15: find_tagging
def find_tagging(top_node, train_point):
    # finds tagging without the query func...
    if type(top_node.justify) == str and (top_node.justify.startswith('leafed') or top_node.justify.startswith('no')):
        return top_node.chosen_tag
    if train_point == []:
        return find_tagging(top_node.left_son if [] == top_node.left_son.objects[-1] else top_node.right_son, train_point)
    if train_point in list(flatten(top_node.left_son.objects)):
        return find_tagging(top_node.left_son, train_point)
    return find_tagging(top_node.right_son, train_point)