本文整理汇总了Python中pylab.waitforbuttonpress函数的典型用法代码示例。如果您正苦于以下问题:Python waitforbuttonpress函数的具体用法?Python waitforbuttonpress怎么用?Python waitforbuttonpress使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了waitforbuttonpress函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: getCoordinate
def getCoordinate(direction='both', axh=None, fig=None):
    """Interactively select a single coordinate on a matplotlib axes.

    Similar to ginput for a single point: each left click draws guide
    line(s) at the clicked position; a right click finishes.

    Parameters
    ----------
    direction : str
        'horizontal'/'hor', 'vertical'/'ver', or 'both' — which guide
        lines to draw at the selected position.
    axh : axes, optional
        Axes to use; defaults to the current axes.
    fig : figure, optional
        Figure to use; defaults to the current figure.

    Returns
    -------
    tuple
        (x, y) of the last selected point.
    """
    if not axh:
        axh = pl.gca()
    if not fig:
        fig = pl.gcf()
    # BUG FIX: the original tested `direction is 'horizontal' or 'hor' or
    # 'both'`, which is always truthy (non-empty string literals), so both
    # flags were always set.  Test membership instead.
    hor = direction in ('horizontal', 'hor', 'both')
    ver = direction in ('vertical', 'ver', 'both')
    # BUG FIX: the original callback assigned a *local* `finished`, which
    # never reached the enclosing loop, so the loop could not terminate via
    # the right-click flag.  Use shared mutable state instead.
    state = {'finished': False}

    def button_press_callback(event):
        # Right mouse button (3) inside the axes signals completion.
        if event.inaxes and event.button == 3:
            state['finished'] = True

    cid = fig.canvas.mpl_connect('button_press_event', button_press_callback)
    print("Select a coordinate, finish with right click.")
    linh = []
    pos = None
    while not state['finished']:
        # Remove the guide lines from the previous click before redrawing.
        for tlinh in linh:
            tlinh.remove()
        linh = []
        pl.draw()
        pos = pl.ginput(1)[0]
        if hor:
            linh.append(pl.axvline(pos[0]))
        if ver:
            linh.append(pl.axhline(pos[1]))
        pl.draw()
        pl.waitforbuttonpress()
    # Disconnect so repeated calls don't accumulate stale handlers.
    fig.canvas.mpl_disconnect(cid)
    fig.canvas.draw()
    return pos
示例2: once
def once():
    """Grab one Kinect frame, update the block model and redraw the preview."""
    global depth, rgb
    # Make the preview's GL context current before the draw calls below.
    preview.canvas.SetCurrent()
    # Pull the latest synchronized depth and RGB frames from the sensor.
    opennpy.sync_update()
    depth,_ = opennpy.sync_get_depth()
    rgb,_ = opennpy.sync_get_video()
    main.update_frame(depth, rgb)
    blockdraw.clear()
    #blockdraw.show_grid('o1', main.occvac.occ, color=np.array([1,1,0,1]))
    # Use per-voxel colors when the stencil captured RGB; otherwise flat pink.
    if 'RGB' in stencil.__dict__:
        blockdraw.show_grid('occ', grid.occ, color=grid.color)
    else:
        blockdraw.show_grid('occ', grid.occ, color=np.array([1,0.6,0.6,1]))
    preview.clearcolor=[0,0,0,0]
    preview.flag_drawgrid = True
    # Prefer the display matrix once alignment has produced a corrected pose.
    if 'R_correct' in main.__dict__:
        preview.modelmat = main.R_display
    else:
        preview.modelmat = main.R_aligned
    preview.Refresh()
    window.Refresh()
    # Short pause so the matplotlib event loop can process GUI events.
    pylab.waitforbuttonpress(0.005)
示例3: once
def once():
    """Advance one frame (dataset replay or live Kinect), mask and classify it."""
    global depth, rgb
    if not FOR_REAL:
        # Replay a prerecorded dataset.
        dataset.advance()
        depth = dataset.depth
        rgb = dataset.rgb
    else:
        # Live capture from the sensor.
        opennpy.sync_update()
        depth,_ = opennpy.sync_get_depth()
        rgb,_ = opennpy.sync_get_video()

    def from_rect(m,rect):
        # Crop array m to the ((left, top), (right, bottom)) rectangle.
        # NOTE(review): defined but never called in this snippet.
        (l,t),(r,b) = rect
        return m[t:b,l:r]

    global mask, rect, modelmat
    try:
        (mask,rect) = preprocess.threshold_and_mask(depth,config.bg)
    except IndexError:
        # No foreground found this frame; skip it.
        return
    cv.ShowImage('mask',mask.astype('u1')*255)
    global label_image
    label_image = classify.predict(depth)
    # Shift labels by 1 and scale by 100 so classes show as distinct 8-bit levels.
    cv.ShowImage('label_image', ((label_image[0]+1)*100*mask).astype('u1'))
    # Short pause so the matplotlib event loop can process GUI events.
    pylab.waitforbuttonpress(0.03)
示例4: once
def once():
    """Advance one frame, redraw the block grid and push it to Minecraft."""
    global depth, rgb
    if not FOR_REAL:
        # Replay a prerecorded dataset.
        dataset.advance()
        depth = dataset.depth
        rgb = dataset.rgb
    else:
        # Live capture from the sensor.
        opennpy.sync_update()
        depth,_ = opennpy.sync_get_depth()
        rgb,_ = opennpy.sync_get_video()
    main.update_frame(depth, rgb)
    blockdraw.clear()
    # Use per-voxel colors when the stencil captured RGB; otherwise flat pink.
    if 'RGB' in stencil.__dict__:
        blockdraw.show_grid('occ', grid.occ, color=grid.color)
    else:
        blockdraw.show_grid('occ', grid.occ, color=np.array([1,0.6,0.6,1]))
    window.clearcolor=[0,0,0,0]
    window.flag_drawgrid = True
    # NOTE(review): the next three statements are grouped under the check
    # because they read main.R_correct/R_display, which only exist once
    # alignment has succeeded — confirm against the original file.
    if 'R_correct' in main.__dict__:
        window.modelmat = main.R_display
        # Rotate/translate the occupancy grid and send it to Minecraft.
        g = blockcraft.translated_rotated(main.R_correct, grid.occ)
        talk_to_minecraft(g)
    window.Refresh()
    # Short pause so the matplotlib event loop can process GUI events.
    pylab.waitforbuttonpress(0.005)
    import sys
    sys.stdout.flush()
示例5: animate_random
def animate_random(max_iters=1000, mod=100):
    """Render the object in front of the camera and animate ICP alignment.

    max_iters: maximum ICP iterations; mod: redraw every `mod` iterations.
    """
    global pnew, points_range
    # Apply a perturb to points_p
    # Center the object and push it 3 units in front of the camera.
    obj.RT = np.eye(4, dtype='f')
    obj.RT[:3,3] = -obj.vertices[:,:3].mean(0)
    obj.RT[:3,3] += [0,0,-3.0]
    RT = obj.RT
    prev_rimg = obj.range_render(camera.kinect_camera())
    window.canvas.SetCurrent()
    pnew = prev_rimg.point_model(True)
    points_range = pnew
    # NOTE(review): this block is disabled with `if 0:`.  It references M,
    # rimg and pm, which are undefined outside it, so the ICP loop below is
    # grouped inside the disabled block — confirm against the original file.
    if 0:
        obj.RT = np.dot(RT, M)
        rimg = obj.range_render(camera.kinect_camera())
        window.canvas.SetCurrent()
        pm = rimg.point_model(True)
        points_range = pm
        for iters in range(max_iters):
            pnew, err, npairs, uv = fasticp.fast_icp(rimg, pnew, 1000, dist=0.005)
            if iters % mod == 0:
                # print '%d iterations, [%d] RMS: %.3f' % (iters, npairs, np.sqrt(err))
                window.Refresh()
                pylab.waitforbuttonpress(0.02)
        pnew = pm
    window.Refresh()
    pylab.waitforbuttonpress(0.02)
示例6: perturb
def perturb(max_iters=100, mod=10):
    """Apply a random rigid perturbation to the object and re-align with ICP.

    max_iters: maximum alignment iterations; mod: redraw every `mod` iterations.
    """
    global pnew, uv, err, points_range, rimg, range_image
    # Apply a perturb to points_p
    # Center the object and push it 3 units in front of the camera.
    obj.RT = np.eye(4, dtype='f')
    obj.RT[:3,3] = -obj.vertices[:,:3].mean(0)
    obj.RT[:3,3] += [0,0,-3.0]
    # Rotated object view
    RT = obj.RT
    rp = random_perturbation().astype('f')
    obj.RT = np.dot(rp, obj.RT)
    range_image = obj.range_render(camera.kinect_camera())
    obj.RT = RT  # restore the unperturbed pose
    points_range = range_image.point_model(True)
    # Original object view
    rimg = obj.range_render(camera.kinect_camera())
    pnew = rimg.point_model()
    # Estimate the transformation rp
    for iters in range(max_iters):
        npairs, pnew = model.align(range_image, pnew, rtmodel.RATE1, rtmodel.DIST1, 6)
        #pnew, err, npairs, uv = fasticp.fast_icp(range_image, pnew, 0.1, dist=0.05)
        # NOTE(review): `or 1` forces a redraw every pass, and the
        # unconditional `break` below means only ONE alignment step ever
        # runs — this looks like leftover debugging; kept as-is.
        if iters % mod == 0 or 1:
            #print '%d iterations, [%d] RMS: %.3f' % (iters, npairs, np.sqrt(err))
            window.Refresh()
            pylab.waitforbuttonpress(0.02)
        break
    window.Refresh()
示例7: error
def error(x):
    """Objective for the line-fit optimizer: lower is better.

    ``x`` is a (theta, dist) pair describing a candidate dividing line.
    Visualizes the candidate on the global image ``d`` and returns the
    inverse of its score (with a small epsilon to avoid division by zero).
    """
    theta, dist = x
    candidate = middle_offset(theta, dist, size)
    inv_score = 1. / (d.score(candidate, True) + 1e-5)
    clf()
    imshow(d.debug * d.image)
    pylab.waitforbuttonpress(0.01)
    return inv_score
示例8: show_depth
def show_depth(name, depth):
    """Display a depth image in an OpenCV window as inverse depth.

    Inverse depth (1024/depth) renders near objects bright, which is
    easier to read than raw depth values.

    Parameters
    ----------
    name : str
        OpenCV window title.
    depth : ndarray
        Depth image in sensor units.  NOTE(review): zero-valued pixels
        map to inf under the division — presumably acceptable for
        display; confirm depth is pre-masked.
    """
    # Removed four lines of dead, commented-out alternative display code
    # (cv.CreateImage / colormap variants) that were never executed.
    cv2.imshow(name, 1024. / depth)
    # Short pause so the matplotlib event loop can service GUI events.
    pylab.waitforbuttonpress(0.005)
示例9: testing
def testing(self, testFace, visualiseInfo=None):
    """Recognize a test face with the trained SAM model and queue a greeting.

    Runs pattern completion on testFace, finds the nearest training
    neighbour in latent space, builds a spoken-response YARP bottle and
    optionally plots the nearest training image.
    NOTE(review): Python 2 syntax (print statements).
    """
    # Returns the predictive mean, the predictive variance and the axis (pp) of the latent space backwards mapping.
    ret = self.SAMObject.pattern_completion(testFace, visualiseInfo=visualiseInfo)
    mm = ret[0]    # predictive mean
    vv = ret[1]    # predictive variance
    post = ret[3]  # latent-space posterior
    # find nearest neighbour of mm and SAMObject.model.X
    dists = numpy.zeros((self.SAMObject.model.X.shape[0],1))
    facePredictionBottle = yarp.Bottle()
    for j in range(dists.shape[0]):
        dists[j,:] = distance.euclidean(self.SAMObject.model.X.mean[j,:], mm[0].values)
    print "Dist: " + str(testFace.shape)
    nn, min_value = min(enumerate(dists), key=operator.itemgetter(1))
    if self.SAMObject.type == 'mrd':
        # MRD model: also predict through the label view for a per-view variance.
        ret_y = self.SAMObject.model.bgplvms[1]._raw_predict(post.X)
        vv_y = ret_y[1]
        print "With " + str(vv.mean()) + "(" + str(vv_y) + ")" +" prob. error the new image is " + self.participant_index[int(self.SAMObject.model.bgplvms[1].Y[nn,:])]
        textStringOut=self.participant_index[int(self.SAMObject.model.bgplvms[1].Y[nn,:])]
    elif self.SAMObject.type == 'bgplvm':
        print "With " + str(vv.mean()) +" prob. error the new image is " + self.participant_index[int(self.L[nn,:])]
        textStringOut=self.participant_index[int(self.L[nn,:])]
    # Low predictive variance => confident recognition: pick a random greeting.
    if (vv.mean()<0.00012):
        choice=numpy.random.randint(4)
        if (choice==0):
            facePredictionBottle.addString("Hello " + textStringOut)
        elif(choice==1):
            facePredictionBottle.addString("I am watching you " + textStringOut)
        elif(choice==2):
            facePredictionBottle.addString(textStringOut + " could you move a little you are blocking my view of the outside")
        else:
            facePredictionBottle.addString(textStringOut + " will you be my friend")
    # Otherwise ask for updated name... (TODO: add in updated name)
    else:
        facePredictionBottle.addString("I think you are " + textStringOut + " but I am not sure, please confirm?")
    # Plot the training NN of the test image (the NN is found in the INTERNAl, compressed (latent) memory space!!!)
    if visualiseInfo is not None:
        fig_nn = visualiseInfo['fig_nn']
        fig_nn = pb.figure(11)
        pb.title('Training NN')
        fig_nn.clf()
        pl_nn = fig_nn.add_subplot(111)
        pl_nn.imshow(numpy.reshape(self.SAMObject.recall(nn),(self.imgHeightNew, self.imgWidthNew)), cmap=plt.cm.Greys_r)
        pb.title('Training NN')
        pb.show()
        pb.draw()
        pb.waitforbuttonpress(0.1)
    # Only send the greeting when the speech subsystem reports it is quiet.
    self.speakStatusPort.write(self.speakStatusOutBottle, self.speakStatusInBottle)
    if( self.speakStatusInBottle.get(0).asString() == "quiet"):
        self.outputFacePrection.write(facePredictionBottle)
    facePredictionBottle.clear()
示例10: once
def once():
    """Step the stereo dataset forward and display both foreground masks."""
    dataset.advance()
    depth_left = dataset.depthL
    depth_right = dataset.depthR
    # Threshold each view against its own background model.
    mask_left, rect_left = preprocess.threshold_and_mask(depth_left, config.bgL)
    mask_right, rect_right = preprocess.threshold_and_mask(depth_right, config.bgR)
    show_mask("maskL", mask_left.astype("f"), rect_left)
    show_mask("maskR", mask_right.astype("f"), rect_right)
    # Short pause so the matplotlib event loop can process GUI events.
    pylab.waitforbuttonpress(0.01)
示例11: check_dataset
def check_dataset(dataset, labels, label_map, index, shape=(32, 32)):
    """Display one flattened image from a dataset and print its label.

    Parameters
    ----------
    dataset : sequence
        Flattened images, each reshapeable to ``shape``.
    labels : sequence
        One-hot label vectors aligned with ``dataset``.
    label_map : mapping
        Maps a class index to a human-readable label.
    index : int
        Which sample to display.
    shape : tuple, optional
        (height, width) of the stored images.  Generalized from the
        previously hard-coded (32, 32); the default preserves the old
        behavior.
    """
    data = np.uint8(dataset[index]).reshape(shape)
    # Position of the 1 in the one-hot vector = class index.
    i = np.argwhere(labels[index] == 1)[0][0]
    import matplotlib.pyplot as plt  # im.show may not be implemented
    # in opencv-python on Tk GUI (such as Linux)
    import pylab
    plt.ion()  # non-blocking show; waitforbuttonpress controls timing
    plt.imshow(data)
    # Continue after a button press, or automatically after 5 seconds.
    pylab.waitforbuttonpress(timeout=5)
    print("label:", label_map[i])
示例12: animate
def animate():
    """Endlessly generate random dividing lines and visualize their traversal."""
    while True:
        candidate = random_middle_line()
        divider = DividingLine(synthetic_image(line=candidate))
        # Traversal in debug mode populates divider.debug for display.
        divider.traverse(candidate, True)
        pylab.clf()
        pylab.imshow(divider.debug)
        pylab.waitforbuttonpress(0.01)
示例13: sample_rays
def sample_rays(n_rays=10000, reset=False):
    """Trace n_rays paths from source to sink, accumulate line geometry and
    colors for display, then redraw the histogram and update the filter.

    n_rays: number of new rays to trace this call.
    reset: discard previously accumulated paths/geometry first.
    """
    global paths
    global total_rays
    global line_verts, line_colors
    # (Re)initialize the accumulators on first call or explicit reset.
    if reset or not 'line_verts' in globals():
        paths = []
        total_rays = 0
        line_verts = np.empty((0,3),'f')
        line_colors = np.empty((0,3),'f')
    total_rays += n_rays
    line_verts_ = []
    line_colors_ = []
    ps = mycybvh.sample_rays(source, sink, sinkrad, n_rays, ROULETTE)
    # Node-type codes returned by the tracer, indexed by p['ntype'].
    keys = ['source','sink','diverge','scaflect']
    for path in ps:
        p_ = []
        # Starting point of the first segment.
        x1 = path[0]['origin']
        x1 = x1['x'], x1['y'], x1['z']
        orgn = True  # True only for the first leg leaving the source
        for p in path[1:]:
            o = p['origin']
            d = p['direction']
            ntype = keys[p['ntype']]
            cumdist = p['cumdist']
            origin = o['x'],o['y'],o['z']
            direction = d['x'],d['y'],d['z']
            x2 = origin
            # Two color entries per segment: one per line endpoint.
            if ntype == 'sink':
                line_colors_ += 2*((1,.6,.6),)   # reddish: segment reaches the sink
            elif orgn:
                line_colors_ += 2*((.6,.6,1),)   # bluish: first leg from the source
                orgn=False
            else:
                line_colors_ += 2*((1,1,1),)     # white: intermediate bounce
            x2 = origin
            line_verts_.append(x1)
            line_verts_.append(x2)
            x1 = x2  # next segment starts where this one ended
            p_.append((origin, direction, ntype, cumdist))
        paths.append(p_)
    # Append this batch to the persistent arrays only if anything was traced.
    if line_colors_:
        line_colors = np.vstack((line_colors, np.array(line_colors_,'f')))
        line_verts = np.vstack((line_verts, np.array(line_verts_,'f')))
    window.Refresh()
    pylab.clf();
    # Histogram of arrival times weighted by pressure contribution.
    times, pressure = energy_contributions()
    pylab.hist(times,weights=pressure,bins=100, range=(0,0.2))
    pylab.waitforbuttonpress(0.03)
    update_filter()
示例14: testing
def testing(self, testFace, choice, visualiseInfo=None):
    """Recognize a test face and, per the `choice` command, confirm identity.

    Runs pattern completion on testFace, finds the nearest training
    neighbour in latent space, queues a YARP response and optionally
    plots the nearest training image.  Returns the latent axis (ret[2]).
    NOTE(review): Python 2 syntax (print statements).
    """
    # Returns the predictive mean, the predictive variance and the axis (pp) of the latent space backwards mapping.
    #mm,vv,pp=self.SAMObject.pattern_completion(testFace, visualiseInfo=visualiseInfo)
    ret=self.SAMObject.pattern_completion(testFace, visualiseInfo=visualiseInfo)
    mm = ret[0]    # predictive mean
    vv = ret[1]    # predictive variance
    post = ret[3]  # latent-space posterior
    # find nearest neighbour of mm and SAMObject.model.X
    dists = numpy.zeros((self.SAMObject.model.X.shape[0],1))
    facePredictionBottle = yarp.Bottle()
    for j in range(dists.shape[0]):
        dists[j,:] = distance.euclidean(self.SAMObject.model.X.mean[j,:], mm[0].values)
    nn, min_value = min(enumerate(dists), key=operator.itemgetter(1))
    if self.SAMObject.type == 'mrd':
        print "With " + str(vv.mean()) +" prob. error the new image is " + self.participant_index[int(self.SAMObject.model.bgplvms[1].Y[nn,:])]
        textStringOut=self.participant_index[int(self.SAMObject.model.bgplvms[1].Y[nn,:])]
    elif self.SAMObject.type == 'bgplvm':
        print "With " + str(vv.mean()) +" prob. error the new image is " + self.participant_index[int(self.L[nn,:])]
        textStringOut=self.participant_index[int(self.L[nn,:])]
    # asInt()==16 presumably selects the identification command; the variance
    # threshold splits confident vs. uncertain answers — verify against caller.
    if(choice.get(0).asInt() == 16 and vv.mean()<0.00012):
        facePredictionBottle.addString("You are " + textStringOut)
    elif(choice.get(0).asInt() == 16 and vv.mean()>0.00012):
        facePredictionBottle.addString("I think you are " + textStringOut + " but I am not sure, please confirm?")
    # Plot the training NN of the test image (the NN is found in the INTERNAl, compressed (latent) memory space!!!)
    if visualiseInfo is not None:
        fig_nn = visualiseInfo['fig_nn']
        fig_nn = pb.figure(11)
        pb.title('Training NN')
        fig_nn.clf()
        pl_nn = fig_nn.add_subplot(111)
        pl_nn.imshow(numpy.reshape(self.SAMObject.recall(nn),(self.imgHeightNew, self.imgWidthNew)), cmap=plt.cm.Greys_r)
        pb.title('Training NN')
        pb.show()
        pb.draw()
        pb.waitforbuttonpress(0.1)
    # Only send the response when the speech subsystem reports it is quiet.
    self.speakStatusPort.write(self.speakStatusOutBottle, self.speakStatusInBottle)
    if( self.speakStatusInBottle.get(0).asString() == "quiet"):
        self.outputFacePrection.write(facePredictionBottle)
    facePredictionBottle.clear()
    #return pp
    return ret[2]
示例15: click_point
def click_point(im):
    """Display an image and return the data coordinates of the first click.

    Parameters
    ----------
    im : array-like
        Image passed to ``pylab.imshow``.

    Returns
    -------
    tuple
        (xdata, ydata) of the first mouse click inside the axes.
    """
    fig = pylab.figure(1)
    pylab.imshow(im)
    point = []

    def pick(event):
        # BUG FIX: ignore clicks outside the axes, where xdata/ydata are
        # None (the original appended (None, None) and returned it).
        if event.xdata is not None and event.ydata is not None:
            point.append((event.xdata, event.ydata))

    cid = fig.canvas.mpl_connect('button_press_event', pick)
    print("Click a point")
    while not point:
        pylab.waitforbuttonpress()
    # BUG FIX: disconnect the handler so repeated calls don't accumulate
    # stale callbacks on the figure.
    fig.canvas.mpl_disconnect(cid)
    print("Ok!", point)  # modernized from the Python 2 print statement
    return point[0]