This article collects typical usage examples of the Python util.Timer class. If you are wondering what the Timer class does and how to use it, the selected examples below may help.
The following shows 15 code examples of the Timer class, sorted by popularity by default.
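Note that these snippets come from different projects, so util.Timer is not one shared class: some timers count down (restart(), timeLeft()), some work like a stopwatch (tic()/toc(), total(), start()/stop()), and one behaves like threading.Timer. For readers who want to run the countdown-style state-machine examples (1, 3, 10, 13, 14, 15), here is a minimal sketch of a Timer with the interface those snippets appear to assume; it is inferred from usage and is not taken from any of the original repositories.

import time

class Timer(object):
    """Countdown sketch: Timer(seconds), restart(), timeLeft()."""
    def __init__(self, duration):
        self.duration = duration      # countdown length in seconds (assumed unit)
        self.restart()

    def restart(self):
        # push the deadline out to `duration` seconds from now
        self._deadline = time.time() + self.duration

    def timeLeft(self):
        # remaining seconds; 0.0 (falsy) once expired, since the examples
        # test expiry with `not self.pathLost.timeLeft()`
        return max(0.0, self._deadline - time.time())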
Example 1: FoundState
class FoundState(object):
    def __init__(self):
        # after the gate has not been seen for 2 secs, quit
        self.pathLost = Timer(2)
        self.centers = []
        sw3.Forward(.3).start()

    def processFrame(self, frame):
        print "found state"
        path = vision.ProcessFrame(frame)
        if path.found:
            print "path found"
            self.pathLost.restart()
            """
            Find out how many pixels the gate is from the center.
            The gate's center gives the pixel index the gate is over;
            subtracting the middle pixel index from it yields a positive
            value if the gate is to the left and a negative value if the
            gate is to the right.
            """
            print("got direction %d" % path.orientation)
            sw3.RelativeYaw(path.orientation).start()
        elif not self.pathLost.timeLeft():
            """if the gate has been lost for too long go to gate lost state"""
            return PathLostState()
        print "ret found"
        return self

    def cont(self):
        # gate missions never stop while we see the gate
        return True
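FoundState and the other state classes in this article are meant to be driven by a small mission loop. The loop below is only a hedged sketch of that pattern: get_frame is a hypothetical frame source, and the convention that processFrame returns the next state while cont() says whether to keep going is inferred from the examples themselves.

def run_mission(initial_state, get_frame):
    # Hypothetical driver: not part of the original project.
    state = initial_state
    while state.cont():
        frame = get_frame()                # assumed frame source (e.g. a camera wrapper)
        state = state.processFrame(frame)  # each state returns the next state (often itself)
    return state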
Example 2: run
def run(self, image_name, get_label=False, do_detection=1):
    """detection and extraction with max score box"""
    ### for web demo
    #caffe.set_mode_gpu()
    #print "do_detection: ", do_detection
    if do_detection:
        t1 = Timer()
        t1.tic()
        image = self.detect(image_name)
        t1.toc('Detect time: ')
        #print "Detection has done"
    else:
        image = cv2.imread(image_name)
        #image = imresize(im, 300)
    t2 = Timer()
    t2.tic()
    image = pad(image, size=224)
    #image = pad(image)
    features = extraction.forward(self.net_e, image, self.transformer)
    r = np.squeeze(features['pool5/7x7_s1'].data[0])
    #features2 = extraction.forward(self.net_e2, image, self.transformer2)
    #r2 = np.squeeze(features2['pool5/7x7_s1'].data[0])
    #r = r2
    #r = np.hstack((r, r2)).copy()
    t2.toc('extract time: ')
    #start = time.time()
    if self.pca is not None:
        r = self.pca.transform(r)[0, :]
        #print 'pca time: ', time.time() - start
    r = r/norm(r)
    if get_label:
        label = np.squeeze(features['prob'].data[0].copy())
        return r, label
    return r
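Examples 2 and 5 time individual stages with a stopwatch-style Timer: tic() marks the start and toc(label) prints the elapsed time. A minimal sketch of that interface, assuming toc simply prints and returns the elapsed seconds:

import time

class Timer(object):
    """Stopwatch sketch: tic() starts timing, toc(msg) reports elapsed seconds."""
    def tic(self):
        self._start = time.time()

    def toc(self, msg=''):
        elapsed = time.time() - self._start
        print('%s%.3fs' % (msg, elapsed))   # e.g. "Detect time: 0.123s"
        return elapsed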
Example 3: OnwardState
class OnwardState(object):
    def __init__(self):
        # after the path has not been seen for 2 secs, quit
        self.pathLost = Timer(LOSTTIME)
        self.centers = []
        sw3.Forward(SPEED).start()

    def processFrame(self, frame):
        print "onward state"
        path = vision.ProcessFrame(frame)
        if path.found:
            self.pathLost.restart()
            sw3.Forward(SPEED).start()
            print "Speed %.2f" % SPEED
        elif not self.pathLost.timeLeft():
            """if the path has been lost for too long go to path lost state"""
            return LostState()
        print "ret found"
        return self

    def cont(self):
        # path missions never stop while we see the path
        return True
Example 4: _collect_metrics_atomic
def _collect_metrics_atomic(self, instance, mor):
    """ Task that collects the metrics listed in the morlist for one MOR
    """
    ### <TEST-INSTRUMENTATION>
    t = Timer()
    ### </TEST-INSTRUMENTATION>
    i_key = self._instance_key(instance)
    server_instance = self._get_server_instance(instance)
    perfManager = server_instance.content.perfManager

    query = vim.PerformanceManager.QuerySpec(maxSample=1,
                                             entity=mor['mor'],
                                             metricId=mor['metrics'],
                                             intervalId=20,
                                             format='normal')
    results = perfManager.QueryPerf(querySpec=[query])
    if results:
        for result in results[0].value:
            if result.id.counterId not in self.metrics_metadata[i_key]:
                self.log.debug("Skipping this metric value, because there is no metadata about it")
                continue
            instance_name = result.id.instance or "none"
            value = self._transform_value(instance, result.id.counterId, result.value[0])
            self.gauge("vsphere.%s" % self.metrics_metadata[i_key][result.id.counterId]['name'],
                       value,
                       hostname=mor['hostname'],
                       tags=['instance:%s' % instance_name]
                       )
    ### <TEST-INSTRUMENTATION>
    self.histogram('datadog.agent.vsphere.metric_colection.time', t.total())
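Examples 4, 6 and 7 instrument a task with t = Timer() at the top and t.total() at the end, i.e. a timer that starts on construction and reports the total elapsed seconds. A hedged sketch of that interface (the real helper in the agent codebase may offer more methods):

import time

class Timer(object):
    """Elapsed-time sketch: starts when constructed, total() returns seconds since then."""
    def __init__(self):
        self._started = time.time()

    def total(self):
        return time.time() - self._started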
Example 5: search
def search(self, image_path, do_detection=1, k=10):
    #queryImage = cv2.imread(image_path)
    t1 = Timer()
    t1.tic()
    #queryFeatures = descriptor.get_descriptor(image_path, multi_box=False)
    queryFeatures = descriptor.get_descriptor(image_path)
    t1.toc('Feature Extraction time: ')
    t2 = Timer()
    t2.tic()
    #p = Profile()
    #results = p.runcall(self.searcher.search, queryFeatures)
    #p.print_stats()
    results, dists, ind = self.searcher.search(queryFeatures, k=5*k)
    #self.reranking(queryFeatures, results, dists, ind, 0.6)
    #self.queryExpansion2(results, dists, ind)
    #self.queryExpansion(queryFeatures, results, dists, ind, top=3)
    t2.toc('Knn search time: ')
    result = []
    # original image
    #result.append(image_path)
    dist = []
    for j, imageName in enumerate(results):
        if imageName not in result:
            result.append(imageName)
            dist.append(dists[j])
    #print result[:k]
    return result[:k], dist[:k]
Example 6: _cache_morlist_process_atomic
def _cache_morlist_process_atomic(self, instance, mor):
    """ Process one item of the self.morlist_raw list by querying the available
    metrics for this MOR and then putting it in self.morlist
    """
    ### <TEST-INSTRUMENTATION>
    t = Timer()
    ### </TEST-INSTRUMENTATION>
    i_key = self._instance_key(instance)
    server_instance = self._get_server_instance(instance)
    perfManager = server_instance.content.perfManager

    self.log.debug(
        "job_atomic: Querying available metrics"
        " for MOR {0} (type={1})".format(mor['mor'], mor['mor_type'])
    )
    available_metrics = perfManager.QueryAvailablePerfMetric(
        mor['mor'], intervalId=REAL_TIME_INTERVAL)

    mor['metrics'] = self._compute_needed_metrics(instance, available_metrics)

    mor_name = str(mor['mor'])
    if mor_name in self.morlist[i_key]:
        # Was already here last iteration
        self.morlist[i_key][mor_name]['metrics'] = mor['metrics']
    else:
        self.morlist[i_key][mor_name] = mor

    self.morlist[i_key][mor_name]['last_seen'] = time.time()

    ### <TEST-INSTRUMENTATION>
    self.histogram('datadog.agent.vsphere.morlist_process_atomic.time', t.total())
Example 7: _cache_metrics_metadata
def _cache_metrics_metadata(self, instance):
    """ Get from the server instance, all the performance counters metadata
    meaning name/group/description... attached with the corresponding ID
    """
    ### <TEST-INSTRUMENTATION>
    t = Timer()
    ### </TEST-INSTRUMENTATION>
    i_key = self._instance_key(instance)
    self.log.info("Warming metrics metadata cache for instance {0}".format(i_key))
    server_instance = self._get_server_instance(instance)
    perfManager = server_instance.content.perfManager

    new_metadata = {}
    for counter in perfManager.perfCounter:
        d = dict(
            name = "%s.%s" % (counter.groupInfo.key, counter.nameInfo.key),
            unit = counter.unitInfo.key,
            instance_tag = 'instance'  # FIXME: replace by what we want to tag!
        )
        new_metadata[counter.key] = d
    self.cache_times[i_key][METRICS_METADATA][LAST] = time.time()

    self.log.info("Finished metadata collection for instance {0}".format(i_key))
    # Reset metadata
    self.metrics_metadata[i_key] = new_metadata

    ### <TEST-INSTRUMENTATION>
    self.histogram('datadog.agent.vsphere.metric_metadata_collection.time', t.total())
Example 8: _send_message
def _send_message(self, message, success=None, error=None, *args, **kwargs):
    if message.startswith('msg'):
        try:
            how_long = int(message.split()[1])
            t = Timer(how_long, self.protocol.incomming_message, self.self_buddy, u"Here's your message %ds later" % how_long)
            t.start()
        except Exception:
            pass
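Example 8 uses yet another interface: Timer(delay, callback, *args) followed by start(), which schedules a one-shot callback much like the standard library's threading.Timer. If the project-specific class is unavailable, the same pattern can be reproduced as below; note that threading.Timer takes its callback arguments via args=, whereas the snippet above passes them positionally.

from threading import Timer

def remind(who, text):
    print('to %s: %s' % (who, text))

# fire the callback once, 5 seconds from now
t = Timer(5, remind, args=('buddy', "Here's your message 5s later"))
t.start()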
Example 9: __init__
def __init__(self, img, scalar, time, onColor=(230, 0, 0),
             offColor=(30, 30, 30)):
    """'time' in ms."""
    size = tuple(int(scalar*x) for x in img.get_size())
    Timer.__init__(self, time, ANSWER_TIMEOUT)
    JeopGameSurface.__init__(self, size)
    self._front = pygame.transform.smoothscale(img, size)
    self.offColor = offColor
    self.onColor = onColor
    self.dirty = 0
    self._draw_off()
Example 10: FoundState
class FoundState(object):
    def __init__(self):
        # after the dice have not been seen for 2 secs, quit
        self.diceLost = Timer(LOSTTIME)
        self.centers = []
        self.pastDice = False
        sw3.Forward(.1).start()

    def processFrame(self, frame):
        print "found state"
        dice = vision.ProcessFrame(frame)
        if dice.found:
            print "found dice"
            self.diceLost.restart()
            (x, y, _) = dice.closestLoc(frame)
            h, w, _ = frame.shape
            heightError = h/2 - y
            print('modifying depth by: %.3f' % (heightError / PIXTODEPTH))
            sw3.RelativeDepth(heightError / PIXTODEPTH).start()
            print "x is : ", x
            widthError = x - w/2
            print "w is : ", widthError
            print('turning: %.3f' % (widthError / PIXTODEPTH))
            if widthError > 0:
                print "<<"
                sw3.RelativeYaw(.0001).start()
            else:
                print ">>"
                sw3.RelativeYaw(-.0001).start()
        #elif not self.diceLost.timeLeft():
        #    """if the dice has been lost for too long go to path lost state"""
        #    return LostState()
        if not self.diceLost.timeLeft():
            print "stopping seawolf"
            sw3.RelativeDepth(0).start()
            sw3.Strafe(0).start()
            self.pastDice = True
        print "ret found"
        return self

    def cont(self):
        # the mission continues until we have passed the dice
        return not self.pastDice
Example 11: generate_sudoku
def generate_sudoku(self, target=25):
    search = AStarSearch()
    base_sudoku = self.generate_full_sudoku()
    timer = Timer()
    if self.__kind == 'reverse':
        problem = ReverseSudokuGenerationProblem(Sudoku(), target, self.solver)
    else:
        problem = SudokuGenerationProblem(base_sudoku, target, self.solver)
    timer.start()
    node, cnt_explored = search.search(problem, h=lambda n: problem.value(n.state))
    time = timer.stop()
    return node.state, len(node.state), cnt_explored, time
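Example 11 expects timer.start() followed by timer.stop(), with stop() returning the elapsed time. A minimal sketch under that assumption:

import time

class Timer(object):
    """start()/stop() sketch: stop() returns seconds elapsed since start()."""
    def start(self):
        self._start = time.time()

    def stop(self):
        return time.time() - self._start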
Example 12: tick
def tick(self, ms):
    self.player.move(ms)
    if self.player.collide_rect.left >= self.world_map.pixel_width - 16:
        debug('Level complete')
        return True
    # Handle collisions with walls/platforms
    try:
        self.collide_walls(ms)
    except OutOfBounds:
        debug('%s out of bounds', self.player.collide_rect)
        raise FellOffMap()
    # Check for jump every frame, in case user is holding down the button
    if not self.jump_wait_timer and self.input_state and self.input_state['up'] and self.jump_timer.jump_allowed():
        debug('jump')
        self.jump_timer.unset()
        self.jump_wait_timer = Timer(config.getint('Physics', 'jump_wait_time'))  # wait a bit between jumps
        self.player.start_jump()
    elif self.jump_wait_timer:
        if self.jump_wait_timer.check(ms):
            self.jump_wait_timer = None
    # Center camera on player
    self.camera.center = self.player.rect.center
    # Constrain camera to the level
    self.camera.right = min(self.camera.right, self.world_map.pixel_width)
    self.camera.bottom = min(self.camera.bottom, self.world_map.pixel_height)
    self.camera.left = max(self.camera.left, 0)
    self.camera.top = max(self.camera.top, 0)
    self.renderer.set_camera_position(self.camera.centerx, self.camera.centery)
    self.renderer.set_camera_margin(0, 0, 0, 0)  # something is resetting the margin to 16px each frame... grrr
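Example 12's jump_wait_timer is fed the per-frame delta in milliseconds through check(ms) and is treated as expired once check returns true. A hedged sketch of that frame-based countdown (the constructor unit is assumed to be milliseconds, matching the jump_wait_time config value):

class Timer(object):
    """Frame-based countdown sketch: check(ms) accumulates elapsed milliseconds."""
    def __init__(self, duration_ms):
        self.duration_ms = duration_ms
        self.elapsed_ms = 0

    def check(self, ms):
        # returns True once the accumulated time reaches the configured duration
        self.elapsed_ms += ms
        return self.elapsed_ms >= self.duration_ms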
Example 13: SearchState
class SearchState(object):
    def __init__(self):
        self.timer = Timer(SEARCHTIME)
        self.foundCounter = 4

    def processFrame(self, frame):
        path = vision.ProcessFrame(frame)
        print "search state"
        print path.found
        if path.found:
            frame = path.draw(frame)
            self.foundCounter -= 1
            if self.foundCounter <= 0:
                # closest point to center is the start point
                h, w, _ = frame.shape
                pt1, pt2 = [[path.p1x, path.p1y], [path.p2x, path.p2y]]
                center = (path.cx, path.cy)
                # the ideal angle is the angle of the end plank
                angle1 = getAngleFromCenter(center, pt1)
                angle2 = getAngleFromCenter(center, pt2)
                if abs(angle1) < abs(angle2):
                    return TurnState(pt2, pt1)
                else:
                    return TurnState(pt1, pt2)
        return self

    def cont(self):
        """ if true continue mission, false end mission"""
        return self.timer.timeLeft()
Example 14: SearchState
class SearchState(object):
    def __init__(self):
        self.timer = Timer(SEARCHTIME)
        self.foundCounter = 4

    def processFrame(self, frame):
        path = vision.ProcessFrame(frame)
        print path.found
        if path.found:
            frame = path.draw(frame)
            self.foundCounter -= 1
            if self.foundCounter <= 0:
                # closest point to center is the start point
                h, w, _ = frame.shape
                pt1, pt2 = [[path.p1x, path.p1y], [path.p2x, path.p2y]]
                dist1 = math.sqrt((w/2 - pt1[0]) ** 2 + (h/2 - pt1[1]) ** 2)
                dist2 = math.sqrt((w/2 - pt2[0]) ** 2 + (h/2 - pt2[1]) ** 2)
                if dist1 < dist2:
                    return FoundState(pt1, pt2)
                else:
                    return FoundState(pt2, pt1)
        return self

    def cont(self):
        """ if true continue mission, false end mission"""
        return self.timer.timeLeft()
Example 15: __init__
def __init__(self, startPt, endPt):
    # after the path has not been seen for 2 secs, quit
    self.pathLost = Timer(LOSTTIME)
    self.centers = []
    self.startPt = startPt
    self.endPt = endPt
    sw3.Forward(0).start()