This article collects typical usage examples of the Python method tensorpack.utils.utils.get_tqdm_kwargs. If you have been wondering what utils.get_tqdm_kwargs does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help. You can also browse further usage examples from its enclosing module, tensorpack.utils.utils.
Below are 2 code examples of the utils.get_tqdm_kwargs method, sorted by popularity by default.
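Both examples follow the same basic pattern: get_tqdm_kwargs() returns a dict of default tqdm keyword arguments, which is splatted into a progress bar. Here is a minimal sketch of that pattern; the total of 100 and the loop body are placeholders for illustration:

import tqdm
from tensorpack.utils.utils import get_tqdm_kwargs

# Create a progress bar using tensorpack's default tqdm settings.
with tqdm.tqdm(total=100, **get_tqdm_kwargs()) as pbar:
    for _ in range(100):
        # ... one unit of work ...
        pbar.update(1)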
Example 1: eval_on_dataflow
# Required import: from tensorpack.utils import utils [as alias]
# or: from tensorpack.utils.utils import get_tqdm_kwargs [as alias]
import numpy as np
import tqdm
import pycocotools.mask as cocomask

from tensorpack.utils.utils import get_tqdm_kwargs
# COCOMeta is assumed to come from the example project's own coco module.
from coco import COCOMeta


def eval_on_dataflow(df, detect_func):
    """
    Args:
        df: a DataFlow which produces (image, image_id)
        detect_func: a callable, takes [image] and returns [DetectionResult]
    Returns:
        list of dict, to be dumped to COCO json format
    """
    df.reset_state()
    all_results = []
    with tqdm.tqdm(total=df.size(), **get_tqdm_kwargs()) as pbar:
        for img, img_id in df.get_data():
            results = detect_func(img)
            for r in results:
                box = r.box
                cat_id = COCOMeta.class_id_to_category_id[r.class_id]
                # convert the (x1, y1, x2, y2) box to COCO's (x, y, w, h) format
                box[2] -= box[0]
                box[3] -= box[1]
                res = {
                    'image_id': img_id,
                    'category_id': cat_id,
                    'bbox': list(map(lambda x: float(round(x, 1)), box)),
                    'score': float(round(r.score, 2)),
                }
                # also append segmentation to results
                if r.mask is not None:
                    rle = cocomask.encode(
                        np.array(r.mask[:, :, None], order='F'))[0]
                    rle['counts'] = rle['counts'].decode('ascii')
                    res['segmentation'] = rle
                all_results.append(res)
            pbar.update(1)
    return all_results
# https://github.com/pdollar/coco/blob/master/PythonAPI/pycocoEvalDemo.ipynb
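As a follow-up, here is a hedged sketch of how the returned list of dicts can be scored with pycocotools, along the lines of the pycocoEvalDemo notebook linked above. The names df, detect_func and coco_gt_file are placeholders, not part of the example:

import json

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

# Run detection over the DataFlow and dump the results in COCO json format.
all_results = eval_on_dataflow(df, detect_func)
with open('results.json', 'w') as f:
    json.dump(all_results, f)

# Score the detections against the ground-truth annotation file.
coco_gt = COCO(coco_gt_file)
coco_dt = coco_gt.loadRes('results.json')
coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()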
Example 2: main_loop
# Required import: from tensorpack.utils import utils [as alias]
# or: from tensorpack.utils.utils import get_tqdm_kwargs [as alias]
# Method of a modified tensorpack Trainer; it relies on module-level names
# from the surrounding project (os, tqdm, logger, ops, get_global_step,
# timed_operation, neptune_mp_server, get_tqdm_kwargs).
def main_loop(self):
    # some final operations that might modify the graph
    logger.info("[{}] Initializing graph variables ...".format(os.environ['SLURMD_NODENAME']))
    # self.sess.run(tf.initialize_all_variables())
    self.config.session_init.init(self.sess)
    # tf.get_default_graph().finalize()
    callbacks = self.config.callbacks

    logger.info("[{}] Starting concurrency...".format(os.environ['SLURMD_NODENAME']))
    self._start_concurrency()

    # with self.sess.as_default():
    logger.info("[{}] Setting default session".format(os.environ['SLURMD_NODENAME']))
    with ops.default_session(self.sess):
        try:
            logger.info("[{}] Getting global step".format(os.environ['SLURMD_NODENAME']))
            self.global_step = get_global_step()
            logger.info("[{}] Start training with global_step={}".format(
                os.environ['SLURMD_NODENAME'], self.global_step))

            if self.config.extra_arg['is_chief']:
                server = neptune_mp_server.Server(
                    self.config.extra_arg['n_workers'],
                    port=self.config.extra_arg['port'],
                    debug_charts=self.config.extra_arg['debug_charts'],
                    adam_debug=self.config.extra_arg['adam_debug'],
                    schedule_hyper=self.config.extra_arg['schedule_hyper'],
                    experiment_dir=self.config.extra_arg['experiment_dir'])
                server.main_loop()

            callbacks.before_train()
            for epoch in range(self.config.starting_epoch, self.config.max_epoch + 1):
                with timed_operation(
                        'Epoch {}, global_step={}'.format(
                            epoch, self.global_step + self.config.step_per_epoch)):
                    for step in tqdm.trange(
                            self.config.step_per_epoch,
                            **get_tqdm_kwargs(leave=True)):
                        if self.coord.should_stop():
                            return
                        self.run_step()
                        callbacks.trigger_step()
                        try:
                            self.global_step += 1
                        except Exception:
                            self.global_step = -1
                    self.trigger_epoch()
                    print('EPOCH ENDS HERE')
        except (KeyboardInterrupt, Exception):
            raise
        finally:
            # Do I need to run queue.close?
            print('Handling finally block')
            callbacks.after_train()
            self.coord.request_stop()
            self.summary_writer.close()
            self.sess.close()
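A note on the tqdm call in Example 2: get_tqdm_kwargs accepts keyword arguments that are merged into the returned defaults, so get_tqdm_kwargs(leave=True) asks tqdm to keep each epoch's finished progress bar on screen rather than clearing it, which makes the per-epoch timing easier to read back in the training log.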