This article collects typical usage examples of the util.init_logger method in Python. If you are unsure what util.init_logger does or how to call it, the curated code examples below may help. You can also explore further usage examples from the util module it belongs to.
The following section presents 4 code examples of util.init_logger, sorted by popularity by default.
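Every example below calls util.init_logger with some combination of log_file, log_path, stdout and mode arguments. For orientation only, here is a minimal sketch of what such a helper might look like if it simply wraps Python's standard logging module; the names and defaults are assumptions, not the project's actual implementation:

import logging
import os

def init_logger(log_file='log.log', log_path='.', stdout=True, mode='w'):
    # Hypothetical sketch: send log records to log_path/log_file and,
    # optionally, echo them to the console.
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    file_handler = logging.FileHandler(os.path.join(log_path, log_file), mode=mode)
    file_handler.setFormatter(fmt)
    root.addHandler(file_handler)
    if stdout:
        console = logging.StreamHandler()
        console.setFormatter(fmt)
        root.addHandler(console)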
Example 1: config_initialization
# Required module: import util [as alias]
# Or: from util import init_logger [as alias]
def config_initialization():
    # image shape and feature layers shape inference
    image_shape = (FLAGS.train_image_height, FLAGS.train_image_width)
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    tf.logging.set_verbosity(tf.logging.DEBUG)
    util.init_logger(
        log_file = 'log_train_seglink_%d_%d.log' % image_shape,
        log_path = FLAGS.train_dir, stdout = False, mode = 'a')
    config.init_config(image_shape,
                       batch_size = FLAGS.batch_size,
                       weight_decay = FLAGS.weight_decay,
                       num_gpus = FLAGS.num_gpus,
                       train_with_ignored = FLAGS.train_with_ignored,
                       seg_loc_loss_weight = FLAGS.seg_loc_loss_weight,
                       link_cls_loss_weight = FLAGS.link_cls_loss_weight,
                       )
    batch_size = config.batch_size
    batch_size_per_gpu = config.batch_size_per_gpu
    tf.summary.scalar('batch_size', batch_size)
    tf.summary.scalar('batch_size_per_gpu', batch_size_per_gpu)
    util.proc.set_proc_name(FLAGS.model_name + '_' + FLAGS.dataset_name)
    dataset = dataset_factory.get_dataset(FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
    config.print_config(FLAGS, dataset)
    return dataset
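Note that image_shape is a (height, width) tuple, so the %-formatting of the log_file argument expands it directly into the file name. A quick illustration (the 384x384 shape is only an example value):

image_shape = (384, 384)  # e.g. FLAGS.train_image_height, FLAGS.train_image_width
log_file = 'log_train_seglink_%d_%d.log' % image_shape
print(log_file)  # log_train_seglink_384_384.log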
Example 2: main
# Required module: import util [as alias]
# Or: from util import init_logger [as alias]
def main(_):
    util.init_logger()
    dump_path = util.io.get_absolute_path('~/temp/no-use/seglink/')
    dataset = config_initialization()
    batch_queue = create_dataset_batch_queue(dataset)
    batch_size = config.batch_size
    summary_op = tf.summary.merge_all()
    with tf.Session() as sess:
        tf.train.start_queue_runners(sess)
        b_image, b_seg_label, b_seg_offsets, b_link_label = batch_queue.dequeue()
        batch_idx = 0
        while True:  # batch_idx < 50:
            image_data_batch, seg_label_data_batch, seg_offsets_data_batch, link_label_data_batch = \
                sess.run([b_image, b_seg_label, b_seg_offsets, b_link_label])
            for image_idx in xrange(batch_size):
                image_data = image_data_batch[image_idx, ...]
                seg_label_data = seg_label_data_batch[image_idx, ...]
                seg_offsets_data = seg_offsets_data_batch[image_idx, ...]
                link_label_data = link_label_data_batch[image_idx, ...]
                # add back the per-channel means subtracted during preprocessing
                image_data = image_data + [123, 117, 104]
                image_data = np.asarray(image_data, dtype = np.uint8)
                # decode the encoded ground truth back to bboxes
                bboxes = seglink.seglink_to_bbox(seg_scores = seg_label_data,
                                                 link_scores = link_label_data,
                                                 seg_offsets_pred = seg_offsets_data)
                # draw bboxes on the image
                for bbox_idx in xrange(len(bboxes)):
                    bbox = bboxes[bbox_idx, :]
                    draw_bbox(image_data, bbox)
                image_path = util.io.join_path(dump_path, '%d_%d.jpg' % (batch_idx, image_idx))
                util.plt.imwrite(image_path, image_data)
                print 'Make sure that the text on the image is correctly bounded ' \
                      'with oriented boxes:', image_path
            batch_idx += 1
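draw_bbox is not shown in this snippet. As a rough stand-in, assuming each decoded bbox is an oriented rectangle in (cx, cy, w, h, theta) form (the exact layout and angle unit depend on seglink.seglink_to_bbox), a hypothetical OpenCV-based helper could look like this:

import cv2
import numpy as np

def draw_bbox(image_data, bbox, color=(0, 255, 0)):
    # Hypothetical helper: draw one oriented box in place on image_data.
    # Assumes bbox = (cx, cy, w, h, theta) with theta in degrees; adjust if
    # the decoder returns a different layout or radians.
    cx, cy, w, h, theta = bbox
    corners = cv2.boxPoints(((cx, cy), (w, h), theta))
    corners = np.int32([corners])
    cv2.polylines(image_data, corners, isClosed=True, color=color, thickness=2)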
Example 3: config_initialization
# Required module: import util [as alias]
# Or: from util import init_logger [as alias]
def config_initialization():
    # image shape and feature layers shape inference
    image_shape = (FLAGS.train_image_height, FLAGS.train_image_width)
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    tf.logging.set_verbosity(tf.logging.DEBUG)
    util.init_logger(
        log_file = 'log_train_pixel_link_%d_%d.log' % image_shape,
        log_path = FLAGS.train_dir, stdout = False, mode = 'a')
    config.load_config(FLAGS.train_dir)
    config.init_config(image_shape,
                       batch_size = FLAGS.batch_size,
                       weight_decay = FLAGS.weight_decay,
                       num_gpus = FLAGS.num_gpus
                       )
    batch_size = config.batch_size
    batch_size_per_gpu = config.batch_size_per_gpu
    tf.summary.scalar('batch_size', batch_size)
    tf.summary.scalar('batch_size_per_gpu', batch_size_per_gpu)
    util.proc.set_proc_name('train_pixel_link_on' + '_' + FLAGS.dataset_name)
    dataset = dataset_factory.get_dataset(FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
    config.print_config(FLAGS, dataset)
    return dataset
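util.proc.set_proc_name only renames the running process so that it is easier to identify in ps/top output. A minimal sketch of such a helper, assuming the third-party setproctitle package (the project's real implementation may differ):

def set_proc_name(name):
    # Hypothetical sketch: give the current process a readable name.
    try:
        import setproctitle
        setproctitle.setproctitle(name)
    except ImportError:
        pass  # purely cosmetic; skip if setproctitle is unavailable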
Example 4: get_cfg
# Required module: import util [as alias]
# Or: from util import init_logger [as alias]
def get_cfg(args, fixed_args):
    # Parse any additional args
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--recover-stack-vars',
        help='Flag to enable stack variable recovery',
        default=False,
        action='store_true')
    parser.add_argument(
        "--std-defs",
        action='append',
        type=str,
        default=[],
        help="std_defs file: definitions and calling conventions of imported functions and data")
    extra_args = parser.parse_args(fixed_args)
    if extra_args.recover_stack_vars:
        RECOVER_OPTS['stack_vars'] = True

    # Setup logger
    util.init_logger(args.log_file)

    # Load the binary in binja
    bv = util.load_binary(args.binary)

    # Once for good measure.
    bv.add_analysis_option("linearsweep")
    bv.update_analysis_and_wait()

    # Twice for good luck!
    bv.add_analysis_option("linearsweep")
    bv.update_analysis_and_wait()

    # Collect all paths to defs files
    log.debug('Parsing definitions files')
    def_paths = set(map(os.path.abspath, extra_args.std_defs))
    def_paths.add(os.path.join(DISASS_DIR, 'defs', '{}.txt'.format(args.os)))  # default defs file

    # Parse all of the defs files
    for fpath in def_paths:
        if os.path.isfile(fpath):
            parse_defs_file(bv, fpath)
        else:
            log.warn('%s is not a file', fpath)

    # Recover module
    log.debug('Starting analysis')
    pb_mod = recover_cfg(bv, args)

    # Save cfg
    log.debug('Saving to file: %s', args.output)
    with open(args.output, 'wb') as f:
        f.write(pb_mod.SerializeToString())

    return 0
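get_cfg receives fixed_args alongside the already-parsed args, which matches the usual argparse parse_known_args pattern: a top-level parser consumes the shared options and forwards everything it does not recognize. A hedged sketch of such a caller; the option names are inferred from the attributes used above (args.log_file, args.binary, args.output, args.os) and are not taken from the actual driver script:

import argparse

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--log_file', default='disass.log')
    parser.add_argument('--binary', required=True)
    parser.add_argument('--output', required=True)
    parser.add_argument('--os', default='linux')
    # Anything unrecognized (e.g. --recover-stack-vars, --std-defs) is
    # passed through to get_cfg for its own parser to handle.
    args, fixed_args = parser.parse_known_args()
    return get_cfg(args, fixed_args)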