This article collects typical usage examples of the logger.get_logger method in Python. If you are wondering what logger.get_logger does, how it is called, or what real-world uses look like, the hand-picked code examples below may help. You can also explore further usage examples from the logger module that provides this method.
The following presents 6 code examples of the logger.get_logger method, sorted by popularity by default.
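Note that every example obtains its logger from a project-local logger module rather than calling logging.getLogger directly. The exact helper differs from project to project; a minimal sketch of such a wrapper (the handler, level, and format below are illustrative assumptions, not any project's actual implementation) might look like this:

import logging

def get_logger(name):
    """Return a named logger with a single console handler attached."""
    log = logging.getLogger(name)
    if not log.handlers:  # avoid stacking handlers on repeated calls
        handler = logging.StreamHandler()
        handler.setFormatter(
            logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s"))
        log.addHandler(handler)
        log.setLevel(logging.INFO)
    return log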
Example 1: main
# Required import: import logger [as alias]
# Or: from logger import get_logger [as alias]
def main(m_path, img_path, out_dir):
    logger = get_logger("inference")
    logger.info(f"generating image from {img_path}")
    imported = tf.saved_model.load(m_path)
    f = imported.signatures["serving_default"]
    img = np.array(Image.open(img_path).convert("RGB"))
    img = np.expand_dims(img, 0).astype(np.float32) / 127.5 - 1
    out = f(tf.constant(img))['output_1']
    out = ((out.numpy().squeeze() + 1) * 127.5).astype(np.uint8)
    if out_dir != "" and not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    if out_dir == "":
        out_dir = "."
    out_path = os.path.join(out_dir, os.path.split(img_path)[1])
    imwrite(out_path, out)
    logger.info(f"generated image saved to {out_path}")
Example 2: main
# Required import: import logger [as alias]
# Or: from logger import get_logger [as alias]
def main(m_path, img_path, out_dir, light=False):
    logger = get_logger("inference")
    logger.info(f"generating image from {img_path}")
    try:
        g = Generator(light=light)
        g.load_weights(tf.train.latest_checkpoint(m_path))
    except ValueError as e:
        logger.error(e)
        logger.error("Failed to load specified weight.")
        logger.error("If you trained your model with --light, "
                     "consider adding --light when executing this script; otherwise, "
                     "do not add --light when executing this script.")
        exit(1)
    img = np.array(Image.open(img_path).convert("RGB"))
    img = np.expand_dims(img, 0).astype(np.float32) / 127.5 - 1
    out = ((g(img).numpy().squeeze() + 1) * 127.5).astype(np.uint8)
    if out_dir != "" and not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    if out_dir == "":
        out_dir = "."
    out_path = os.path.join(out_dir, os.path.split(img_path)[1])
    imwrite(out_path, out)
    logger.info(f"generated image saved to {out_path}")
Example 3: test_logging
# Required import: import logger [as alias]
# Or: from logger import get_logger [as alias]
def test_logging(self):
    self.assertTrue(not os.path.exists('test.log'))
    logger.initialize_logging('test.log', max_len=1024, interactive=True)
    l = logger.get_logger()
    self.assertTrue(os.path.exists('test.log'))
    self.assertTrue(not os.path.exists('test.log.1'))
    l.debug('debug msg')
    l.info('info msg')
    l.warn('warn')
    l.error('error')
    l.info('d' * 1024)  # force a rollover
    l.info('new file')
    self.assertTrue(os.path.exists('test.log'))
    self.assertTrue(os.path.exists('test.log.1'))
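The test above only exercises the public surface of its logger module (initialize_logging plus a no-argument get_logger); the implementation itself is not shown. A hypothetical version consistent with the test's assertions, assuming size-based rotation via logging.handlers.RotatingFileHandler, could look like this:

import logging
from logging.handlers import RotatingFileHandler

_logger = logging.getLogger("app")  # assumed module-level singleton

def initialize_logging(path, max_len=1024 * 1024, interactive=False):
    # Roll over to path.1 once a record would push the file past max_len bytes.
    handler = RotatingFileHandler(path, maxBytes=max_len, backupCount=1)
    handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
    _logger.addHandler(handler)
    if interactive:  # also echo records to the console
        _logger.addHandler(logging.StreamHandler())
    _logger.setLevel(logging.DEBUG)

def get_logger():
    return _logger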
Example 4: main
# Required import: import logger [as alias]
# Or: from logger import get_logger [as alias]
def main(m_path, out_dir, light):
    logger = get_logger("export")
    try:
        g = Generator(light=light)
        g.load_weights(tf.train.latest_checkpoint(m_path))
        t = tf.keras.Input(shape=[None, None, 3], batch_size=None)
        g(t, training=False)
        g.summary()
    except ValueError as e:
        logger.error(e)
        logger.error("Failed to load specified weight.")
        logger.error("If you trained your model with --light, "
                     "consider adding --light when executing this script; otherwise, "
                     "do not add --light when executing this script.")
        exit(1)
    m_num = 0
    smd = os.path.join(out_dir, "SavedModel")
    tfmd = os.path.join(out_dir, "tfjs_model")
    if light:
        smd += "Light"
        tfmd += "_light"
    saved_model_dir = f"{smd}_{m_num:04d}"
    tfjs_model_dir = f"{tfmd}_{m_num:04d}"
    while os.path.exists(saved_model_dir):
        m_num += 1
        saved_model_dir = f"{smd}_{m_num:04d}"
        tfjs_model_dir = f"{tfmd}_{m_num:04d}"
    tf.saved_model.save(g, saved_model_dir)
    cmd = ['tensorflowjs_converter', '--input_format', 'tf_saved_model',
           '--output_format', 'tfjs_graph_model', saved_model_dir, tfjs_model_dir]
    logger.info(" ".join(cmd))
    exit_code = Popen(cmd).wait()
    if exit_code == 0:
        logger.info(f"Model converted to {saved_model_dir} and {tfjs_model_dir} successfully")
    else:
        logger.error("tfjs model conversion failed")
Example 5: main
# Required import: import logger [as alias]
# Or: from logger import get_logger [as alias]
def main(framework, train_main, generate_main):
    arg_parser = ArgumentParser(
        description="{} character embeddings LSTM text generation model.".format(framework))
    subparsers = arg_parser.add_subparsers(title="subcommands")
    # train args
    train_parser = subparsers.add_parser("train", help="train model on text file")
    train_parser.add_argument("--checkpoint-path", required=True,
                              help="path to save or load model checkpoints (required)")
    train_parser.add_argument("--text-path", required=True,
                              help="path of text file for training (required)")
    train_parser.add_argument("--restore", nargs="?", default=False, const=True,
                              help="whether to restore from checkpoint_path "
                                   "or from another path if specified")
    train_parser.add_argument("--seq-len", type=int, default=64,
                              help="sequence length of inputs and outputs (default: %(default)s)")
    train_parser.add_argument("--embedding-size", type=int, default=32,
                              help="character embedding size (default: %(default)s)")
    train_parser.add_argument("--rnn-size", type=int, default=128,
                              help="size of rnn cell (default: %(default)s)")
    train_parser.add_argument("--num-layers", type=int, default=2,
                              help="number of rnn layers (default: %(default)s)")
    train_parser.add_argument("--drop-rate", type=float, default=0.,
                              help="dropout rate for rnn layers (default: %(default)s)")
    train_parser.add_argument("--learning-rate", type=float, default=0.001,
                              help="learning rate (default: %(default)s)")
    train_parser.add_argument("--clip-norm", type=float, default=5.,
                              help="max norm to clip gradient (default: %(default)s)")
    train_parser.add_argument("--batch-size", type=int, default=64,
                              help="training batch size (default: %(default)s)")
    train_parser.add_argument("--num-epochs", type=int, default=32,
                              help="number of epochs for training (default: %(default)s)")
    train_parser.add_argument("--log-path", default=os.path.join(os.path.dirname(__file__), "main.log"),
                              help="path of log file (default: %(default)s)")
    train_parser.set_defaults(main=train_main)
    # generate args
    generate_parser = subparsers.add_parser("generate", help="generate text from trained model")
    generate_parser.add_argument("--checkpoint-path", required=True,
                                 help="path to load model checkpoints (required)")
    group = generate_parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--text-path", help="path of text file to generate seed")
    group.add_argument("--seed", default=None, help="seed character sequence")
    generate_parser.add_argument("--length", type=int, default=1024,
                                 help="length of character sequence to generate (default: %(default)s)")
    generate_parser.add_argument("--top-n", type=int, default=3,
                                 help="number of top choices to sample (default: %(default)s)")
    generate_parser.add_argument("--log-path", default=os.path.join(os.path.dirname(__file__), "main.log"),
                                 help="path of log file (default: %(default)s)")
    generate_parser.set_defaults(main=generate_main)
    args = arg_parser.parse_args()
    get_logger("__main__", log_path=args.log_path, console=True)
    logger = get_logger(__name__, log_path=args.log_path, console=True)
    logger.debug("call: %s", " ".join(sys.argv))
    logger.debug("ArgumentParser: %s", args)
    try:
        args.main(args)
    except Exception as e:
        logger.exception(e)
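Unlike the earlier examples, this one passes log_path and console keyword arguments to get_logger, so the underlying helper presumably attaches a file handler and, optionally, a console handler. A sketch consistent with that call signature (an assumption about the project's logger module, not its actual code) might be:

import logging

def get_logger(name, log_path=None, console=False):
    log = logging.getLogger(name)
    if log.handlers:  # already configured for this name
        return log
    log.setLevel(logging.DEBUG)
    fmt = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")
    if log_path:
        file_handler = logging.FileHandler(log_path)
        file_handler.setFormatter(fmt)
        log.addHandler(file_handler)
    if console:
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(fmt)
        log.addHandler(console_handler)
    return log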
Example 6: main
# Required import: import logger [as alias]
# Or: from logger import get_logger [as alias]
def main(m_path, out_dir, light=False, test_out=True):
    logger = get_logger("tf1_export", debug=test_out)
    g = Generator(light=light)
    t = tf.placeholder(tf.string, [])
    x = tf.expand_dims(tf.image.decode_jpeg(tf.read_file(t), channels=3), 0)
    x = (tf.cast(x, tf.float32) / 127.5) - 1
    x = g(x, training=False)
    out = tf.cast((tf.squeeze(x, 0) + 1) * 127.5, tf.uint8)
    in_name, out_name = t.op.name, out.op.name
    try:
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            g.load_weights(tf.train.latest_checkpoint(m_path))
            in_graph_def = tf.get_default_graph().as_graph_def()
            out_graph_def = tf.graph_util.convert_variables_to_constants(
                sess, in_graph_def, [out_name])
        tf.reset_default_graph()
        tf.import_graph_def(out_graph_def, name='')
    except ValueError:
        logger.error("Failed to load specified weight.")
        logger.error("If you trained your model with --light, "
                     "consider adding --light when executing this script; otherwise, "
                     "do not add --light when executing this script.")
        exit(1)
    makedirs(out_dir)
    m_cnt = 0
    bpath = 'optimized_graph_light' if light else 'optimized_graph'
    out_path = os.path.join(out_dir, f'{bpath}_{m_cnt:04d}.pb')
    while os.path.exists(out_path):
        m_cnt += 1
        out_path = os.path.join(out_dir, f'{bpath}_{m_cnt:04d}.pb')
    with tf.gfile.GFile(out_path, 'wb') as f:
        f.write(out_graph_def.SerializeToString())
    if test_out:
        with tf.Graph().as_default():
            gd = tf.GraphDef()
            with tf.gfile.GFile(out_path, 'rb') as f:
                gd.ParseFromString(f.read())
            tf.import_graph_def(gd, name='')
            tf.get_default_graph().finalize()
            t = tf.get_default_graph().get_tensor_by_name(f"{in_name}:0")
            out = tf.get_default_graph().get_tensor_by_name(f"{out_name}:0")
            from time import time
            start = time()
            with tf.Session() as sess:
                img = Image.fromarray(sess.run(out, {t: "input_images/temple.jpg"}))
                img.show()
            elapsed = time() - start
            logger.debug(f"{elapsed} sec per img")
    logger.info(f"successfully exported ckpt to {out_path}")
    logger.info(f"input var name: {in_name}:0")
    logger.info(f"output var name: {out_name}:0")