This page collects typical usage examples of the Python method util.log.warn. If you have been struggling with questions such as: what exactly does log.warn do, how is it called, and what does it look like in real code, then the hand-picked examples below should help. You can also explore further usage examples from util.log, the module this method belongs to.
The following shows 14 code examples of the log.warn method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
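All 14 examples are TensorFlow 1.x code that imports the logger as "from util import log". The implementation of util.log itself is not reproduced on this page; purely for orientation, here is a minimal sketch of what such a helper module might look like, assuming it wraps Python's standard logging library and adds the infov ("important info") call seen in the examples. The real module in these projects may differ, for example by adding colored output.

# Minimal sketch of a hypothetical util/log.py (an assumption inferred from the
# calls log.info / log.warn / log.warning / log.infov in the examples below;
# the actual module may differ).
import logging

_logger = logging.getLogger('util.log')
if not _logger.handlers:
    _handler = logging.StreamHandler()
    _handler.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
    _logger.addHandler(_handler)
    _logger.setLevel(logging.INFO)

# Re-export the logger methods under the names the examples call.
info = _logger.info
warning = _logger.warning
warn = _logger.warning  # Logger.warn is a deprecated alias for warning
error = _logger.error

def infov(msg, *args):
    # "Important info" used by the examples; implemented as plain info() here.
    _logger.info(msg, *args)

With a module like this on the import path, "from util import log" followed by log.warn("No checkpoint is given. Just random initialization :-)") emits a warning-level message, which is exactly the pattern the examples below rely on.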
Example 1: __init__
# Required import: from util import log [as alias]
# Or: from util.log import warn [as alias]
def __init__(self, config, model, dataset):
    self.config = config
    self.model = model
    self.train_dir = config.train_dir
    log.info("self.train_dir = %s", self.train_dir)

    # --- input ops ---
    self.batch_size = config.batch_size
    self.dataset = dataset
    check_data_id(dataset, config.data_id)
    _, self.batch = create_input_ops(dataset, self.batch_size,
                                     data_id=config.data_id,
                                     is_training=False,
                                     shuffle=False)

    self.global_step = tf.contrib.framework.get_or_create_global_step(graph=None)
    self.step_op = tf.no_op(name='step_no_op')

    tf.set_random_seed(1234)

    session_config = tf.ConfigProto(
        allow_soft_placement=True,
        gpu_options=tf.GPUOptions(allow_growth=True),
        device_count={'GPU': 1},
    )
    self.session = tf.Session(config=session_config)

    # --- checkpoint and monitoring ---
    self.saver = tf.train.Saver(max_to_keep=100)

    self.checkpoint = config.checkpoint
    if self.checkpoint is None and self.train_dir:
        self.checkpoint = tf.train.latest_checkpoint(self.train_dir)
    if self.checkpoint is None:
        log.warn("No checkpoint is given. Just random initialization :-)")
        self.session.run(tf.global_variables_initializer())
    else:
        log.info("Checkpoint path : %s", self.checkpoint)
Example 2: eval_run
# Required import: from util import log [as alias]
# Or: from util.log import warn [as alias]
def eval_run(self):
    # load checkpoint
    if self.checkpoint:
        self.saver.restore(self.session, self.checkpoint)
        log.info("Loaded from checkpoint!")

    log.infov("Start 1-epoch Inference and Evaluation")

    log.info("# of examples = %d", len(self.dataset))
    length_dataset = len(self.dataset)
    max_steps = int(length_dataset / self.batch_size) + 1
    log.info("max_steps = %d", max_steps)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(self.session,
                                           coord=coord, start=True)

    evaler = EvalManager()
    try:
        for s in xrange(max_steps):
            step, loss, step_time, batch_chunk, prediction_pred, prediction_gt = \
                self.run_single_step(self.batch)
            self.log_step_message(s, loss, step_time)
            evaler.add_batch(batch_chunk['id'], prediction_pred, prediction_gt)
    except Exception as e:
        coord.request_stop(e)

    coord.request_stop()
    try:
        coord.join(threads, stop_grace_period_secs=3)
    except RuntimeError as e:
        log.warn(str(e))

    evaler.report()
    log.infov("Evaluation complete.")
Example 3: eval_run
# Required import: from util import log [as alias]
# Or: from util.log import warn [as alias]
def eval_run(self):
    # load checkpoint
    if self.checkpoint_path:
        self.saver.restore(self.session, self.checkpoint_path)
        log.info("Loaded from checkpoint!")

    log.infov("Start 1-epoch Inference and Evaluation")

    log.info("# of examples = %d", len(self.dataset))
    length_dataset = len(self.dataset)
    max_steps = int(length_dataset / self.batch_size) + 1
    log.info("max_steps = %d", max_steps)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(self.session,
                                           coord=coord, start=True)

    evaler = EvalManager()
    try:
        for s in xrange(max_steps):
            step, loss, step_time, batch_chunk, prediction_pred, prediction_gt = \
                self.run_single_step(self.batch)
            self.log_step_message(s, loss, step_time)
            evaler.add_batch(batch_chunk['id'], prediction_pred, prediction_gt)
    except Exception as e:
        coord.request_stop(e)

    coord.request_stop()
    try:
        coord.join(threads, stop_grace_period_secs=3)
    except RuntimeError as e:
        log.warn(str(e))

    evaler.report()
    log.infov("Evaluation complete.")
Example 4: eval_run
# Required import: from util import log [as alias]
# Or: from util.log import warn [as alias]
def eval_run(self):
    # load checkpoint
    if self.checkpoint_path:
        self.saver.restore(self.session, self.checkpoint_path)
        log.info("Loaded from checkpoint!")

    log.infov("Start 1-epoch Inference and Evaluation")

    log.info("# of examples = %d", len(self.dataset))
    max_steps = self.config.max_steps
    log.info("max_steps = %d", max_steps)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(self.session,
                                           coord=coord, start=True)

    evaler = EvalManager()
    try:
        for s in xrange(max_steps):
            step, step_time, batch_chunk, prediction_pred, prediction_gt = \
                self.run_single_step(self.batch)
            self.log_step_message(s, step_time)
            evaler.add_batch(batch_chunk['id'], prediction_pred, prediction_gt)
    except Exception as e:
        coord.request_stop(e)

    coord.request_stop()
    try:
        coord.join(threads, stop_grace_period_secs=3)
    except RuntimeError as e:
        log.warn(str(e))

    if self.config.output_file:
        evaler.dump_result(self.config.output_file)
Example 5: __init__
# Required import: from util import log [as alias]
# Or: from util.log import warn [as alias]
def __init__(self, config, model, dataset):
    self.config = config
    self.model = model
    self.train_dir = config.train_dir
    self.output_file = config.output_file
    log.info("self.train_dir = %s", self.train_dir)

    # --- input ops ---
    self.batch_size = config.batch_size
    self.dataset = dataset
    _, self.batch = create_input_ops(dataset, self.batch_size,
                                     is_training=False,
                                     shuffle=False)

    self.global_step = tf.contrib.framework.get_or_create_global_step(graph=None)
    self.step_op = tf.no_op(name='step_no_op')

    tf.set_random_seed(1234)

    session_config = tf.ConfigProto(
        allow_soft_placement=True,
        gpu_options=tf.GPUOptions(allow_growth=True),
        device_count={'GPU': 1},
    )
    self.session = tf.Session(config=session_config)

    # --- checkpoint and monitoring ---
    self.saver = tf.train.Saver(max_to_keep=100)

    self.checkpoint = config.checkpoint
    if self.checkpoint is None and self.train_dir:
        self.checkpoint = tf.train.latest_checkpoint(self.train_dir)
    if self.checkpoint is None:
        log.warn("No checkpoint is given. Just random initialization :-)")
        self.session.run(tf.global_variables_initializer())
    else:
        log.info("Checkpoint path : %s", self.checkpoint)
Example 6: __call__
# Required import: from util import log [as alias]
# Or: from util.log import warn [as alias]
def __call__(self, input):
    if self._deconv_type == 'bilinear':
        from ops import bilinear_deconv2d as deconv2d
    elif self._deconv_type == 'nn':
        from ops import nn_deconv2d as deconv2d
    elif self._deconv_type == 'transpose':
        from ops import deconv2d
    else:
        raise NotImplementedError

    with tf.variable_scope(self.name, reuse=self._reuse):
        if not self._reuse:
            log.warn(self.name)
        _ = fc(input, self.start_dim_x * self.start_dim_y * self.start_dim_ch,
               self._is_train, info=not self._reuse, norm='none', name='fc')
        _ = tf.reshape(_, [_.shape.as_list()[0], self.start_dim_y,
                           self.start_dim_x, self.start_dim_ch])
        if not self._reuse:
            log.info('reshape {} '.format(_.shape.as_list()))
        num_deconv_layer = int(np.ceil(np.log2(
            max(float(self._h/self.start_dim_y), float(self._w/self.start_dim_x)))))
        for i in range(num_deconv_layer):
            _ = deconv2d(_, max(self._c, int(_.get_shape().as_list()[-1]/2)),
                         self._is_train, info=not self._reuse, norm=self._norm_type,
                         name='deconv{}'.format(i+1))
            if num_deconv_layer - i <= self._num_res_block:
                _ = conv2d_res(
                    _, self._is_train, info=not self._reuse,
                    name='res_block{}'.format(self._num_res_block - num_deconv_layer + i + 1))
        _ = deconv2d(_, self._c, self._is_train, k=1, s=1, info=not self._reuse,
                     activation_fn=tf.tanh, norm='none',
                     name='deconv{}'.format(i+2))
        _ = tf.image.resize_bilinear(_, [self._h, self._w])
        self._reuse = True
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.name)
        return _
Example 7: __init__
# Required import: from util import log [as alias]
# Or: from util.log import warn [as alias]
def __init__(self,
             config,
             dataset,
             dataset_train):
    self.config = config
    self.train_dir = config.train_dir
    log.info("self.train_dir = %s", self.train_dir)

    # --- input ops ---
    self.batch_size = config.batch_size
    self.dataset = dataset
    self.dataset_train = dataset_train
    check_data_id(dataset, config.data_id)
    _, self.batch = create_input_ops(dataset, self.batch_size,
                                     data_id=config.data_id,
                                     is_training=False,
                                     shuffle=False)

    # --- create model ---
    self.model = Model(config)

    self.global_step = tf.contrib.framework.get_or_create_global_step(graph=None)
    self.step_op = tf.no_op(name='step_no_op')

    tf.set_random_seed(123)

    session_config = tf.ConfigProto(
        allow_soft_placement=True,
        gpu_options=tf.GPUOptions(allow_growth=True),
        device_count={'GPU': 1},
    )
    self.session = tf.Session(config=session_config)

    # --- checkpoint and monitoring ---
    self.saver = tf.train.Saver(max_to_keep=100)

    self.checkpoint_path = config.checkpoint_path
    if self.checkpoint_path is None and self.train_dir:
        self.checkpoint_path = tf.train.latest_checkpoint(self.train_dir)
    if self.checkpoint_path is None:
        log.warn("No checkpoint is given. Just random initialization :-)")
        self.session.run(tf.global_variables_initializer())
    else:
        log.info("Checkpoint path : %s", self.checkpoint_path)
Example 8: eval_run
# Required import: from util import log [as alias]
# Or: from util.log import warn [as alias]
def eval_run(self):
    # load checkpoint
    if self.checkpoint_path:
        self.saver.restore(self.session, self.checkpoint_path)
        log.info("Loaded from checkpoint!")

    log.infov("Start Inference and Evaluation")

    log.info("# of testing examples = %d", len(self.dataset))
    length_dataset = len(self.dataset)
    max_steps = int(length_dataset / self.batch_size) + 1

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(self.session,
                                           coord=coord, start=True)

    evaler = EvalManager()

    if not (self.config.interpolate or self.config.generate or self.config.reconstruct):
        raise ValueError('Please specify at least one task by indicating ' +
                         '--reconstruct, --generate, or --interpolate.')

    if self.config.reconstruct:
        try:
            for s in xrange(max_steps):
                step, loss, step_time, batch_chunk, prediction_pred, prediction_gt = \
                    self.run_single_step(self.batch)
                self.log_step_message(s, loss, step_time)
                evaler.add_batch(batch_chunk['id'], prediction_pred, prediction_gt)
        except Exception as e:
            coord.request_stop(e)
        evaler.report()
        log.warning('Completed reconstruction.')

    if self.config.generate:
        x = self.generator(self.batch_size)
        img = self.image_grid(x)
        imageio.imwrite('generate_{}.png'.format(self.config.prefix), img)
        log.warning('Completed generation. Generated samples are saved ' +
                    'as generate_{}.png'.format(self.config.prefix))

    if self.config.interpolate:
        x = self.interpolator(self.dataset_train, self.batch_size)
        img = self.image_grid(x)
        imageio.imwrite('interpolate_{}.png'.format(self.config.prefix), img)
        log.warning('Completed interpolation. Interpolated samples are saved ' +
                    'as interpolate_{}.png'.format(self.config.prefix))

    coord.request_stop()
    try:
        coord.join(threads, stop_grace_period_secs=3)
    except RuntimeError as e:
        log.warn(str(e))

    log.infov("Completed evaluation.")
Example 9: __init__
# Required import: from util import log [as alias]
# Or: from util.log import warn [as alias]
def __init__(self,
             config,
             dataset):
    self.config = config
    self.train_dir = config.train_dir
    log.info("self.train_dir = %s", self.train_dir)

    # --- input ops ---
    self.batch_size = config.batch_size
    self.dataset = dataset
    check_data_id(dataset, config.data_id)
    _, self.batch = create_input_ops(dataset, self.batch_size,
                                     data_id=config.data_id,
                                     is_training=False,
                                     shuffle=False)

    # --- create model ---
    self.model = Model(config)

    self.global_step = tf.contrib.framework.get_or_create_global_step(graph=None)
    self.step_op = tf.no_op(name='step_no_op')

    tf.set_random_seed(1234)

    session_config = tf.ConfigProto(
        allow_soft_placement=True,
        gpu_options=tf.GPUOptions(allow_growth=True),
        device_count={'GPU': 1},
    )
    self.session = tf.Session(config=session_config)

    # --- checkpoint and monitoring ---
    self.saver = tf.train.Saver(max_to_keep=100)

    self.checkpoint_path = config.checkpoint_path
    if self.checkpoint_path is None and self.train_dir:
        self.checkpoint_path = tf.train.latest_checkpoint(self.train_dir)
    if self.checkpoint_path is None:
        log.warn("No checkpoint is given. Just random initialization :-)")
        self.session.run(tf.global_variables_initializer())
    else:
        log.info("Checkpoint path : %s", self.checkpoint_path)
Example 10: __init__
# Required import: from util import log [as alias]
# Or: from util.log import warn [as alias]
def __init__(self, config, model, dataset):
    self.config = config
    self.model = model
    self.train_dir = config.train_dir
    log.info("self.train_dir = %s", self.train_dir)

    # --- input ops ---
    self.batch_size = config.batch_size
    self.dataset = dataset
    _, self.batch = create_input_ops(dataset, self.batch_size,
                                     is_training=False,
                                     shuffle=False)

    self.global_step = tf.contrib.framework.get_or_create_global_step(graph=None)
    self.step_op = tf.no_op(name='step_no_op')

    # --- vars ---
    all_vars = tf.trainable_variables()
    log.warn("********* var ********** ")
    slim.model_analyzer.analyze_vars(all_vars, print_info=True)

    tf.set_random_seed(123)

    session_config = tf.ConfigProto(
        allow_soft_placement=True,
        gpu_options=tf.GPUOptions(allow_growth=True),
        device_count={'GPU': 1},
    )
    self.session = tf.Session(config=session_config)

    # --- checkpoint and monitoring ---
    self.saver = tf.train.Saver(max_to_keep=100)

    self.checkpoint = config.checkpoint
    if self.checkpoint is None and self.train_dir:
        self.checkpoint = tf.train.latest_checkpoint(self.train_dir)
        log.info("Checkpoint path : %s", self.checkpoint)
    elif self.checkpoint is None:
        log.warn("No checkpoint is given. Just random initialization :-)")
        self.session.run(tf.global_variables_initializer())
    else:
        log.info("Checkpoint path : %s", self.checkpoint)
Example 11: __init__
# Required import: from util import log [as alias]
# Or: from util.log import warn [as alias]
def __init__(self,
             config,
             dataset):
    self.config = config
    self.train_dir = config.train_dir
    log.info("self.train_dir = %s", self.train_dir)

    # --- input ops ---
    self.batch_size = config.batch_size
    self.dataset = dataset
    check_data_id(dataset, config.data_id)
    _, self.batch = create_input_ops(dataset, self.batch_size,
                                     data_id=config.data_id,
                                     is_training=False,
                                     shuffle=False)

    # --- create model ---
    Model = self.get_model_class(config.model)
    log.infov("Using Model class : %s", Model)
    self.model = Model(config)

    self.global_step = tf.contrib.framework.get_or_create_global_step(graph=None)
    self.step_op = tf.no_op(name='step_no_op')

    tf.set_random_seed(1234)

    session_config = tf.ConfigProto(
        allow_soft_placement=True,
        gpu_options=tf.GPUOptions(allow_growth=True),
        device_count={'GPU': 1},
    )
    self.session = tf.Session(config=session_config)

    # --- checkpoint and monitoring ---
    self.saver = tf.train.Saver(max_to_keep=100)

    self.checkpoint_path = config.checkpoint_path
    if self.checkpoint_path is None and self.train_dir:
        self.checkpoint_path = tf.train.latest_checkpoint(self.train_dir)
    if self.checkpoint_path is None:
        log.warn("No checkpoint is given. Just random initialization :-)")
        self.session.run(tf.global_variables_initializer())
    else:
        log.info("Checkpoint path : %s", self.checkpoint_path)
Example 12: build
# Required import: from util import log [as alias]
# Or: from util.log import warn [as alias]
def build(self, is_train=True):
    n = self.a_dim
    conv_info = self.conv_info

    # build loss and accuracy {{{
    def build_loss(logits, labels):
        # Cross-entropy loss
        loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)
        # Classification accuracy
        correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        return tf.reduce_mean(loss), accuracy
    # }}}

    # Classifier: takes images as input and outputs class label [B, m]
    def C(img, q, scope='Classifier'):
        with tf.variable_scope(scope) as scope:
            log.warn(scope.name)
            conv_1 = conv2d(img, conv_info[0], is_train, s_h=3, s_w=3, name='conv_1')
            conv_2 = conv2d(conv_1, conv_info[1], is_train, s_h=3, s_w=3, name='conv_2')
            conv_3 = conv2d(conv_2, conv_info[2], is_train, name='conv_3')
            conv_4 = conv2d(conv_3, conv_info[3], is_train, name='conv_4')
            conv_q = tf.concat([tf.reshape(conv_4, [self.batch_size, -1]), q], axis=1)
            fc_1 = fc(conv_q, 256, name='fc_1')
            fc_2 = fc(fc_1, 256, name='fc_2')
            fc_2 = slim.dropout(fc_2, keep_prob=0.5, is_training=is_train, scope='fc_3/')
            fc_3 = fc(fc_2, n, activation_fn=None, name='fc_3')
            return fc_3

    logits = C(self.img, self.q, scope='Classifier')
    self.all_preds = tf.nn.softmax(logits)
    self.loss, self.accuracy = build_loss(logits, self.a)

    # Add summaries
    def draw_iqa(img, q, target_a, pred_a):
        fig, ax = tfplot.subplots(figsize=(6, 6))
        ax.imshow(img)
        ax.set_title(question2str(q))
        ax.set_xlabel(answer2str(target_a)+answer2str(pred_a, 'Predicted'))
        return fig

    try:
        tfplot.summary.plot_many('IQA/',
                                 draw_iqa, [self.img, self.q, self.a, self.all_preds],
                                 max_outputs=3,
                                 collections=["plot_summaries"])
    except Exception:
        # plot summaries are optional; skip them if tfplot fails
        pass

    tf.summary.scalar("loss/accuracy", self.accuracy)
    tf.summary.scalar("loss/cross_entropy", self.loss)
    log.warn('Successfully loaded the model.')
Example 13: __init__
# Required import: from util import log [as alias]
# Or: from util.log import warn [as alias]
def __init__(self,
             config,
             dataset):
    self.config = config
    self.train_dir = config.train_dir
    self.output_file = config.output_file
    log.info("self.train_dir = %s", self.train_dir)

    # --- input ops ---
    self.batch_size = config.batch_size
    self.dataset = dataset
    _, self.batch = create_input_ops(dataset, self.batch_size,
                                     is_training=False,
                                     shuffle=False)

    # --- create model ---
    self.model = Model(config)

    self.global_step = tf.contrib.framework.get_or_create_global_step(graph=None)
    self.step_op = tf.no_op(name='step_no_op')

    tf.set_random_seed(1234)

    session_config = tf.ConfigProto(
        allow_soft_placement=True,
        gpu_options=tf.GPUOptions(allow_growth=True),
        device_count={'GPU': 1},
    )
    self.session = tf.Session(config=session_config)

    # --- checkpoint and monitoring ---
    self.saver = tf.train.Saver(max_to_keep=100)

    self.checkpoint_path = config.checkpoint_path
    if self.checkpoint_path is None and self.train_dir:
        self.checkpoint_path = tf.train.latest_checkpoint(self.train_dir)
    if self.checkpoint_path is None:
        log.warn("No checkpoint is given. Just random initialization :-)")
        self.session.run(tf.global_variables_initializer())
    else:
        log.info("Checkpoint path : %s", self.checkpoint_path)
Example 14: eval_run
# Required import: from util import log [as alias]
# Or: from util.log import warn [as alias]
def eval_run(self):
    # load checkpoint
    if self.checkpoint:
        self.saver.restore(self.session, self.checkpoint)
        log.info("Loaded from checkpoint!")

    log.infov("Start 1-epoch Inference and Evaluation")

    log.info("# of examples = %d", len(self.dataset))
    log.info("max_steps = %d", self.config.max_evaluation_steps)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(
        self.session, coord=coord, start=True)

    evaler = EvalManager()
    try:
        for s in xrange(self.config.max_evaluation_steps):
            step, step_time, id, d_loss, g_loss, fake_images, \
                real_images, output = self.run_single_step(self.batch)
            self.log_step_message(s, d_loss, g_loss, step_time)
            evaler.add_batch(id, output)
    except Exception as e:
        coord.request_stop(e)

    coord.request_stop()
    try:
        coord.join(threads, stop_grace_period_secs=3)
    except RuntimeError as e:
        log.warn(str(e))

    if self.config.write_summary_image:
        n = int(np.sqrt(self.batch_size))
        h, w, c = real_images.shape[1:]
        summary_real = np.reshape(np.transpose(
            np.reshape(real_images[:n*n],
                       [n, n*h, w, c]), [1, 0, 2, 3]), [n*h, n*w, c])
        summary_fake = np.reshape(np.transpose(
            np.reshape(fake_images[:n*n],
                       [n, n*h, w, c]), [1, 0, 2, 3]), [n*h, n*w, c])
        summary_image = np.concatenate([summary_real, summary_fake], axis=1)
        log.infov(" Writing a summary image: %s ...",
                  self.config.summary_image_name)
        imwrite(self.config.summary_image_name, summary_image)

    if self.config.output_file:
        evaler.dump_result(self.config.output_file)