This page collects typical usage examples of the Python method tensorflow.get_logger. If you are wondering what exactly tensorflow.get_logger does, how to call it, or what it looks like in real code, the curated samples below should help. You can also explore other usages of the tensorflow module.
The following presents 15 code examples of tensorflow.get_logger, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
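As a quick primer before the examples: tf.get_logger() returns Python's standard logging.Logger instance named "tensorflow", so the familiar logging API applies throughout the samples below. A minimal sketch:

import logging
import tensorflow as tf

logger = tf.get_logger()       # a standard logging.Logger named "tensorflow"
logger.setLevel(logging.INFO)  # control TensorFlow's Python-side verbosity
logger.info("step %d: loss = %f", 100, 0.25)  # lazy %-style formatting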
Example 1: should_stop
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_logger [as alias]
def should_stop(self):
    """Returns ``True`` if early stopping conditions are met."""
    if self._early_stopping is None:
        return False
    target_metric = self._early_stopping.metric
    higher_is_better = self._is_higher_better_for_metric(target_metric)
    metrics = self._get_metric_history(target_metric)
    should_stop = early_stop(
        metrics,
        self._early_stopping.steps,
        min_improvement=self._early_stopping.min_improvement,
        higher_is_better=higher_is_better)
    if should_stop:
        tf.get_logger().warning(
            "Evaluation metric '%s' did not improve more than %f in the last %d evaluations",
            target_metric,
            self._early_stopping.min_improvement,
            self._early_stopping.steps)
    return should_stop
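The early_stop helper is not shown on this page. A minimal sketch of the check it appears to perform, assuming the metric history is ordered from oldest to newest — the real implementation may differ:

def early_stop(metrics, steps, min_improvement=0, higher_is_better=False):
    # Stop if none of the last `steps` values improved on the value that
    # preceded them by more than `min_improvement`.
    if len(metrics) < steps + 1:
        return False
    reference = metrics[-steps - 1]
    for metric in metrics[-steps:]:
        improved = (metric > reference + min_improvement) if higher_is_better \
            else (metric < reference - min_improvement)
        if improved:
            return False
    return True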
Example 2: __init__
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_logger [as alias]
def __init__(self, variables, step, decay=0.9999):
    """Initializes the moving average object.

    Args:
      variables: The list of variables for which to maintain a moving average.
      step: The training step counter as a ``tf.Variable``.
      decay: The decay rate of the exponential moving average. Usually close to
        1, e.g. 0.9999, see the complete formula on
        https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage.

    Raises:
      TypeError: if :obj:`step` is not a ``tf.Variable``.
    """
    if not isinstance(step, tf.Variable):
        raise TypeError("step should be a tf.Variable")
    if decay < 0.9 or decay > 1:
        tf.get_logger().warning(
            "Moving average decay should be close to 1 (e.g. 0.9999) but you "
            "passed %f, is it correct? See https://www.tensorflow.org/api_docs"
            "/python/tf/train/ExponentialMovingAverage for details about the "
            "formula and recommended decay values.",
            decay)  # pass the value so the %f placeholder is actually filled
    self._ema = tf.train.ExponentialMovingAverage(decay, num_updates=step)
    self._variables = variables
    self.update()
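A hypothetical usage of this wrapper. The class name MovingAverage and the update() method (called at the end of the constructor above) are inferred from the snippet, and `model`, `dataset`, and `train_step` are placeholders:

step = tf.Variable(0, trainable=False, dtype=tf.int64)
ema = MovingAverage(model.trainable_variables, step, decay=0.9999)

for batch in dataset:
    train_step(model, batch)  # hypothetical training step
    step.assign_add(1)
    ema.update()              # refresh the exponential moving averages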
Example 3: _check_static_batch_beam_maybe
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_logger [as alias]
def _check_static_batch_beam_maybe(shape, batch_size, beam_width):
    """Warns (and returns ``False``) if dimensions are known statically and
    can not be reshaped to [batch_size, beam_size, -1]."""
    reshaped_shape = tf.TensorShape([batch_size, beam_width, None])
    assert len(shape.dims) > 0
    if batch_size is None or shape[0] is None:
        return True  # not statically known => no check
    if shape[0] == batch_size * beam_width:
        return True  # flattened, matching
    has_second_dim = shape.ndims >= 2 and shape[1] is not None
    if has_second_dim and shape[0] == batch_size and shape[1] == beam_width:
        return True  # non-flattened, matching
    # Otherwise we could not find a match and warn:
    tf.get_logger().warning(  # logger.warn is deprecated in favor of warning
        "TensorArray reordering expects elements to be "
        "reshapable to %s which is incompatible with the "
        "current shape %s. Consider setting "
        "reorder_tensor_arrays to False to disable TensorArray "
        "reordering during the beam search.",
        reshaped_shape, shape)
    return False
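A quick check of the three layouts this function accepts; the example shapes below are for illustration only:

shape_flat = tf.TensorShape([12, 7])     # 12 == batch_size (4) * beam_width (3)
shape_split = tf.TensorShape([4, 3, 7])  # explicit [batch, beam, depth]
shape_bad = tf.TensorShape([5, 2])       # neither layout

assert _check_static_batch_beam_maybe(shape_flat, batch_size=4, beam_width=3)
assert _check_static_batch_beam_maybe(shape_split, batch_size=4, beam_width=3)
# Logs a warning and returns False:
assert not _check_static_batch_beam_maybe(shape_bad, batch_size=4, beam_width=3)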
Example 4: main
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_logger [as alias]
def main(m_path, img_path, out_dir):
    # Presumably a project-local helper: tf.get_logger() takes no arguments.
    logger = get_logger("inference")
    logger.info(f"generating image from {img_path}")
    imported = tf.saved_model.load(m_path)
    f = imported.signatures["serving_default"]
    img = np.array(Image.open(img_path).convert("RGB"))
    img = np.expand_dims(img, 0).astype(np.float32) / 127.5 - 1  # scale to [-1, 1]
    out = f(tf.constant(img))['output_1']
    out = ((out.numpy().squeeze() + 1) * 127.5).astype(np.uint8)  # back to [0, 255]
    if out_dir != "" and not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    if out_dir == "":
        out_dir = "."
    out_path = os.path.join(out_dir, os.path.split(img_path)[1])
    imwrite(out_path, out)
    logger.info(f"generated image saved to {out_path}")
Example 5: main
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_logger [as alias]
def main(m_path, img_path, out_dir, light=False):
    # Presumably a project-local helper: tf.get_logger() takes no arguments.
    logger = get_logger("inference")
    logger.info(f"generating image from {img_path}")
    try:
        g = Generator(light=light)
        g.load_weights(tf.train.latest_checkpoint(m_path))
    except ValueError as e:
        logger.error(e)
        logger.error("Failed to load specified weight.")
        logger.error("If you trained your model with --light, "
                     "consider adding --light when executing this script; otherwise, "
                     "do not add --light when executing this script.")
        exit(1)
    img = np.array(Image.open(img_path).convert("RGB"))
    img = np.expand_dims(img, 0).astype(np.float32) / 127.5 - 1
    out = ((g(img).numpy().squeeze() + 1) * 127.5).astype(np.uint8)
    if out_dir != "" and not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    if out_dir == "":
        out_dir = "."
    out_path = os.path.join(out_dir, os.path.split(img_path)[1])
    imwrite(out_path, out)
    logger.info(f"generated image saved to {out_path}")
Example 6: test_hyperband_integration
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_logger [as alias]
def test_hyperband_integration(tmp_dir):
    tuner = hyperband_module.Hyperband(
        objective='val_loss',
        hypermodel=build_model,
        hyperband_iterations=2,
        max_epochs=6,
        factor=3,
        directory=tmp_dir)
    x, y = np.ones((2, 5)), np.ones((2, 1))
    tuner.search(x, y, validation_data=(x, y))

    # Make sure Oracle is registering new HPs.
    updated_hps = tuner.oracle.get_space().values
    assert 'units1' in updated_hps
    assert 'bias1' in updated_hps

    tf.get_logger().setLevel(logging.ERROR)
    best_score = tuner.oracle.get_best_trials()[0].score
    best_model = tuner.get_best_models()[0]
    assert best_model.evaluate(x, y) == best_score
Example 7: main
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_logger [as alias]
def main():
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("run", choices=["train", "translate"],
                        help="Run type.")
    parser.add_argument("--src", required=True,
                        help="Path to the source file.")
    parser.add_argument("--tgt",
                        help="Path to the target file.")
    parser.add_argument("--src_vocab", required=True,
                        help="Path to the source vocabulary.")
    parser.add_argument("--tgt_vocab", required=True,
                        help="Path to the target vocabulary.")
    parser.add_argument("--model_dir", default="checkpoint",
                        help="Directory where checkpoints are written.")
    args = parser.parse_args()

    data_config = {
        "source_vocabulary": args.src_vocab,
        "target_vocabulary": args.tgt_vocab
    }
    # `model` and `checkpoint` are module-level objects in the original script.
    model.initialize(data_config)
    checkpoint_manager = tf.train.CheckpointManager(checkpoint, args.model_dir, max_to_keep=5)
    if checkpoint_manager.latest_checkpoint is not None:
        tf.get_logger().info("Restoring parameters from %s", checkpoint_manager.latest_checkpoint)
        checkpoint.restore(checkpoint_manager.latest_checkpoint)

    if args.run == "train":
        train(args.src, args.tgt, checkpoint_manager)
    elif args.run == "translate":
        translate(args.src)
Example 8: _record_results
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_logger [as alias]
def _record_results(self, step, results):
    # Clear history for steps that are greater than step.
    while self._metrics_history and self._metrics_history[-1][0] > step:
        self._metrics_history.pop()
    self._metrics_history.append((step, dict(results)))
    tf.get_logger().info(
        "Evaluation result for step %d: %s",
        step,
        " ; ".join("%s = %f" % (k, v) for k, v in results.items()))
    with self._summary_writer.as_default():
        for key, value in results.items():
            tf.summary.scalar("%s/%s" % (_SUMMARIES_SCOPE, key), value, step=step)
        self._summary_writer.flush()
Example 9: _maybe_export
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_logger [as alias]
def _maybe_export(self, step, results):
    if self._export_on_best is None or not self.is_best(self._export_on_best):
        return
    export_dir = os.path.join(self._export_dir, str(step))
    tf.get_logger().info("Exporting model to %s (best %s so far: %f)",
                         export_dir, self._export_on_best, results[self._export_on_best])
    self._model.export(export_dir, exporter=self._exporter)
Example 10: _set_log_level
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_logger [as alias]
def _set_log_level(log_level):
    tf.get_logger().setLevel(log_level)
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = str(_PYTHON_TO_TENSORFLOW_LOGGING_LEVEL[log_level])
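The mapping dict is not shown on this page. A plausible definition, based on how TF_CPP_MIN_LOG_LEVEL filters TensorFlow's C++ logs (0 = show all, 1 = filter INFO, 2 = filter WARNING, 3 = filter ERROR); the actual dict in the source project may differ:

import logging

# Assumed mapping from Python logging levels to TF_CPP_MIN_LOG_LEVEL values.
_PYTHON_TO_TENSORFLOW_LOGGING_LEVEL = {
    logging.DEBUG: 0,
    logging.INFO: 0,
    logging.WARNING: 1,
    logging.ERROR: 2,
    logging.CRITICAL: 3,
}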
Example 11: save
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_logger [as alias]
def save(self, step=None):
    """Saves a checkpoint.

    Args:
      step: The step to save for. If ``None``, get the value from ``optimizer.iterations``.

    Returns:
      The path to the saved checkpoint.
    """
    if step is None:
        step = self._optimizer.iterations
    path = self._checkpoint_manager.save(checkpoint_number=step)
    tf.get_logger().info("Saved checkpoint %s", path)
    return path
Example 12: average_checkpoints_into_layer
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_logger [as alias]
def average_checkpoints_into_layer(checkpoints, layer, layer_prefix):
    """Updates the layer weights with their average value in the checkpoints.

    Args:
      checkpoints: A non-empty list of checkpoint paths.
      layer: A ``tf.keras.layers.Layer`` instance.
      layer_prefix: The name/scope that prefixes the layer variables names in the
        checkpoints.

    Raises:
      ValueError: if :obj:`checkpoints` is empty.
      ValueError: if :obj:`layer` is not already built.

    See Also:
      :func:`opennmt.utils.average_checkpoints`
    """
    if not checkpoints:
        raise ValueError("There should be at least one checkpoint")
    if not layer.built:
        raise ValueError("The layer should be built before calling this function")

    # Reset the layer variables to 0.
    for variable in layer.variables:
        variable.assign(tf.zeros_like(variable))

    # Get a map from variable names in the checkpoint to variables in the layer.
    _, names_to_variables = misc.get_variables_name_mapping(layer, root_key=layer_prefix)

    num_checkpoints = len(checkpoints)
    tf.get_logger().info("Averaging %d checkpoints...", num_checkpoints)
    for checkpoint_path in checkpoints:
        tf.get_logger().info("Reading checkpoint %s...", checkpoint_path)
        reader = tf.train.load_checkpoint(checkpoint_path)
        for path in reader.get_variable_to_shape_map().keys():
            if not path.startswith(layer_prefix) or ".OPTIMIZER_SLOT" in path:
                continue
            variable = names_to_variables[path]
            value = reader.get_tensor(path)
            variable.assign_add(value / num_checkpoints)  # accumulate the running mean
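A hypothetical call, assuming a built model whose variables live under a "model" prefix in the checkpoints; the paths and prefix below are placeholders, not values from the original project:

checkpoints = ["ckpt/ckpt-1000", "ckpt/ckpt-2000", "ckpt/ckpt-3000"]
average_checkpoints_into_layer(checkpoints, model, layer_prefix="model")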
Example 13: export
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_logger [as alias]
def export(self, model, export_dir):
    """Exports :obj:`model` to :obj:`export_dir`.

    Raises:
      ValueError: if :obj:`model` is not supported by this exporter.
    """
    self._export_model(model, export_dir)
    with tempfile.TemporaryDirectory() as tmp_dir:
        extra_assets = model.export_assets(tmp_dir)
        if extra_assets:
            assets_extra = os.path.join(export_dir, "assets.extra")
            tf.io.gfile.makedirs(assets_extra)
            for filename, path in extra_assets.items():
                tf.io.gfile.copy(path, os.path.join(assets_extra, filename), overwrite=True)
            tf.get_logger().info("Extra assets written to: %s", assets_extra)
Example 14: __init__
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_logger [as alias]
def __init__(self,
             model,
             config,
             auto_config=False,
             mixed_precision=False,
             seed=None):
    """Initializes the runner parameters.

    Args:
      model: A :class:`opennmt.models.Model` instance to run or a callable that
        returns such instance.
      config: The run configuration.
      auto_config: If ``True``, use automatic configuration values defined by
        :obj:`model`.
      mixed_precision: Enable mixed precision.
      seed: The random seed to set.

    Raises:
      TypeError: if :obj:`model` is not a :class:`opennmt.models.Model` instance
        or a callable.
    """
    if isinstance(model, models.Model):
        self._model = model
        self._model_fn = lambda: misc.clone_layer(model)
    elif callable(model):
        self._model = model()
        self._model_fn = model
    else:
        raise TypeError("model should be an opennmt.models.Model instance or a callable")
    tf.get_logger().info("Using model:\n%s", self._model)
    self._optimizer = None
    self._config = copy.deepcopy(config)
    self._auto_config = auto_config
    self._mixed_precision = mixed_precision
    if mixed_precision:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})
    if seed is not None:
        np.random.seed(seed)
        random.seed(seed)
        tf.random.set_seed(seed)
Example 15: compute_loss
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_logger [as alias]
def compute_loss(self, outputs, labels, training=True):
    params = self.params
    if not isinstance(outputs, dict):
        outputs = dict(logits=outputs)
    logits = outputs["logits"]
    noisy_logits = outputs.get("noisy_logits")
    attention = outputs.get("attention")
    if noisy_logits is not None and params.get("contrastive_learning"):
        return losses.max_margin_loss(
            logits,
            labels["ids_out"],
            labels["length"],
            noisy_logits,
            labels["noisy_ids_out"],
            labels["noisy_length"],
            eta=params.get("max_margin_eta", 0.1))
    loss, loss_normalizer, loss_token_normalizer = losses.cross_entropy_sequence_loss(
        logits,
        labels["ids_out"],
        labels["length"],
        label_smoothing=params.get("label_smoothing", 0.0),
        average_in_time=params.get("average_loss_in_time", False),
        training=training)
    if training:
        gold_alignments = labels.get("alignment")
        guided_alignment_type = params.get("guided_alignment_type")
        if gold_alignments is not None and guided_alignment_type is not None:
            if attention is None:
                tf.get_logger().warning("This model did not return attention vectors; "
                                        "guided alignment will not be applied")
            else:
                loss += losses.guided_alignment_cost(
                    attention[:, :-1],  # Do not constrain last timestep.
                    gold_alignments,
                    sequence_length=self.labels_inputter.get_length(labels, ignore_special_tokens=True),
                    cost_type=guided_alignment_type,
                    weight=params.get("guided_alignment_weight", 1))
    return loss, loss_normalizer, loss_token_normalizer