This article collects typical usage examples of the Python function tensorflow.contrib.framework.load_variable. If you are trying to work out what load_variable does, how to call it, or what real-world uses look like, the curated code examples below should help.
The following shows 15 code examples of the load_variable function, sorted by popularity by default.
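Before the examples, here is a minimal, self-contained sketch of the API itself (the checkpoint directory /tmp/my_model is a hypothetical placeholder): load_variable reads a single variable from a checkpoint and returns its value as a numpy array, and the companion function list_variables enumerates the (name, shape) pairs stored in the checkpoint.

from tensorflow.contrib.framework import list_variables, load_variable

model_dir = "/tmp/my_model"  # hypothetical directory containing a checkpoint

# Enumerate every variable saved in the checkpoint as (name, shape) pairs.
for name, shape in list_variables(model_dir):
  print(name, shape)

# Read one variable by name; the result is a numpy ndarray, not a Tensor.
global_step = load_variable(model_dir, "global_step")
print(global_step)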
Example 1: bias_
def bias_(self):
  hiddenlayer_bias = [load_variable(
      self._model_dir, name=("dnn/hiddenlayer_%d/biases" % i))
                      for i, _ in enumerate(self._hidden_units)]
  logits_bias = [load_variable(self._model_dir, name="dnn/logits/biases")]
  centered_bias = [load_variable(self._model_dir, name=_CENTERED_BIAS_WEIGHT)]
  return hiddenlayer_bias + logits_bias + centered_bias
Example 2: bias_
def bias_(self):
  hiddenlayer_bias = [load_variable(
      self._model_dir, name=("dnn/hiddenlayer_%d/biases" % i))
                      for i, _ in enumerate(self._hidden_units)]
  logits_bias = [load_variable(self._model_dir, name="dnn/logits/biases")]
  if self._estimator.params["enable_centered_bias"]:
    centered_bias = [
        load_variable(self._model_dir, name=_CENTERED_BIAS_WEIGHT)]
  else:
    centered_bias = []
  return hiddenlayer_bias + logits_bias + centered_bias
Example 3: get_bias
def get_bias(self, model_dir):
  """Returns the bias of the model.

  Args:
    model_dir: Directory where model parameters, graph, etc. are saved.

  Returns:
    The bias weights created by this model.
  """
  return [
      load_variable(
          model_dir, name=(self._scope + "/hiddenlayer_%d/biases" % i))
      for i, _ in enumerate(self._hidden_units)
  ] + [load_variable(model_dir, name=(self._scope + "/logits/biases"))]
Example 4: get_variable_value
def get_variable_value(self, name):
  """Returns the value of the variable given by name.

  Args:
    name: string, name of the tensor.

  Returns:
    Numpy array - value of the tensor.
  """
  return load_variable(self.model_dir, name)
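A short usage sketch for this accessor (hedged: it assumes `estimator` is any tf.contrib.learn estimator that has already been fitted, so a checkpoint exists in its model_dir, and that the variable name follows the DNN naming seen in Examples 1 and 2):

# `estimator` is assumed to be an already-fitted tf.contrib.learn estimator.
weights = estimator.get_variable_value("dnn/hiddenlayer_0/weights")
print(type(weights))   # a numpy ndarray, not a Tensor
print(weights.shape)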
Example 5: get_variable_value
def get_variable_value(self, name):
  """Returns the value of the variable given by name.

  Args:
    name: string, name of the tensor.

  Returns:
    Numpy array - value of the tensor.
  """
  return load_variable(self._model_dir, name)
Example 6: weights_
def weights_(self):
  values = {}
  optimizer_regex = r".*/" + self._optimizer.get_name() + r"(_\d)?$"
  for name, _ in list_variables(self._model_dir):
    if (name.startswith("linear/") and
        name != "linear/bias_weight" and
        not re.match(optimizer_regex, name)):
      values[name] = load_variable(self._model_dir, name)
  if len(values) == 1:
    return values[list(values.keys())[0]]
  return values
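The regex above filters out optimizer slot variables, which the checkpoint stores next to the model weights under names ending in the optimizer's name (for a linear model trained with Ftrl, e.g. names ending in /Ftrl or /Ftrl_1). A sketch of inspecting this, assuming a hypothetical checkpoint directory and the Ftrl optimizer:

import re
from tensorflow.contrib.framework import list_variables

model_dir = "/tmp/linear_model"      # hypothetical checkpoint directory
optimizer_regex = r".*/Ftrl(_\d)?$"  # assumes the Ftrl optimizer was used

# Print every variable in the checkpoint, flagging optimizer slots.
for name, shape in list_variables(model_dir):
  tag = "(optimizer slot)" if re.match(optimizer_regex, name) else ""
  print("%-45s %-12s %s" % (name, shape, tag))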
Example 7: train
def train(self, env, first_update=35, update_frequency=10, episodes=None,
          steps=None, hooks=None, max_steps=None, max_episodes=None):
  """Trains a model given an environment.

  Args:
    env: `Environment` instance.
    first_update: `int`. First timestep at which to compute the loss and
      train_op for the model.
    update_frequency: `int`. The frequency at which to compute the loss and
      train_op.
    steps: Number of steps for which to train the model. If `None`, train
      forever. `steps` works incrementally: calling `fit(steps=10)` twice
      trains for 20 steps in total. If you do not want this incremental
      behaviour, set `max_steps` instead. If set, `max_steps` must be `None`.
    hooks: List of `BaseMonitor` subclass instances. Used for callbacks
      inside the training loop.
    max_steps: Number of total steps for which to train the model. If `None`,
      train forever. If set, `steps` must be `None`. Two calls to
      `fit(steps=100)` mean 200 training iterations; two calls to
      `fit(max_steps=100)` mean that the second call will not do any
      iteration, since the first call already did all 100 steps.
    max_episodes: Number of total episodes for which to train the model. If
      `None`, train forever. If set, `episodes` must be `None`.

  Returns:
    `self`, for chaining.
  """
  if not self.memory.can_sample(first_update):
    raise ValueError("Cannot update the model before gathering enough data")

  if max_steps is not None:
    try:
      start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
      if max_steps <= start_step:
        logging.info('Skipping training since max_steps has already saved.')
        return self
    except:  # pylint: disable=bare-except
      pass

  hooks = self._prepare_train(
      first_update, update_frequency, steps, hooks, max_steps, max_episodes)
  loss = self._train_model(env=env, first_update=first_update,
                           update_frequency=update_frequency, hooks=hooks)
  logging.info('Loss for final step: %s.', loss)
  return self
Example 8: _prepare_train
def _prepare_train(self, episodes=None, steps=None,
                   hooks=None, max_steps=None, max_episodes=None):
  hooks = super(BaseAgent, self)._prepare_train(
      steps=steps, hooks=hooks, max_steps=max_steps)

  if max_episodes is not None:
    try:
      start_episode = load_variable(self._model_dir,
                                    tf.GraphKeys.GLOBAL_EPISODE)
      if max_episodes <= start_episode:
        logging.info('Skipping training since max_episode has already saved.')
        return self
    except:  # pylint: disable=bare-except
      pass

  hooks = self._check_hooks(hooks)
  if steps is not None or max_steps is not None:
    hooks.append(plx_hooks.StopAtEpisodeHook(episodes, max_episodes))
  return hooks
Example 9: get_weights
def get_weights(self, model_dir):
  """Returns the weights per feature of the linear part.

  Args:
    model_dir: Directory where model parameters, graph, etc. are saved.

  Returns:
    The weights created by this model (without the optimizer weights).
  """
  all_variables = [name for name, _ in list_variables(model_dir)]
  values = {}
  optimizer_regex = r".*/" + self._get_optimizer().get_name() + r"(_\d)?$"
  for name in all_variables:
    if (name.startswith(self._scope + "/") and
        name != self._scope + "/bias_weight" and
        not re.match(optimizer_regex, name)):
      values[name] = load_variable(model_dir, name)
  if len(values) == 1:
    return values[list(values.keys())[0]]
  return values
Example 10: train
def train(self, input_fn=None, steps=None, hooks=None, max_steps=None):
  """Trains a model given features and labels provided by `input_fn`.

  Args:
    input_fn: Input function returning a tuple of:
      features - `Tensor` or dictionary of string feature name to `Tensor`.
      labels - `Tensor` or dictionary of `Tensor` with labels.
    steps: Number of steps for which to train the model. If `None`, train
      forever. `steps` works incrementally: calling `fit(steps=10)` twice
      trains for 20 steps in total. If you do not want this incremental
      behaviour, set `max_steps` instead. If set, `max_steps` must be `None`.
    hooks: List of `BaseMonitor` subclass instances. Used for callbacks
      inside the training loop.
    max_steps: Number of total steps for which to train the model. If `None`,
      train forever. If set, `steps` must be `None`. Two calls to
      `fit(steps=100)` mean 200 training iterations; two calls to
      `fit(max_steps=100)` mean that the second call will not do any
      iteration, since the first call already did all 100 steps.

  Returns:
    `self`, for chaining.
  """
  if max_steps is not None:
    try:
      start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
      if max_steps <= start_step:
        logging.info('Skipping training since max_steps has already saved.')
        return self
    except:  # pylint: disable=bare-except
      pass

  hooks = self._prepare_train(steps, hooks, max_steps)
  loss = self._train_model(input_fn=input_fn, hooks=hooks)
  logging.info('Loss for final step: %s.', loss)
  return self
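Examples 7, 10, and 11 all share the same idiom: before building a session, read the saved global step straight out of the checkpoint to decide whether max_steps has already been reached. Extracted as a standalone helper, a minimal sketch (the name saved_global_step is ours, not from the source; it uses the explicit exception types from Example 11 rather than a bare except):

from tensorflow.contrib.framework import load_variable
from tensorflow.python.framework import errors, ops

def saved_global_step(output_dir):
  """Returns the global step recorded in the latest checkpoint, or 0."""
  try:
    return load_variable(output_dir, ops.GraphKeys.GLOBAL_STEP)
  except (errors.NotFoundError, ValueError):
    # No checkpoint written yet, or no global step variable saved in it.
    return 0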
Example 11: _train_internal
def _train_internal(graph,
                    output_dir,
                    train_op,
                    loss_op,
                    global_step_tensor,
                    init_op,
                    init_feed_dict,
                    init_fn,
                    log_every_steps,
                    supervisor_is_chief,
                    supervisor_master,
                    supervisor_save_model_secs,
                    keep_checkpoint_max,
                    supervisor_save_summaries_steps,
                    feed_fn,
                    steps,
                    fail_on_nan_loss,
                    monitors,
                    max_steps):
  """See train."""
  if (steps is not None) and (max_steps is not None):
    raise ValueError('Can not provide both steps and max_steps.')
  if not output_dir:
    raise ValueError('Output directory should be non-empty %s.' % output_dir)
  if train_op is None:
    raise ValueError('Missing train_op.')
  if loss_op is None:
    raise ValueError('Missing loss_op.')

  with graph.as_default():
    global_step_tensor = contrib_variables.assert_or_get_global_step(
        graph, global_step_tensor)
    if global_step_tensor is None:
      raise ValueError('No "global_step" was provided or found in the graph.')

    # Get current step.
    try:
      start_step = load_variable(output_dir, global_step_tensor.name)
    except (errors.NotFoundError, ValueError):
      start_step = 0

    summary_writer = (get_summary_writer(output_dir)
                      if supervisor_is_chief else None)

    # Add default chief monitors if none were provided.
    if not monitors:
      monitors = monitors_lib.get_default_monitors(
          loss_op=loss_op,
          summary_op=logging_ops.get_summary_op(),
          save_summary_steps=supervisor_save_summaries_steps,
          summary_writer=summary_writer) if supervisor_is_chief else []

    # TODO(ipolosukhin): Replace all functionality of Supervisor
    # with Chief-Exclusive Monitors.
    if not supervisor_is_chief:
      # Prune the list of monitors to the ones runnable on all workers.
      monitors = [monitor for monitor in monitors
                  if monitor.run_on_all_workers]

    if max_steps is None:
      max_steps = (start_step + steps) if steps else None

    # Start monitors; these can create graph parts.
    for monitor in monitors:
      monitor.begin(max_steps=max_steps)

    supervisor = tf_supervisor.Supervisor(
        graph,
        init_op=init_op or tf_supervisor.Supervisor.USE_DEFAULT,
        init_feed_dict=init_feed_dict,
        is_chief=supervisor_is_chief,
        logdir=output_dir,
        saver=_make_saver(graph, keep_checkpoint_max),
        global_step=global_step_tensor,
        summary_op=None,
        summary_writer=summary_writer,
        save_model_secs=supervisor_save_model_secs,
        init_fn=init_fn)
    session = supervisor.PrepareSession(master=supervisor_master,
                                        start_standard_services=True)
    supervisor.StartQueueRunners(session)

    with session:
      get_current_step = lambda: session.run(global_step_tensor)

      start_step = get_current_step()
      last_step = start_step
      last_log_step = start_step
      loss_value = None
      logging.info('Training steps [%d,%s)', last_step,
                   'inf' if max_steps is None else str(max_steps))

      excinfo = None
      try:
        while not supervisor.ShouldStop() and (
            (max_steps is None) or (last_step < max_steps)):
          start_time = time.time()
          feed_dict = feed_fn() if feed_fn is not None else None

          outputs, should_stop = _run_with_monitors(
              session, last_step + 1, [train_op, loss_op], feed_dict, monitors)
# ... (the rest of this example's code is omitted) ...
Example 12: _monitored_train
# ... (the beginning of this example's code is omitted) ...
      will not do any iteration since first call did all 100 steps.

  Returns:
    The final loss value.

  Raises:
    ValueError: If `output_dir`, `train_op`, `loss_op`, or
      `global_step_tensor` is not provided. See
      `tf.contrib.framework.get_global_step` for how we look up the latter
      if not provided explicitly.
    NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss
      ever evaluates to `NaN`.
    ValueError: If both `steps` and `max_steps` are not `None`.
  """
  if (steps is not None) and (max_steps is not None):
    raise ValueError('Can not provide both steps and max_steps.')
  if not output_dir:
    raise ValueError('Output directory should be non-empty %s.' % output_dir)
  if train_op is None:
    raise ValueError('Missing train_op.')
  if loss_op is None:
    raise ValueError('Missing loss_op.')
  if hooks is None:
    hooks = []
  if not isinstance(hooks, list):
    raise ValueError('Hooks should be a list.')

  with graph.as_default():
    global_step_tensor = contrib_variables.assert_or_get_global_step(
        graph, global_step_tensor)
  if global_step_tensor is None:
    raise ValueError('No "global_step" was provided or found in the graph.')

  if max_steps is not None:
    try:
      start_step = load_variable(output_dir, global_step_tensor.name)
      if max_steps <= start_step:
        logging.info('Skipping training since max_steps has already saved.')
        return None
    except:  # pylint: disable=bare-except
      pass

  # Adapted SessionRunHooks such as ExportMonitor depend on the
  # CheckpointSaverHook to be executed before they should be executed.
  # The `hooks` param comprises deprecated monitor hooks (such as
  # ExportMonitor); they are appended after the basic_session_run_hooks.
  all_hooks = []
  with graph.as_default():
    all_hooks.append(basic_session_run_hooks.NanTensorHook(
        loss_op, fail_on_nan_loss=fail_on_nan_loss))
    if log_every_steps > 0:
      all_hooks.append(basic_session_run_hooks.LoggingTensorHook({
          'loss': loss_op.name,
          'step': global_step_tensor.name
      }, every_n_iter=log_every_steps))

    def make_saver():
      return tf_saver.Saver(
          sharded=True, max_to_keep=keep_checkpoint_max, defer_build=True,
          write_version=saver_pb2.SaverDef.V1)

    scaffold = monitored_session.Scaffold(
        init_op=init_op,
        init_feed_dict=init_feed_dict,
        init_fn=init_fn,
        saver=monitored_session.Scaffold.get_or_default('saver',
                                                        ops.GraphKeys.SAVERS,
                                                        make_saver))
Example 13: bias_
def bias_(self):
  return load_variable(self._model_dir, name="linear/bias_weight")
Example 14: weights_
def weights_(self):
  hiddenlayer_weights = [load_variable(
      self._model_dir, name=("dnn/hiddenlayer_%d/weights" % i))
                         for i, _ in enumerate(self._hidden_units)]
  logits_weights = [load_variable(self._model_dir, name="dnn/logits/weights")]
  return hiddenlayer_weights + logits_weights
Example 15: get_variable_value
def get_variable_value(self, name):
  return load_variable(self.model_dir, name)