This article collects typical usage examples of the Python method tensorflow.python.ops.resources.initialize_resources. If you are wondering what resources.initialize_resources does, how to call it, or what it looks like in practice, the curated code examples below may help. You can also explore the module tensorflow.python.ops.resources in which the method is defined.
The following 14 code examples of resources.initialize_resources are shown, sorted by popularity by default.
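Before the individual examples, here is a minimal, self-contained sketch of the pattern most of them share: build resource-backed ops in a graph, then run the op returned by resources.initialize_resources inside a session, usually grouped with the ordinary variable initializer. The sketch targets TF 1.x-style graph mode; the tf.compat.v1 alias and the disable_eager_execution() call are assumptions added so it also runs under TF 2.x. It illustrates the common pattern and is not taken from any of the examples below.

import tensorflow.compat.v1 as tf
from tensorflow.python.ops import resources

tf.disable_eager_execution()

with tf.Graph().as_default():
  var = tf.get_variable('v', initializer=tf.constant([1.0, 2.0]))
  # Group the variable initializer with the initializer for any shared
  # resources (lookup tables, quantile streams, tree ensembles, ...).
  # initialize_resources returns a no-op when the resource list is empty,
  # so the grouped op is always safe to run.
  init_op = tf.group(
      tf.global_variables_initializer(),
      resources.initialize_resources(resources.shared_resources()))
  with tf.Session() as sess:
    sess.run(init_op)
    print(sess.run(var))  # -> [1. 2.]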
Example 1: _create_fake_checkpoint_with_tree_ensemble_proto
# Required import: from tensorflow.python.ops import resources [as alias]
# Or: from tensorflow.python.ops.resources import initialize_resources [as alias]
def _create_fake_checkpoint_with_tree_ensemble_proto(self, est,
                                                     tree_ensemble_text):
  with tf.Graph().as_default():
    with ops.name_scope('boosted_trees') as name:
      tree_ensemble = boosted_trees_ops.TreeEnsemble(name=name)
      tree_ensemble_proto = boosted_trees_pb2.TreeEnsemble()
      text_format.Merge(tree_ensemble_text, tree_ensemble_proto)
      stamp_token, _ = tree_ensemble.serialize()
      restore_op = tree_ensemble.deserialize(
          stamp_token, tree_ensemble_proto.SerializeToString())
      with tf.compat.v1.Session() as sess:
        resources.initialize_resources(resources.shared_resources()).run()
        restore_op.run()
        saver = tf.compat.v1.train.Saver()
        save_path = os.path.join(est.model_dir, 'model.ckpt')
        saver.save(sess, save_path)
Example 2: testBasicResourceVariable
# Required import: from tensorflow.python.ops import resources [as alias]
# Or: from tensorflow.python.ops.resources import initialize_resources [as alias]
def testBasicResourceVariable(self):
  for dtype in [tf.half, tf.float32, tf.float64]:
    with self.test_session():
      var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
      var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
      grads0 = tf.constant([0.1, 0.1], dtype=dtype)
      grads1 = tf.constant([0.01, 0.01], dtype=dtype)
      sgd_op = tf.train.GradientDescentOptimizer(3.0).apply_gradients(
          zip([grads0, grads1], [var0, var1]))
      # TODO(apassos): calling initialize_resources on all resources here
      # doesn't work because the sessions and graph are reused across unit
      # tests and this would mean trying to reinitialize variables. Figure
      # out a long-term solution for this.
      resources.initialize_resources([var0, var1]).run()
      # Fetch params to validate initial values.
      self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
      self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval())
      # Run 1 step of sgd.
      sgd_op.run()
      # Validate updated params.
      self.assertAllCloseAccordingToType(
          [1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], var0.eval())
      self.assertAllCloseAccordingToType(
          [3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], var1.eval())
Example 3: testMinimizeResourceVariable
# Required import: from tensorflow.python.ops import resources [as alias]
# Or: from tensorflow.python.ops.resources import initialize_resources [as alias]
def testMinimizeResourceVariable(self):
  for dtype in [tf.half, tf.float32, tf.float64]:
    with self.test_session():
      var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
      var1 = resource_variable_ops.ResourceVariable([3.0], dtype=dtype)
      x = tf.constant([[4.0], [5.0]], dtype=dtype)
      pred = tf.matmul(var0, x) + var1
      loss = pred * pred
      sgd_op = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
      # TODO(apassos): calling initialize_resources on all resources here
      # doesn't work because the sessions and graph are reused across unit
      # tests and this would mean trying to reinitialize variables. Figure
      # out a long-term solution for this.
      resources.initialize_resources([var0, var1]).run()
      # Fetch params to validate initial values.
      self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
      self.assertAllCloseAccordingToType([3.0], var1.eval())
      # Run 1 step of sgd.
      sgd_op.run()
      # Validate updated params.
      np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
      np_grad = 2 * np_pred
      self.assertAllCloseAccordingToType(
          [[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], var0.eval())
      self.assertAllCloseAccordingToType([3.0 - np_grad], var1.eval())
Example 4: testMinimizeSparseResourceVariable
# Required import: from tensorflow.python.ops import resources [as alias]
# Or: from tensorflow.python.ops.resources import initialize_resources [as alias]
def testMinimizeSparseResourceVariable(self):
  for dtype in [tf.half, tf.float32, tf.float64]:
    with self.test_session():
      var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
      var1 = resource_variable_ops.ResourceVariable([3.0], dtype=dtype)
      x = tf.constant([[4.0], [5.0]], dtype=dtype)
      # Read var0 through a sparse embedding lookup so its gradient is sparse.
      pred = tf.matmul(tf.nn.embedding_lookup([var0], [0]), x) + var1
      loss = pred * pred
      sgd_op = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
      # TODO(apassos): calling initialize_resources on all resources here
      # doesn't work because the sessions and graph are reused across unit
      # tests and this would mean trying to reinitialize variables. Figure
      # out a long-term solution for this.
      resources.initialize_resources([var0, var1]).run()
      # Fetch params to validate initial values.
      self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
      self.assertAllCloseAccordingToType([3.0], var1.eval())
      # Run 1 step of sgd.
      sgd_op.run()
      # Validate updated params.
      np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
      np_grad = 2 * np_pred
      self.assertAllCloseAccordingToType(
          [[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], var0.eval())
      self.assertAllCloseAccordingToType([3.0 - np_grad], var1.eval())
Example 5: finalize
# Required import: from tensorflow.python.ops import resources [as alias]
# Or: from tensorflow.python.ops.resources import initialize_resources [as alias]
def finalize(self):
  """Creates operations if needed and finalizes the graph."""
  if self._init_op is None:
    def default_init_op():
      return control_flow_ops.group(
          variables.global_variables_initializer(),
          resources.initialize_resources(resources.shared_resources()))
    self._init_op = Scaffold.get_or_default(
        'init_op', ops.GraphKeys.INIT_OP, default_init_op)
  if self._ready_op is None:
    def default_ready_op():
      return array_ops.concat(
          0,
          [variables.report_uninitialized_variables(),
           resources.report_uninitialized_resources()])
    self._ready_op = Scaffold.get_or_default(
        'ready_op', ops.GraphKeys.READY_OP, default_ready_op)
  if self._local_init_op is None:
    self._local_init_op = Scaffold.get_or_default(
        'local_init_op', ops.GraphKeys.LOCAL_INIT_OP,
        Scaffold._default_local_init_op)
  if self._summary_op is None:
    self._summary_op = Scaffold.get_or_default(
        'summary_op', ops.GraphKeys.SUMMARY_OP, summary.merge_all)
  # pylint: disable=g-long-lambda
  if self._saver is None:
    self._saver = Scaffold.get_or_default(
        'saver',
        ops.GraphKeys.SAVERS,
        lambda: training_saver.Saver(sharded=True, allow_empty=True,
                                     write_version=saver_pb2.SaverDef.V2))
  # pylint: enable=g-long-lambda
  self._saver.build()
  ops.get_default_graph().finalize()
  return self
Example 6: testInitialize
# Required import: from tensorflow.python.ops import resources [as alias]
# Or: from tensorflow.python.ops.resources import initialize_resources [as alias]
def testInitialize(self):
  with self.test_session():
    handle = test_ops.stub_resource_handle_op(container="a", shared_name="b")
    resources.register_resource(
        handle=handle,
        create_op=test_ops.resource_create_op(handle),
        is_initialized_op=test_ops.resource_initialized_op(handle))
    self.assertEqual(
        len(resources.report_uninitialized_resources(
            resources.shared_resources()).eval()), 1)
    resources.initialize_resources(resources.shared_resources()).run()
    self.assertEqual(
        len(resources.report_uninitialized_resources(
            resources.shared_resources()).eval()), 0)
Example 7: _init_graph
# Required import: from tensorflow.python.ops import resources [as alias]
# Or: from tensorflow.python.ops.resources import initialize_resources [as alias]
def _init_graph(self):
  # Initialize all weights.
  if not self._is_initialized:
    self.saver = tf.train.Saver()
    init_vars = tf.group(
        tf.global_variables_initializer(),
        resources.initialize_resources(resources.shared_resources()))
    self.session.run(init_vars)
    self._is_initialized = True
  # Restore weights if needed.
  if self._to_be_restored:
    self.saver = tf.train.Saver()
    self.saver.restore(self.session, self._to_be_restored)
    self._to_be_restored = False
Example 8: finalize
# Required import: from tensorflow.python.ops import resources [as alias]
# Or: from tensorflow.python.ops.resources import initialize_resources [as alias]
def finalize(self):
  """Creates operations if needed and finalizes the graph."""
  if self._init_op is None:
    def default_init_op():
      return control_flow_ops.group(
          variables.global_variables_initializer(),
          resources.initialize_resources(resources.shared_resources()))
    self._init_op = Scaffold.get_or_default(
        'init_op', ops.GraphKeys.INIT_OP, default_init_op)
  if self._ready_op is None:
    def default_ready_op():
      return array_ops.concat([
          variables.report_uninitialized_variables(),
          resources.report_uninitialized_resources()
      ], 0)
    self._ready_op = Scaffold.get_or_default(
        'ready_op', ops.GraphKeys.READY_OP, default_ready_op)
  if self._ready_for_local_init_op is None:
    def default_ready_for_local_init_op():
      return variables.report_uninitialized_variables(
          variables.global_variables())
    self._ready_for_local_init_op = Scaffold.get_or_default(
        'ready_for_local_init_op', ops.GraphKeys.READY_FOR_LOCAL_INIT_OP,
        default_ready_for_local_init_op)
  if self._local_init_op is None:
    self._local_init_op = Scaffold.get_or_default(
        'local_init_op', ops.GraphKeys.LOCAL_INIT_OP,
        Scaffold._default_local_init_op)
  if self._summary_op is None:
    self._summary_op = Scaffold.get_or_default(
        'summary_op', ops.GraphKeys.SUMMARY_OP, summary.merge_all)
  if self._saver is None:
    self._saver = training_saver._get_saver_or_default()  # pylint: disable=protected-access
  self._saver.build()
  ops.get_default_graph().finalize()
  return self
Example 9: run_feeds_iter
# Required import: from tensorflow.python.ops import resources [as alias]
# Or: from tensorflow.python.ops.resources import initialize_resources [as alias]
def run_feeds_iter(output_dict, feed_dicts, restore_checkpoint_path=None):
  """Run `output_dict` tensors with each input in `feed_dicts`.

  If `restore_checkpoint_path` is supplied, restore from checkpoint.
  Otherwise, init all variables.

  Args:
    output_dict: A `dict` mapping string names to `Tensor` objects to run.
      Tensors must all be from the same graph.
    feed_dicts: Iterable of `dict` objects of input values to feed.
    restore_checkpoint_path: A string containing the path to a checkpoint to
      restore.

  Yields:
    A sequence of dicts of values read from `output_dict` tensors, one item
    yielded for each item in `feed_dicts`. Keys are the same as `output_dict`,
    values are the results read from the corresponding `Tensor` in
    `output_dict`.

  Raises:
    ValueError: if `output_dict` or `feed_dicts` is None or empty.
  """
  if not output_dict:
    raise ValueError('output_dict is invalid: %s.' % output_dict)
  if not feed_dicts:
    raise ValueError('feed_dicts is invalid: %s.' % feed_dicts)
  graph = contrib_ops.get_graph_from_inputs(output_dict.values())
  with graph.as_default() as g:
    with tf_session.Session('') as session:
      session.run(
          resources.initialize_resources(resources.shared_resources() +
                                         resources.local_resources()))
      if restore_checkpoint_path:
        _restore_from_checkpoint(session, g, restore_checkpoint_path)
      else:
        session.run(variables.global_variables_initializer())
        session.run(variables.local_variables_initializer())
        session.run(lookup_ops.tables_initializer())
      coord = coordinator.Coordinator()
      threads = None
      try:
        threads = queue_runner.start_queue_runners(session, coord=coord)
        for f in feed_dicts:
          yield session.run(output_dict, f)
      finally:
        coord.request_stop()
        if threads:
          coord.join(threads, stop_grace_period_secs=120)
Example 10: finalize
# Required import: from tensorflow.python.ops import resources [as alias]
# Or: from tensorflow.python.ops.resources import initialize_resources [as alias]
def finalize(self):
  """Creates operations if needed and finalizes the graph."""
  if self._init_op is None:
    def default_init_op():
      return control_flow_ops.group(
          variables.global_variables_initializer(),
          resources.initialize_resources(resources.shared_resources()))
    self._init_op = Scaffold.get_or_default(
        'init_op', ops.GraphKeys.INIT_OP, default_init_op)
  if self._ready_op is None:
    def default_ready_op():
      return array_ops.concat([
          variables.report_uninitialized_variables(),
          resources.report_uninitialized_resources()
      ], 0)
    self._ready_op = Scaffold.get_or_default(
        'ready_op', ops.GraphKeys.READY_OP, default_ready_op)
  if self._ready_for_local_init_op is None:
    def default_ready_for_local_init_op():
      return variables.report_uninitialized_variables(
          variables.global_variables())
    self._ready_for_local_init_op = Scaffold.get_or_default(
        'ready_for_local_init_op', ops.GraphKeys.READY_FOR_LOCAL_INIT_OP,
        default_ready_for_local_init_op)
  if self._local_init_op is None:
    self._local_init_op = Scaffold.get_or_default(
        'local_init_op', ops.GraphKeys.LOCAL_INIT_OP,
        Scaffold._default_local_init_op)
  if self._summary_op is None:
    self._summary_op = Scaffold.get_or_default(
        'summary_op', ops.GraphKeys.SUMMARY_OP, summary.merge_all)
  # pylint: disable=g-long-lambda
  if self._saver is None:
    self._saver = Scaffold.get_or_default(
        'saver',
        ops.GraphKeys.SAVERS,
        lambda: training_saver.Saver(sharded=True, allow_empty=True,
                                     write_version=saver_pb2.SaverDef.V2))
  # pylint: enable=g-long-lambda
  self._saver.build()
  ops.get_default_graph().finalize()
  return self
Example 11: run_feeds_iter
# Required import: from tensorflow.python.ops import resources [as alias]
# Or: from tensorflow.python.ops.resources import initialize_resources [as alias]
def run_feeds_iter(output_dict, feed_dicts, restore_checkpoint_path=None):
  """Run `output_dict` tensors with each input in `feed_dicts`.

  If `restore_checkpoint_path` is supplied, restore from checkpoint.
  Otherwise, init all variables.

  Args:
    output_dict: A `dict` mapping string names to `Tensor` objects to run.
      Tensors must all be from the same graph.
    feed_dicts: Iterable of `dict` objects of input values to feed.
    restore_checkpoint_path: A string containing the path to a checkpoint to
      restore.

  Yields:
    A sequence of dicts of values read from `output_dict` tensors, one item
    yielded for each item in `feed_dicts`. Keys are the same as `output_dict`,
    values are the results read from the corresponding `Tensor` in
    `output_dict`.

  Raises:
    ValueError: if `output_dict` or `feed_dicts` is None or empty.
  """
  if not output_dict:
    raise ValueError('output_dict is invalid: %s.' % output_dict)
  if not feed_dicts:
    raise ValueError('feed_dicts is invalid: %s.' % feed_dicts)
  graph = contrib_ops.get_graph_from_inputs(output_dict.values())
  with graph.as_default() as g:
    with tf_session.Session('') as session:
      session.run(
          resources.initialize_resources(resources.shared_resources() +
                                         resources.local_resources()))
      if restore_checkpoint_path:
        _restore_from_checkpoint(session, g, restore_checkpoint_path)
      else:
        session.run(variables.global_variables_initializer())
        session.run(variables.local_variables_initializer())
        session.run(data_flow_ops.tables_initializer())
      coord = coordinator.Coordinator()
      threads = None
      try:
        threads = queue_runner.start_queue_runners(session, coord=coord)
        for f in feed_dicts:
          yield session.run(output_dict, f)
      finally:
        coord.request_stop()
        if threads:
          coord.join(threads, stop_grace_period_secs=120)
Example 12: __init__
# Required import: from tensorflow.python.ops import resources [as alias]
# Or: from tensorflow.python.ops.resources import initialize_resources [as alias]
def __init__(self, options):
  # The current implementation of the Quantiles Ops requires mutation of
  # resources, which is "impure" and necessitates atomicity. This lock
  # enforces those invariants by protecting access to all callables of this
  # graph state.
  #
  # TODO(KesterTong): Consider making this lock private and having methods of
  # this object only grab it when they need it. When that is done, remember
  # to:
  # a) Annotate this class as thread-safe (as opposed to thread-hostile) and
  #    update its documentation.
  # b) Make all thread-hostile methods private and remove "thread_hostile"
  #    from their names.
  # c) Expose the right public methods.
  #
  # TODO(KesterTong): Perhaps the TF Quantiles Ops could be changed so that
  # they are truly pure. That would allow sharing the _QuantilesGraphState
  # without a need for locking.
  self.lock = threading.Lock()
  # Create a new session with a new graph for quantile ops.
  with tf.compat.v1.Graph().as_default() as graph:
    self._session = tf.compat.v1.Session(graph=graph, config=options.tf_config)
    # We will instantiate a single resource for the purpose of computing the
    # quantiles operations.
    self._resource = self._create_resource(
        name='quantiles_combiner',
        eps=options.epsilon,
        max_elements=1 << 32,
        num_streams=options.num_features)
    self._session.run(
        resources.initialize_resources(resources.shared_resources()))
    self.thread_hostile_add_input_callable = self._make_add_input_callable(
        self._resource, options)
    self.thread_hostile_get_buckets_callable = (
        self._make_get_buckets_callable(self._resource, options))
    self.thread_hostile_merge_summary_callable = (
        self._make_merge_summary_callable(self._resource, options))
    # Create an op to flush summaries and return a list representing the
    # summaries that were added to all accumulators so far.
    self.thread_hostile_flush_summary_callable = self._session.make_callable(
        fetches=tf.raw_ops.BoostedTreesFlushQuantileSummaries(
            quantile_stream_resource_handle=self._resource,
            num_features=options.num_features))
    graph.finalize()
  # We generate an empty summary by calling self._flush_summary_callable and
  # cache it for efficiency. Caching is safe (and as such the cache is
  # public) since it is immutable.
  with self.lock:
    self.empty_summary = self.thread_hostile_flush_summary_callable()
Example 13: finalize
# Required import: from tensorflow.python.ops import resources [as alias]
# Or: from tensorflow.python.ops.resources import initialize_resources [as alias]
def finalize(self):
  """Creates operations if needed and finalizes the graph."""
  if self._init_op is None:
    def default_init_op():
      return tf.group(
          tf.global_variables_initializer(),
          resources.initialize_resources(resources.shared_resources()))
    self._init_op = TransferScaffold.get_or_default(
        'init_op', tf.GraphKeys.INIT_OP, default_init_op)
  if self._ready_op is None:
    def default_ready_op():
      return tf.concat([
          tf.report_uninitialized_variables(),
          resources.report_uninitialized_resources()
      ], 0)
    self._ready_op = TransferScaffold.get_or_default(
        'ready_op', tf.GraphKeys.READY_OP, default_ready_op)
  if self._ready_for_local_init_op is None:
    def default_ready_for_local_init_op():
      return tf.report_uninitialized_variables(tf.global_variables())
    self._ready_for_local_init_op = TransferScaffold.get_or_default(
        'ready_for_local_init_op', tf.GraphKeys.READY_FOR_LOCAL_INIT_OP,
        default_ready_for_local_init_op)
  if self._local_init_op is None:
    self._local_init_op = TransferScaffold.get_or_default(
        'local_init_op', tf.GraphKeys.LOCAL_INIT_OP,
        TransferScaffold.default_local_init_op)
  if self._summary_op is None:
    self._summary_op = TransferScaffold.get_or_default(
        'summary_op', tf.GraphKeys.SUMMARY_OP, tf.summary.merge_all)
  if self._saver is None:
    self._saver = training_saver._get_saver_or_default()  # pylint: disable=protected-access
  self._saver.build()
  # ops.get_default_graph().finalize()
  # logging.info('Graph was finalized.')
  return self
Example 14: _get_train_op_and_ensemble_and_boundaries
# Required import: from tensorflow.python.ops import resources [as alias]
# Or: from tensorflow.python.ops.resources import initialize_resources [as alias]
def _get_train_op_and_ensemble_and_boundaries(self,
                                              head,
                                              config,
                                              is_classification,
                                              train_in_memory,
                                              center_bias=False,
                                              use_numeric_columns=False):
  """Calls bt_model_fn() and returns the train_op and ensemble_serialized."""
  features, labels = _make_train_input_fn(is_classification)()
  tree_hparams = boosted_trees._TreeHParams(  # pylint:disable=protected-access
      n_trees=2,
      max_depth=2,
      learning_rate=0.1,
      l1=0.,
      l2=0.01,
      tree_complexity=0.,
      min_node_weight=0.,
      center_bias=center_bias,
      pruning_mode='none',
      quantile_sketch_epsilon=0.01)
  if use_numeric_columns:
    columns = self._numeric_feature_columns
    num_resources = 2
  else:
    columns = self._feature_columns
    num_resources = 1
  estimator_spec = boosted_trees._bt_model_fn(  # pylint:disable=protected-access
      features=features,
      labels=labels,
      mode=ModeKeys.TRAIN,
      head=head,
      feature_columns=columns,
      tree_hparams=tree_hparams,
      example_id_column_name=EXAMPLE_ID_COLUMN,
      n_batches_per_layer=1,
      config=config,
      train_in_memory=train_in_memory)
  resources.initialize_resources(resources.shared_resources()).run()
  tf.compat.v1.initializers.global_variables().run()
  tf.compat.v1.initializers.local_variables().run()
  # Gets the train_op and serialized proto of the ensemble.
  shared_resources = resources.shared_resources()
  self.assertEqual(num_resources, len(shared_resources))
  train_op = estimator_spec.train_op
  with tf.control_dependencies([train_op]):
    _, ensemble_serialized = (
        gen_boosted_trees_ops.boosted_trees_serialize_ensemble(
            shared_resources[0].handle))
  if use_numeric_columns:
    bucket_boundaries = boosted_trees_ops.get_bucket_boundaries(
        shared_resources[1].handle, num_features=len(columns))
  else:
    bucket_boundaries = []
  return train_op, ensemble_serialized, bucket_boundaries