This article collects typical usage examples of the Python method tensorflow.python.framework.ops.default_session. If you are wondering what ops.default_session does, how to call it, or what real-world uses look like, the curated code samples below may help. You can also explore further usage examples from the module that defines the method, tensorflow.python.framework.ops.
The following shows 6 code examples of ops.default_session, sorted by popularity by default.
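Before the examples, here is a minimal sketch of what ops.default_session does, assuming TensorFlow 1.x graph mode: it returns the context manager that installs a session as the thread-local default session, which is what tf.Session.as_default() delegates to. The constant and session names are illustrative.

import tensorflow as tf
from tensorflow.python.framework import ops

c = tf.constant(42)
sess = tf.Session()
with ops.default_session(sess):
    # Inside the block, the session can be omitted from eval()/run() calls.
    assert tf.get_default_session() is sess
    print(c.eval())
sess.close()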
Example 1: run
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import default_session [as alias]
def run(self):
    self.dataflow.reset_state()
    with ops.default_session(self.sess):
        try:
            while True:
                for dp in self.dataflow.get_data():
                    if self.coord.should_stop():
                        return
                    feed = dict(zip(self.input_vars, dp))
                    self.op.run(feed_dict=feed)
        except tf.errors.CancelledError:
            pass
        except Exception:
            logger.exception("Exception in EnqueueThread:")
        finally:
            try:
                self.sess.run(self.close_op)
            except RuntimeError:  # session already closed
                pass
            self.coord.request_stop()
            logger.info("Enqueue Thread Exited.")
Example 2: as_default
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import default_session [as alias]
def as_default(self):
    return ops.default_session(self)
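The same one-line delegation works for any session-like object, because ops.default_session only pushes the object onto the thread-local default-session stack. A hedged sketch, using an illustrative wrapper class that is not part of the original project:

from tensorflow.python.framework import ops

class SessionWrapper(object):
    # Illustrative wrapper around a real tf.Session.
    def __init__(self, sess):
        self._sess = sess
        self.graph = sess.graph     # needed so Tensor.eval() accepts the wrapper

    def run(self, *args, **kwargs):
        return self._sess.run(*args, **kwargs)

    def as_default(self):
        # Install this wrapper as the thread-local default session.
        return ops.default_session(self)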
Example 3: _start_concurrency
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import default_session [as alias]
def _start_concurrency(self):
    """
    Run all threads before starting training
    """
    logger.info("Starting all threads & procs ...")
    tf.train.start_queue_runners(sess=self.sess.get(), coord=self.coord, daemon=True, start=True)
    # with self.sess.as_default():
    with ops.default_session(self.sess):
        # avoid SIGINT being handled by the extra threads/processes
        start_proc_mask_signal(self._extra_threads_procs)
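start_proc_mask_signal appears to be a helper from the surrounding project (a tensorpack-style concurrency utility) that starts the extra threads/processes with SIGINT masked, so that Ctrl-C reaches only the main trainer. A generic, stand-alone way to get a similar effect for your own helper process, shown purely as an illustration rather than the project's implementation:

import multiprocessing
import signal
import time

def _worker():
    # Ignore SIGINT in the helper so Ctrl-C is handled only by the main process.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    time.sleep(1)

if __name__ == '__main__':
    p = multiprocessing.Process(target=_worker)
    p.start()
    p.join()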
Example 4: as_default
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import default_session [as alias]
def as_default(self):
    """Returns a context manager that makes this object the default session.

    Use with the `with` keyword to specify that calls to
    @{tf.Operation.run} or @{tf.Tensor.eval} should be executed in
    this session.

    ```python
    c = tf.constant(..)
    sess = tf.Session()

    with sess.as_default():
      assert tf.get_default_session() is sess
      print(c.eval())
    ```

    To get the current default session, use @{tf.get_default_session}.

    *N.B.* The `as_default` context manager *does not* close the
    session when you exit the context, and you must close the session
    explicitly.

    ```python
    c = tf.constant(...)
    sess = tf.Session()
    with sess.as_default():
      print(c.eval())
    # ...
    with sess.as_default():
      print(c.eval())

    sess.close()
    ```

    Alternatively, you can use `with tf.Session():` to create a
    session that is automatically closed on exiting the context,
    including when an uncaught exception is raised.

    *N.B.* The default session is a property of the current thread. If you
    create a new thread, and wish to use the default session in that
    thread, you must explicitly add a `with sess.as_default():` in that
    thread's function.

    *N.B.* Entering a `with sess.as_default():` block does not affect
    the current default graph. If you are using multiple graphs, and
    `sess.graph` is different from the value of @{tf.get_default_graph},
    you must explicitly enter a `with sess.graph.as_default():` block
    to make `sess.graph` the default graph.

    Returns:
      A context manager using this session as the default session.
    """
    return ops.default_session(self)
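The last N.B. above is easy to trip over, so here is a small sketch of the multi-graph caveat, assuming TensorFlow 1.x (graph and constant names are illustrative): making a session default does not make its graph default.

import tensorflow as tf

g = tf.Graph()
with g.as_default():
    c = tf.constant(1)

sess = tf.Session(graph=g)
with sess.as_default():
    assert tf.get_default_session() is sess
    assert tf.get_default_graph() is not g   # the default graph did not change
    with sess.graph.as_default():
        # Now new ops land in sess.graph, and eval() uses the default session.
        d = c + 1
        print(d.eval())
sess.close()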
Example 5: as_default
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import default_session [as alias]
def as_default(self):
    """Returns a context manager that makes this object the default session.

    Use with the `with` keyword to specify that calls to
    [`Operation.run()`](../../api_docs/python/framework.md#Operation.run) or
    [`Tensor.eval()`](../../api_docs/python/framework.md#Tensor.eval) should be
    executed in this session.

    ```python
    c = tf.constant(..)
    sess = tf.Session()

    with sess.as_default():
      assert tf.get_default_session() is sess
      print(c.eval())
    ```

    To get the current default session, use
    [`tf.get_default_session()`](#get_default_session).

    *N.B.* The `as_default` context manager *does not* close the
    session when you exit the context, and you must close the session
    explicitly.

    ```python
    c = tf.constant(...)
    sess = tf.Session()
    with sess.as_default():
      print(c.eval())
    # ...
    with sess.as_default():
      print(c.eval())

    sess.close()
    ```

    Alternatively, you can use `with tf.Session():` to create a
    session that is automatically closed on exiting the context,
    including when an uncaught exception is raised.

    *N.B.* The default session is a property of the current thread. If you
    create a new thread, and wish to use the default session in that
    thread, you must explicitly add a `with sess.as_default():` in that
    thread's function.

    Returns:
      A context manager using this session as the default session.
    """
    return ops.default_session(self)
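The thread-locality note in both docstrings can also be made concrete. A minimal sketch, assuming TensorFlow 1.x; the worker function and constant are illustrative:

import threading
import tensorflow as tf
from tensorflow.python.framework import ops

c = tf.constant(7)
sess = tf.Session()

def worker():
    # The default session set in the main thread is not visible here.
    assert tf.get_default_session() is None
    with ops.default_session(sess):     # so install one explicitly in this thread
        print(c.eval())

with sess.as_default():
    t = threading.Thread(target=worker)
    t.start()
    t.join()
sess.close()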
Example 6: main_loop
# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import default_session [as alias]
def main_loop(self):
    # some final operations that might modify the graph
    logger.info("[{}] Initializing graph variables ...".format(os.environ['SLURMD_NODENAME']))
    # self.sess.run(tf.initialize_all_variables())
    self.config.session_init.init(self.sess)
    # tf.get_default_graph().finalize()
    callbacks = self.config.callbacks

    logger.info("[{}] Starting concurrency...".format(os.environ['SLURMD_NODENAME']))
    self._start_concurrency()

    # with self.sess.as_default():
    logger.info("[{}] Setting default session".format(os.environ['SLURMD_NODENAME']))
    with ops.default_session(self.sess):
        try:
            logger.info("[{}] Getting global step".format(os.environ['SLURMD_NODENAME']))
            self.global_step = get_global_step()
            logger.info("[{}] Start training with global_step={}".format(
                os.environ['SLURMD_NODENAME'], self.global_step))

            if self.config.extra_arg['is_chief']:
                server = neptune_mp_server.Server(
                    self.config.extra_arg['n_workers'],
                    port=self.config.extra_arg['port'],
                    debug_charts=self.config.extra_arg['debug_charts'],
                    adam_debug=self.config.extra_arg['adam_debug'],
                    schedule_hyper=self.config.extra_arg['schedule_hyper'],
                    experiment_dir=self.config.extra_arg['experiment_dir'])
                server.main_loop()

            callbacks.before_train()
            for epoch in range(self.config.starting_epoch, self.config.max_epoch + 1):
                with timed_operation(
                        'Epoch {}, global_step={}'.format(
                            epoch, self.global_step + self.config.step_per_epoch)):
                    for step in tqdm.trange(
                            self.config.step_per_epoch,
                            **get_tqdm_kwargs(leave=True)):
                        if self.coord.should_stop():
                            return
                        self.run_step()
                        callbacks.trigger_step()
                    try:
                        self.global_step += 1
                    except:
                        self.global_step = -1
                    self.trigger_epoch()
                print('EPOCH ENDS HERE')
        except (KeyboardInterrupt, Exception):
            raise
        finally:
            # Do I need to run queue.close?
            print('Handling finally block')
            callbacks.after_train()
            self.coord.request_stop()
            self.summary_writer.close()
            self.sess.close()