This article collects typical usage examples of the tensorflow.container method in Python. If you are unsure what tensorflow.container does or how to use it, the curated code samples below may help. You can also explore further usage examples from the tensorflow module itself.
The following shows 15 code examples of tensorflow.container, sorted by popularity by default.
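Before the numbered examples, here is a minimal, hypothetical sketch (not taken from any of the examples below) of the pattern they all share: in the TensorFlow 1.x graph API, tf.container(name) is a context manager that places the state of the stateful ops created inside it into a named resource container, which a later tf.Session.reset call can clear. The graph and variable names are illustrative only.

import tensorflow as tf  # TF 1.x graph-mode API

graph = tf.Graph()
with graph.as_default(), tf.container("experiment"):
    # Stateful ops created here record container="experiment" in their NodeDef.
    counter = tf.Variable(0, name="counter")

print(counter.op.get_attr("container"))  # b'experiment'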
Example 1: create_train_model
# Required import: import tensorflow [as alias]
# Or: from tensorflow import container [as alias]
def create_train_model(model_creator, hparams, data_dir):
    """Create train graph, model, and iterator."""
    train_data_path = []
    for root, _, name in os.walk(os.path.join(data_dir, 'train_data')):
        for x in name:
            if x.split('.')[-1] == 'mat':
                train_data_path.append(os.path.join(root, x))
    assert len(train_data_path) == 1
    train_data = scio.loadmat(*train_data_path)['data']
    assert hparams.src_len == hparams.tgt_len == train_data.shape[1]
    graph = tf.Graph()
    with graph.as_default(), tf.container("train"):
        # channels: [features, SBP, DBP, MBP]
        train_src_data = train_data[:, :, 0:hparams.src_feature_size]
        train_tgt_data = train_data[:, :, hparams.src_feature_size:hparams.src_feature_size + hparams.tgt_feature_size]
        src_dataset = tf.data.Dataset.from_tensor_slices(train_src_data)
        tgt_dataset = tf.data.Dataset.from_tensor_slices(train_tgt_data)
        iterator = get_iterator(src_dataset, tgt_dataset, batch_size=hparams.batch_size,
                                random_seed=hparams.random_seed, is_train=True)
        model = model_creator(hparams, iterator=iterator, mode=tf.contrib.learn.ModeKeys.TRAIN)
    return TrainModel(graph=graph, model=model, iterator=iterator)
Example 2: create_eval_model
# Required import: import tensorflow [as alias]
# Or: from tensorflow import container [as alias]
def create_eval_model(model_creator, hparams, data_dir):
    """Create eval graph, model, and iterator."""
    eval_data_path = []
    for root, _, name in os.walk(os.path.join(data_dir, 'eval_data')):
        for x in name:
            if x.split('.')[-1] == 'mat':
                eval_data_path.append(os.path.join(root, x))
    assert len(eval_data_path) == 1
    eval_data = scio.loadmat(*eval_data_path)['data']
    data_mean, data_std = load_data_mean_std(hparams, data_dir)
    batch_size = eval_data.shape[0]
    graph = tf.Graph()
    with graph.as_default(), tf.container("eval"):
        # channels: [features, SBP, DBP, MBP]
        eval_src_data = eval_data[:, :, 0:hparams.src_feature_size]
        eval_tgt_data = eval_data[:, :, hparams.src_feature_size:hparams.src_feature_size + hparams.tgt_feature_size]
        src_dataset = tf.data.Dataset.from_tensor_slices(eval_src_data)
        tgt_dataset = tf.data.Dataset.from_tensor_slices(eval_tgt_data)
        iterator = get_iterator(src_dataset, tgt_dataset, batch_size=batch_size,
                                random_seed=hparams.random_seed, is_train=False)
        model = model_creator(hparams, iterator=iterator, mode=tf.contrib.learn.ModeKeys.EVAL)
    return EvalModel(graph=graph, model=model, iterator=iterator, data_mean=data_mean, data_std=data_std)
Example 3: testContainer
# Required import: import tensorflow [as alias]
# Or: from tensorflow import container [as alias]
def testContainer(self):
    with tf.Graph().as_default():
        v0 = tf.Variable([0])
        with tf.container("l1"):
            v1 = tf.Variable([1])
            with tf.container("l2"):
                v2 = tf.Variable([2])
                special_v = gen_state_ops._variable(shape=[1], dtype=tf.float32,
                                                    name="VariableInL3", container="l3", shared_name="")
            v3 = tf.Variable([3])
        v4 = tf.Variable([4])
        self.assertEqual(tf.compat.as_bytes(""), v0.op.get_attr("container"))
        self.assertEqual(tf.compat.as_bytes("l1"), v1.op.get_attr("container"))
        self.assertEqual(tf.compat.as_bytes("l2"), v2.op.get_attr("container"))
        self.assertEqual(tf.compat.as_bytes("l3"),
                         special_v.op.get_attr("container"))
        self.assertEqual(tf.compat.as_bytes("l1"), v3.op.get_attr("container"))
        self.assertEqual(tf.compat.as_bytes(""), v4.op.get_attr("container"))
Example 4: testConstructorWithShapes
# Required import: import tensorflow [as alias]
# Or: from tensorflow import container [as alias]
def testConstructorWithShapes(self):
    with tf.Graph().as_default():
        q = tf.FIFOQueue(5, (tf.int32, tf.float32),
                         shapes=(tf.TensorShape([1, 1, 2, 3]),
                                 tf.TensorShape([5, 8])), name="Q")
        self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
        self.assertEquals(tf.string_ref, q.queue_ref.dtype)
        self.assertProtoEquals("""
            name:'Q' op:'FIFOQueue'
            attr { key: 'component_types' value { list {
                type: DT_INT32 type : DT_FLOAT
            } } }
            attr { key: 'shapes' value { list {
                shape { dim { size: 1 }
                        dim { size: 1 }
                        dim { size: 2 }
                        dim { size: 3 } }
                shape { dim { size: 5 }
                        dim { size: 8 } }
            } } }
            attr { key: 'capacity' value { i: 5 } }
            attr { key: 'container' value { s: '' } }
            attr { key: 'shared_name' value { s: '' } }
            """, q.queue_ref.op.node_def)
Example 5: testConstructor
# Required import: import tensorflow [as alias]
# Or: from tensorflow import container [as alias]
def testConstructor(self):
    with tf.Graph().as_default():
        q = tf.FIFOQueue(5, (tf.int32, tf.float32), names=("i", "j"),
                         shared_name="foo", name="Q")
        self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
        self.assertEquals(tf.string_ref, q.queue_ref.dtype)
        self.assertProtoEquals("""
            name:'Q' op:'FIFOQueue'
            attr { key: 'component_types' value { list {
                type: DT_INT32 type : DT_FLOAT
            } } }
            attr { key: 'shapes' value { list {} } }
            attr { key: 'capacity' value { i: 5 } }
            attr { key: 'container' value { s: '' } }
            attr { key: 'shared_name' value { s: 'foo' } }
            """, q.queue_ref.op.node_def)
        self.assertEqual(["i", "j"], q.names)
Example 6: testMultipleContainers
# Required import: import tensorflow [as alias]
# Or: from tensorflow import container [as alias]
def testMultipleContainers(self):
    with tf.container("test0"):
        v0 = tf.Variable(1.0, name="v0")
    with tf.container("test1"):
        v1 = tf.Variable(2.0, name="v0")
    server = tf.train.Server.create_local_server()
    sess = tf.Session(server.target)
    sess.run(tf.global_variables_initializer())
    self.assertAllEqual(1.0, sess.run(v0))
    self.assertAllEqual(2.0, sess.run(v1))

    # Resets container. Session aborts.
    tf.Session.reset(server.target, ["test0"])
    with self.assertRaises(tf.errors.AbortedError):
        sess.run(v1)

    # Connects to the same target. Device memory for v0 would have
    # been released, so it will be uninitialized. But v1 should still
    # be valid.
    sess = tf.Session(server.target)
    with self.assertRaises(tf.errors.FailedPreconditionError):
        sess.run(v0)
    self.assertAllEqual(2.0, sess.run(v1))

# Verifies various reset failures.
Example 7: create_infer_model
# Required import: import tensorflow [as alias]
# Or: from tensorflow import container [as alias]
def create_infer_model(model_creator, hparams, infer_data, batch_size):
    """Create inference model."""
    graph = tf.Graph()
    with graph.as_default(), tf.container("infer"):
        # channels: [features, SBP, DBP, MBP]
        infer_src_data = infer_data[:, :, 0:hparams.src_feature_size]
        infer_tgt_data = infer_data[:, :, hparams.src_feature_size:hparams.src_feature_size + hparams.tgt_feature_size]
        src_dataset = tf.data.Dataset.from_tensor_slices(infer_src_data)
        tgt_dataset = tf.data.Dataset.from_tensor_slices(infer_tgt_data)
        iterator = get_iterator(src_dataset, tgt_dataset, batch_size=batch_size,
                                random_seed=hparams.random_seed, is_train=False)
        model = model_creator(hparams, iterator=iterator, mode=tf.contrib.learn.ModeKeys.INFER)
    return InferModel(graph=graph, model=model, iterator=iterator)
Example 8: create_eval_model
# Required import: import tensorflow [as alias]
# Or: from tensorflow import container [as alias]
def create_eval_model(model_creator, hparams, scope=None, extra_args=None):
    """Create eval graph, model, src/tgt file holders, and iterator."""
    src_vocab_file = hparams.src_vocab_file
    tgt_vocab_file = hparams.tgt_vocab_file
    graph = tf.Graph()
    with graph.as_default(), tf.container(scope or "eval"):
        src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
            src_vocab_file, tgt_vocab_file, hparams.share_vocab)
        src_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)
        tgt_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)
        src_dataset = tf.contrib.data.TextLineDataset(src_file_placeholder)
        tgt_dataset = tf.contrib.data.TextLineDataset(tgt_file_placeholder)
        iterator = iterator_utils.get_iterator(
            src_dataset,
            tgt_dataset,
            src_vocab_table,
            tgt_vocab_table,
            hparams.batch_size,
            sos=hparams.sos,
            eos=hparams.eos,
            source_reverse=hparams.source_reverse,
            random_seed=hparams.random_seed,
            num_buckets=hparams.num_buckets,
            src_max_len=hparams.src_max_len_infer,
            tgt_max_len=hparams.tgt_max_len_infer)
        model = model_creator(
            hparams,
            iterator=iterator,
            mode=tf.contrib.learn.ModeKeys.EVAL,
            source_vocab_table=src_vocab_table,
            target_vocab_table=tgt_vocab_table,
            scope=scope,
            extra_args=extra_args)
    return EvalModel(
        graph=graph,
        model=model,
        src_file_placeholder=src_file_placeholder,
        tgt_file_placeholder=tgt_file_placeholder,
        iterator=iterator)
Example 9: create_infer_model
# Required import: import tensorflow [as alias]
# Or: from tensorflow import container [as alias]
def create_infer_model(model_creator, hparams, scope=None, extra_args=None):
    """Create inference model."""
    graph = tf.Graph()
    src_vocab_file = hparams.src_vocab_file
    tgt_vocab_file = hparams.tgt_vocab_file
    with graph.as_default(), tf.container(scope or "infer"):
        src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
            src_vocab_file, tgt_vocab_file, hparams.share_vocab)
        reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(
            tgt_vocab_file, default_value=vocab_utils.UNK)
        src_placeholder = tf.placeholder(shape=[None], dtype=tf.string)
        batch_size_placeholder = tf.placeholder(shape=[], dtype=tf.int64)
        src_dataset = tf.contrib.data.Dataset.from_tensor_slices(
            src_placeholder)
        iterator = iterator_utils.get_infer_iterator(
            src_dataset,
            src_vocab_table,
            batch_size=batch_size_placeholder,
            eos=hparams.eos,
            source_reverse=hparams.source_reverse,
            src_max_len=hparams.src_max_len_infer)
        model = model_creator(
            hparams,
            iterator=iterator,
            mode=tf.contrib.learn.ModeKeys.INFER,
            source_vocab_table=src_vocab_table,
            target_vocab_table=tgt_vocab_table,
            reverse_target_vocab_table=reverse_tgt_vocab_table,
            scope=scope,
            extra_args=extra_args)
    return InferModel(
        graph=graph,
        model=model,
        src_placeholder=src_placeholder,
        batch_size_placeholder=batch_size_placeholder,
        iterator=iterator)
Example 10: create_eval_model
# Required import: import tensorflow [as alias]
# Or: from tensorflow import container [as alias]
def create_eval_model(model_creator, hparams, scope=None, extra_args=None):
    """Create eval graph, model, src/tgt file holders, and iterator."""
    src_vocab_file = hparams.src_vocab_file
    tgt_vocab_file = hparams.tgt_vocab_file
    graph = tf.Graph()
    with graph.as_default(), tf.container(scope or "eval"):
        src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
            src_vocab_file, tgt_vocab_file, hparams.share_vocab)
        src_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)
        tgt_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)
        src_dataset = tf.data.TextLineDataset(src_file_placeholder)
        tgt_dataset = tf.data.TextLineDataset(tgt_file_placeholder)
        with tf.device('CPU:0'):
            iterator = iterator_utils.get_iterator(
                src_dataset,
                tgt_dataset,
                src_vocab_table,
                tgt_vocab_table,
                hparams.batch_size,
                sos=hparams.sos,
                eos=hparams.eos,
                random_seed=hparams.random_seed,
                num_buckets=hparams.num_buckets,
                src_max_len=hparams.src_max_len_infer,
                tgt_max_len=hparams.tgt_max_len_infer)
        model = model_creator(
            hparams,
            iterator=iterator,
            mode=tf.contrib.learn.ModeKeys.EVAL,
            source_vocab_table=src_vocab_table,
            target_vocab_table=tgt_vocab_table,
            scope=scope,
            extra_args=extra_args)
    return EvalModel(
        graph=graph,
        model=model,
        src_file_placeholder=src_file_placeholder,
        tgt_file_placeholder=tgt_file_placeholder,
        iterator=iterator)
Example 11: create_infer_model
# Required import: import tensorflow [as alias]
# Or: from tensorflow import container [as alias]
def create_infer_model(model_creator, hparams, scope=None, extra_args=None):
    """Create inference model."""
    graph = tf.Graph()
    src_vocab_file = hparams.src_vocab_file
    tgt_vocab_file = hparams.tgt_vocab_file
    with graph.as_default(), tf.container(scope or "infer"):
        src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
            src_vocab_file, tgt_vocab_file, hparams.share_vocab)
        reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(
            tgt_vocab_file, default_value=vocab_utils.UNK)
        src_placeholder = tf.placeholder(shape=[None], dtype=tf.string)
        batch_size_placeholder = tf.placeholder(shape=[], dtype=tf.int64)
        src_dataset = tf.data.Dataset.from_tensor_slices(
            src_placeholder)
        iterator = iterator_utils.get_infer_iterator(
            src_dataset,
            src_vocab_table,
            batch_size=batch_size_placeholder,
            eos=hparams.eos,
            src_max_len=hparams.src_max_len_infer)
        model = model_creator(
            hparams,
            iterator=iterator,
            mode=tf.contrib.learn.ModeKeys.INFER,
            source_vocab_table=src_vocab_table,
            target_vocab_table=tgt_vocab_table,
            reverse_target_vocab_table=reverse_tgt_vocab_table,
            scope=scope,
            extra_args=extra_args)
    return InferModel(
        graph=graph,
        model=model,
        src_placeholder=src_placeholder,
        batch_size_placeholder=batch_size_placeholder,
        iterator=iterator)
Example 12: testMultiQueueConstructor
# Required import: import tensorflow [as alias]
# Or: from tensorflow import container [as alias]
def testMultiQueueConstructor(self):
    with tf.Graph().as_default():
        q = tf.FIFOQueue(5, (tf.int32, tf.float32), shared_name="foo", name="Q")
        self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
        self.assertEquals(tf.string_ref, q.queue_ref.dtype)
        self.assertProtoEquals("""
            name:'Q' op:'FIFOQueue'
            attr { key: 'component_types' value { list {
                type: DT_INT32 type : DT_FLOAT
            } } }
            attr { key: 'shapes' value { list {} } }
            attr { key: 'capacity' value { i: 5 } }
            attr { key: 'container' value { s: '' } }
            attr { key: 'shared_name' value { s: 'foo' } }
            """, q.queue_ref.op.node_def)
Example 13: testDequeueWithTimeout
# Required import: import tensorflow [as alias]
# Or: from tensorflow import container [as alias]
def testDequeueWithTimeout(self):
    with self.test_session(
            config=tf.ConfigProto(operation_timeout_in_ms=20)) as sess:
        q = tf.FIFOQueue(10, tf.float32)
        self.assertEqual(tf.compat.as_bytes(""),
                         q.queue_ref.op.get_attr("container"))
        dequeued_t = q.dequeue()
        # Intentionally do not run any enqueue_ops so that dequeue will block
        # until operation_timeout_in_ms.
        with self.assertRaisesRegexp(tf.errors.DeadlineExceededError,
                                     "Timed out waiting for notification"):
            sess.run(dequeued_t)
Example 14: testContainer
# Required import: import tensorflow [as alias]
# Or: from tensorflow import container [as alias]
def testContainer(self):
    with tf.Graph().as_default():
        with tf.container("test"):
            q = tf.FIFOQueue(10, tf.float32)
        self.assertEqual(tf.compat.as_bytes("test"),
                         q.queue_ref.op.get_attr("container"))
Example 15: testSameVariablesClear
# Required import: import tensorflow [as alias]
# Or: from tensorflow import container [as alias]
def testSameVariablesClear(self):
    server = tf.train.Server.create_local_server()

    # Creates a graph with 2 variables.
    v0 = tf.Variable([[2, 1]], name="v0")
    v1 = tf.Variable([[1], [2]], name="v1")
    v2 = tf.matmul(v0, v1)

    # Verifies that both sessions connecting to the same target return
    # the same results.
    sess_1 = tf.Session(server.target)
    sess_2 = tf.Session(server.target)
    sess_1.run(tf.global_variables_initializer())
    self.assertAllEqual([[4]], sess_1.run(v2))
    self.assertAllEqual([[4]], sess_2.run(v2))

    # Resets target. Sessions abort. Use sess_2 to verify.
    tf.Session.reset(server.target)
    with self.assertRaises(tf.errors.AbortedError):
        self.assertAllEqual([[4]], sess_2.run(v2))

    # Connects to the same target. Device memory for the variables would have
    # been released, so they will be uninitialized.
    sess_2 = tf.Session(server.target)
    with self.assertRaises(tf.errors.FailedPreconditionError):
        sess_2.run(v2)
    # Reinitializes the variables.
    sess_2.run(tf.global_variables_initializer())
    self.assertAllEqual([[4]], sess_2.run(v2))
    sess_2.close()

# Verifies behavior of tf.Session.reset() with multiple containers using
# default container names as defined by the target name.