This article collects typical usage examples of the tensorflow.placeholder_with_default method in Python. If you have been asking yourself how tensorflow.placeholder_with_default works, how to call it, or what real uses of it look like, the curated code examples below should help. You can also explore further usage examples of the enclosing tensorflow module.
The following presents 15 code examples of tensorflow.placeholder_with_default, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
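Before the examples, here is a minimal sketch of the method's semantics (TF 1.x graph mode; the tensor names are illustrative, not taken from any example below): the returned tensor evaluates to its default value unless a value is supplied through feed_dict.

import tensorflow as tf

# The tensor evaluates to the default (1.0) unless overridden via feed_dict.
keep_prob = tf.placeholder_with_default(1.0, shape=(), name="keep_prob")
doubled = keep_prob * 2.0

with tf.Session() as sess:
    print(sess.run(doubled))                    # 2.0 -- default used
    print(sess.run(doubled, {keep_prob: 0.5}))  # 1.0 -- fed value wins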
Example 1: test_reset_forced
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import placeholder_with_default [as alias]
def test_reset_forced(self):
    reset = tf.placeholder_with_default(False, ())
    batch_env = self._create_test_batch_env((2, 4))
    algo = tools.MockAlgorithm(batch_env)
    done, _, _ = tools.simulate(batch_env, algo, False, reset)
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(done)
        sess.run(done, {reset: True})
        sess.run(done)
        sess.run(done, {reset: True})
        sess.run(done)
        sess.run(done)
        sess.run(done)
        self.assertAllEqual([1, 2, 2, 2], batch_env[0].steps)
        self.assertAllEqual([1, 2, 4], batch_env[1].steps)
Example 2: testLSTMSeq2SeqAttention
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import placeholder_with_default [as alias]
def testLSTMSeq2SeqAttention(self):
    vocab_size = 9
    # np.random.random_integers is deprecated; randint(1, vocab_size) draws
    # from the same inclusive range [1, vocab_size - 1].
    x = np.random.randint(1, high=vocab_size, size=(3, 5, 1, 1))
    y = np.random.randint(1, high=vocab_size, size=(3, 6, 1, 1))
    hparams = lstm.lstm_attention()
    p_hparams = problem_hparams.test_problem_hparams(vocab_size, vocab_size)
    x = tf.constant(x, dtype=tf.int32)
    x = tf.placeholder_with_default(x, shape=[None, None, 1, 1])
    with self.test_session() as session:
        features = {
            "inputs": x,
            "targets": tf.constant(y, dtype=tf.int32),
        }
        model = lstm.LSTMSeq2seqAttention(
            hparams, tf.estimator.ModeKeys.TRAIN, p_hparams)
        logits, _ = model(features)
        session.run(tf.global_variables_initializer())
        res = session.run(logits)
        self.assertEqual(res.shape, (3, 6, 1, 1, vocab_size))
Example 3: testLSTMSeq2seqAttentionBidirectionalEncoder
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import placeholder_with_default [as alias]
def testLSTMSeq2seqAttentionBidirectionalEncoder(self):
    vocab_size = 9
    # As in Example 2, randint replaces the deprecated random_integers.
    x = np.random.randint(1, high=vocab_size, size=(3, 5, 1, 1))
    y = np.random.randint(1, high=vocab_size, size=(3, 6, 1, 1))
    hparams = lstm.lstm_attention()
    p_hparams = problem_hparams.test_problem_hparams(vocab_size, vocab_size)
    x = tf.constant(x, dtype=tf.int32)
    x = tf.placeholder_with_default(x, shape=[None, None, 1, 1])
    with self.test_session() as session:
        features = {
            "inputs": x,
            "targets": tf.constant(y, dtype=tf.int32),
        }
        model = lstm.LSTMSeq2seqAttentionBidirectionalEncoder(
            hparams, tf.estimator.ModeKeys.TRAIN, p_hparams)
        logits, _ = model(features)
        session.run(tf.global_variables_initializer())
        res = session.run(logits)
        self.assertEqual(res.shape, (3, 6, 1, 1, vocab_size))
Example 4: add_placeholders
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import placeholder_with_default [as alias]
def add_placeholders(self):
    """
    Add placeholders to the graph. Placeholders are used to feed in inputs.
    """
    # Add placeholders for inputs.
    # These are all batch-first: the None corresponds to batch_size and
    # allows you to run the same model with a variable batch_size.
    self.context_ids = tf.placeholder(tf.int32, shape=[None, self.FLAGS.context_len])
    self.context_mask = tf.placeholder(tf.int32, shape=[None, self.FLAGS.context_len])
    self.qn_ids = tf.placeholder(tf.int32, shape=[None, self.FLAGS.question_len])
    self.qn_mask = tf.placeholder(tf.int32, shape=[None, self.FLAGS.question_len])
    self.ans_span = tf.placeholder(tf.int32, shape=[None, 2])
    # Add a placeholder to feed in the keep probability (for dropout).
    # This lets us enable dropout during training but disable it at test time.
    self.keep_prob = tf.placeholder_with_default(1.0, shape=())
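This keep-probability pattern is worth noting: because the default is 1.0, dropout is off unless a smaller value is explicitly fed. A hypothetical training step might look like this (sess, train_op, loss, model, and batch are illustrative names, not part of the example above):

# Hypothetical usage of the keep_prob default (all names illustrative).
# Training: feed keep_prob < 1.0 to turn dropout on.
sess.run(train_op, feed_dict={model.context_ids: batch.context_ids,
                              model.keep_prob: 0.8})
# Evaluation: omit keep_prob; the default 1.0 disables dropout.
sess.run(loss, feed_dict={model.context_ids: batch.context_ids})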
Example 5: create_tensor
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import placeholder_with_default [as alias]
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    if in_layers is None:
        in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)
    try:
        shape = self._shape
    except NotImplementedError:
        shape = None
    if len(in_layers) > 0:
        # An input queue is connected: use its dequeued tensor as the default
        # value, so the feature can still be overridden through feed_dict.
        queue = in_layers[0]
        placeholder = queue.out_tensors[self.get_pre_q_name()]
        self.out_tensor = tf.placeholder_with_default(placeholder, self._shape)
        return self.out_tensor
    # No queue: fall back to a regular placeholder that must always be fed.
    out_tensor = tf.placeholder(dtype=self.dtype, shape=self._shape)
    if set_tensors:
        self.out_tensor = out_tensor
    return out_tensor
Example 6: execute_cpu
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import placeholder_with_default [as alias]
def execute_cpu(self, graph_fn, inputs):
    """Constructs the graph, executes it on CPU and returns the result.

    Args:
      graph_fn: a callable that constructs the tensorflow graph to test. The
        arguments of this function should correspond to `inputs`.
      inputs: a list of numpy arrays to feed input to the computation graph.

    Returns:
      A list of numpy arrays or a scalar returned from executing the
      tensorflow graph.
    """
    with self.test_session(graph=tf.Graph()) as sess:
        placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]
        results = graph_fn(*placeholders)
        sess.run([tf.global_variables_initializer(), tf.tables_initializer(),
                  tf.local_variables_initializer()])
        materialized_results = sess.run(
            results, feed_dict=dict(zip(placeholders, inputs)))
        if (len(materialized_results) == 1 and
                isinstance(materialized_results, (list, tuple))):
            materialized_results = materialized_results[0]
        return materialized_results
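A hypothetical call to this helper (the graph function and arrays are illustrative): each numpy input becomes a placeholder_with_default tensor, so graph_fn receives tensors with fully known static shapes while the values remain overridable through feed_dict.

# Hypothetical usage inside a test case (names illustrative).
def add_fn(a, b):
    return tf.add(a, b)

result = self.execute_cpu(add_fn, [np.array([1.0]), np.array([2.0])])
# result is approximately np.array([3.0])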
Example 7: testLSTMSeq2SeqAttention
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import placeholder_with_default [as alias]
def testLSTMSeq2SeqAttention(self):
    vocab_size = 9
    x = np.random.randint(1, high=vocab_size, size=(3, 5, 1, 1))
    y = np.random.randint(1, high=vocab_size, size=(3, 6, 1, 1))
    hparams = lstm.lstm_attention()
    p_hparams = problem_hparams.test_problem_hparams(vocab_size,
                                                     vocab_size,
                                                     hparams)
    x = tf.constant(x, dtype=tf.int32)
    x = tf.placeholder_with_default(x, shape=[None, None, 1, 1])
    with self.test_session() as session:
        features = {
            "inputs": x,
            "targets": tf.constant(y, dtype=tf.int32),
        }
        model = lstm.LSTMSeq2seqAttention(
            hparams, tf.estimator.ModeKeys.TRAIN, p_hparams)
        logits, _ = model(features)
        session.run(tf.global_variables_initializer())
        res = session.run(logits)
        self.assertEqual(res.shape, (3, 6, 1, 1, vocab_size))
Example 8: serving_input_fn
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import placeholder_with_default [as alias]
def serving_input_fn():
    feature_placeholders = {
        "user_id": tf.placeholder(tf.int32, [None]),
        "item_id": tf.placeholder(tf.int32, [None]),
        "age": tf.placeholder(tf.int32, [None]),
        "gender": tf.placeholder(tf.string, [None]),
        "occupation": tf.placeholder(tf.string, [None]),
        "zipcode": tf.placeholder(tf.string, [None]),
        "release_year": tf.placeholder(tf.int32, [None]),
    }
    # Genre columns are optional at serving time: they default to [0].
    feature_placeholders.update({
        col: tf.placeholder_with_default(tf.constant([0]), [None]) for col in GENRE
    })
    features = {
        key: tf.expand_dims(tensor, -1)
        for key, tensor in feature_placeholders.items()
    }
    return tf.estimator.export.ServingInputReceiver(
        features=features,
        receiver_tensors=feature_placeholders
    )
Example 9: __init__
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import placeholder_with_default [as alias]
def __init__(self):
    # Set the dimension number for the input feature maps
    self.dim_input = FLAGS.img_size * FLAGS.img_size * 3
    # Set the dimension number for the outputs
    self.dim_output = FLAGS.way_num
    # Load base learning rates from FLAGS
    self.update_lr = FLAGS.base_lr
    # Load the pre-train phase class number from FLAGS
    self.pretrain_class_num = FLAGS.pretrain_class_num
    # Set the initial meta learning rate
    self.meta_lr = tf.placeholder_with_default(FLAGS.meta_lr, ())
    # Set the initial pre-train learning rate
    self.pretrain_lr = tf.placeholder_with_default(FLAGS.pre_lr, ())
    # Set the default objective functions for meta-train and pre-train
    self.loss_func = xent
    self.pretrain_loss_func = softmaxloss
    # Set the default channel number to 3
    self.channels = 3
    # Load the image size from FLAGS
    self.img_size = FLAGS.img_size
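Wrapping the learning rates in placeholder_with_default means a schedule can override them at run time without rebuilding the graph. A hypothetical decay loop (sess, train_op, model, total_steps, and the schedule itself are all illustrative):

# Hypothetical learning-rate schedule (all names illustrative).
# With no feed, the graph falls back to FLAGS.meta_lr.
for step in range(total_steps):
    lr = FLAGS.meta_lr * (0.5 ** (step // 10000))  # halve every 10k steps
    sess.run(train_op, feed_dict={model.meta_lr: lr})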
Example 10: __init__
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import placeholder_with_default [as alias]
def __init__(self, config, train_network, test_network, global_step, session):
    self.opt_str = config.string("optimizer", "adam").lower()
    self.train_network = train_network
    self.test_network = test_network
    self.session = session
    self.global_step = global_step
    self.validation_step_number = 0
    self.gradient_clipping = config.float("gradient_clipping", -1.0)
    self.learning_rates = config.int_key_dict("learning_rates")
    self.curr_learning_rate = self.learning_rates[1]
    self.lr_var = tf.placeholder(tf.float32, shape=[], name="learning_rate")
    self.loss_scale_var = tf.placeholder_with_default(1.0, shape=[], name="loss_scale")
    self.opt, self.reset_opt_op = self.create_optimizer(config)
    grad_norm = None
    if train_network is not None:
        self._step_op, grad_norm = self.create_step_op_and_grad_norm()
        self._update_ops = self.train_network.update_ops
    else:
        self._step_op = None
        self._update_ops = None
    self.summary_writer, self.summary_op_train, self.summary_op_test = \
        self.init_summaries(config, grad_norm)
Example 11: wrap_pholder
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import placeholder_with_default [as alias]
def wrap_pholder(self, ph, feed):
    """Wrap layer.h entries into placeholders."""
    phtype = type(self.lay.h[ph])
    if phtype is not dict:
        return
    sig = '{}/{}'.format(self.scope, ph)
    val = self.lay.h[ph]
    self.lay.h[ph] = tf.placeholder_with_default(
        val['dfault'], val['shape'], name=sig)
    feed[self.lay.h[ph]] = val['feed']
Example 12: create_rnn
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import placeholder_with_default [as alias]
def create_rnn(self, seq):
    layers = [tf.nn.rnn_cell.GRUCell(size) for size in self.hidden_sizes]
    cells = tf.nn.rnn_cell.MultiRNNCell(layers)
    batch = tf.shape(seq)[0]
    zero_states = cells.zero_state(batch, dtype=tf.float32)
    # Expose the initial state as feedable placeholders that default to zeros,
    # so a previously computed state can be fed back in.
    self.in_state = tuple(tf.placeholder_with_default(state, [None, state.shape[1]])
                          for state in zero_states)
    # Compute the real length of each sequence: all sequences are padded to
    # the same length (num_steps), so count the non-padding steps.
    length = tf.reduce_sum(tf.reduce_max(tf.sign(seq), 2), 1)
    self.output, self.out_state = tf.nn.dynamic_rnn(cells, seq, length, self.in_state)
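Because the state placeholders default to zeros, a generation loop can run the first step without feeding any state and then feed out_state back into in_state. A hypothetical sampling loop (sess, model, current_input, and num_steps are illustrative):

# Hypothetical sampling loop (all names illustrative).
state = None
for _ in range(num_steps):
    feed = {model.seq: current_input}
    if state is not None:
        # Feed the previous state; on the first step the zero default is used.
        for ph, value in zip(model.in_state, state):
            feed[ph] = value
    output, state = sess.run([model.output, model.out_state], feed)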
Example 13: __init__
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import placeholder_with_default [as alias]
def __init__(self, channels=1, n_class=2, model_kwargs={}):
    tf.reset_default_graph()
    self.n_class = n_class
    self.channels = channels
    self.x = tf.placeholder("float", shape=[None, None, None, self.channels], name="inImg")
    # These are not used for now:
    # self.keep_prob = tf.placeholder_with_default(1.0, [])
    # self.is_training = tf.placeholder_with_default(tf.constant(False), [])
    self.scale_space_num = model_kwargs.get("scale_space_num", 6)
    self.res_depth = model_kwargs.get("res_depth", 3)
    self.featRoot = model_kwargs.get("featRoot", 8)
    self.filter_size = model_kwargs.get("filter_size", 3)
    self.pool_size = model_kwargs.get("pool_size", 2)
    self.activation_name = model_kwargs.get("activation_name", "relu")
    # Compare strings with ==, not `is`: `is` tests object identity and can
    # silently fail for equal strings.
    if self.activation_name == "relu":
        self.activation = tf.nn.relu
    elif self.activation_name == "elu":
        self.activation = tf.nn.elu
    self.model = model_kwargs.get("model", "aru")
    self.num_scales = model_kwargs.get("num_scales", 5)
    self.final_act = model_kwargs.get("final_act", "softmax")
    print("Model Type: " + self.model)
    logits = create_aru_net(self.x, self.channels, self.n_class, self.scale_space_num,
                            self.res_depth, self.featRoot, self.filter_size,
                            self.pool_size, self.activation, self.model,
                            self.num_scales)
    self.logits = tf.identity(logits, 'logits')
    if self.final_act == "softmax":
        self.predictor = tf.nn.softmax(self.logits, name='output')
    elif self.final_act == "sigmoid":
        self.predictor = tf.nn.sigmoid(self.logits, name='output')
    elif self.final_act == "identity":
        self.predictor = tf.identity(self.logits, name='output')
Example 14: get_message_and_key
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import placeholder_with_default [as alias]
def get_message_and_key(self):
    """Generate random pseudo-boolean key and message values."""
    batch_size = tf.placeholder_with_default(FLAGS.batch_size, shape=[])
    in_m = batch_of_random_bools(batch_size, TEXT_SIZE)
    in_k = batch_of_random_bools(batch_size, KEY_SIZE)
    return in_m, in_k
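Here placeholder_with_default makes the batch size itself feedable: the graph defaults to FLAGS.batch_size but can be evaluated at any other size. A minimal standalone sketch of the same pattern (TF 1.x; the tensors are illustrative):

# Minimal sketch of the variable-batch-size pattern (illustrative).
batch_size = tf.placeholder_with_default(tf.constant(32), shape=[])
noise = tf.random_uniform([batch_size, 10])

with tf.Session() as sess:
    print(sess.run(noise).shape)                   # (32, 10) -- default
    print(sess.run(noise, {batch_size: 4}).shape)  # (4, 10) -- overridden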
Example 15: test_done_forced
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import placeholder_with_default [as alias]
def test_done_forced(self):
    reset = tf.placeholder_with_default(False, ())
    batch_env = self._create_test_batch_env((2, 4))
    algo = tools.MockAlgorithm(batch_env)
    done, _, _ = tools.simulate(batch_env, algo, False, reset)
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        self.assertAllEqual([False, False], sess.run(done))
        self.assertAllEqual([False, False], sess.run(done, {reset: True}))
        self.assertAllEqual([True, False], sess.run(done))
        self.assertAllEqual([False, False], sess.run(done, {reset: True}))
        self.assertAllEqual([True, False], sess.run(done))
        self.assertAllEqual([False, False], sess.run(done))
        self.assertAllEqual([True, True], sess.run(done))