This article collects typical usage examples of the tensorflow.placeholder method in Python. If you are wondering what tensorflow.placeholder does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore the other usage examples of the tensorflow module.

The following shows 15 code examples of the tensorflow.placeholder method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
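As a quick orientation before the examples: a placeholder is a graph input whose value is supplied at session run time through feed_dict. The minimal sketch below is our own illustration (not taken from any of the projects quoted here) of the basic define-then-feed pattern:

import numpy as np
import tensorflow as tf

# Declare a graph input; shape [None, 3] leaves the batch dimension open.
x = tf.placeholder(tf.float32, shape=[None, 3], name='x')
row_sums = tf.reduce_sum(x, axis=1)

with tf.Session() as sess:
    # The placeholder only receives a concrete value here, at run time.
    print(sess.run(row_sums, feed_dict={x: np.array([[1., 2., 3.]])}))  # [6.]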
Example 1: build_forward

# Required import: import tensorflow [as alias]
# Or: from tensorflow import placeholder [as alias]
def build_forward(self):
    verbalise = self.FLAGS.verbalise

    # Placeholders
    inp_size = [None] + self.meta['inp_size']
    self.inp = tf.placeholder(tf.float32, inp_size, 'input')
    self.feed = dict()  # other placeholders

    # Build the forward pass
    state = identity(self.inp)
    roof = self.num_layer - self.ntrain
    self.say(HEADER, LINE)
    for i, layer in enumerate(self.darknet.layers):
        scope = '{}-{}'.format(str(i), layer.type)
        args = [layer, state, i, roof, self.feed]
        state = op_create(*args)
        mess = state.verbalise()
        self.say(mess)
    self.say(LINE)

    self.top = state
    self.out = tf.identity(state.out, name='output')
Example 2: _build_input

# Required import: import tensorflow [as alias]
# Or: from tensorflow import placeholder [as alias]
def _build_input(self):
    self.tails = tf.placeholder(tf.int32, [None])
    self.heads = tf.placeholder(tf.int32, [None])
    self.targets = tf.one_hot(indices=self.heads, depth=self.num_entity)

    if not self.query_is_language:
        self.queries = tf.placeholder(tf.int32, [None, self.num_step])
        self.query_embedding_params = tf.Variable(self._random_uniform_unit(
                                                      self.num_query + 1,  # <END> token
                                                      self.query_embed_size),
                                                  dtype=tf.float32)
        rnn_inputs = tf.nn.embedding_lookup(self.query_embedding_params,
                                            self.queries)
    else:
        self.queries = tf.placeholder(tf.int32, [None, self.num_step, self.num_word])
        self.vocab_embedding_params = tf.Variable(self._random_uniform_unit(
                                                      self.num_vocab + 1,  # <END> token
                                                      self.vocab_embed_size),
                                                  dtype=tf.float32)
        embedded_query = tf.nn.embedding_lookup(self.vocab_embedding_params,
                                                self.queries)
        rnn_inputs = tf.reduce_mean(embedded_query, axis=2)

    return rnn_inputs
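For context, a hypothetical feed for the placeholders built above might look as follows; the instance name m, the batch values, and the training ops are all assumptions, not part of the original project:

import numpy as np

feed = {m.tails: np.array([0, 5, 7], dtype=np.int32),           # tail entity ids
        m.heads: np.array([2, 4, 9], dtype=np.int32),           # head entity ids
        m.queries: np.array([[1, 1, 2]] * 3, dtype=np.int32)}   # [batch, num_step] query ids
# _, loss = sess.run([train_op, loss_op], feed_dict=feed)       # hypothetical training step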
Example 3: autosummary

# Required import: import tensorflow [as alias]
# Or: from tensorflow import placeholder [as alias]
def autosummary(name, value):
    id = name.replace('/', '_')
    if is_tf_expression(value):
        with tf.name_scope('summary_' + id), tf.device(value.device):
            update_op = _create_autosummary_var(name, value)
            with tf.control_dependencies([update_op]):
                return tf.identity(value)
    else:  # python scalar or numpy array
        if name not in _autosummary_immediate:
            with absolute_name_scope('Autosummary/' + id), tf.device(None), tf.control_dependencies(None):
                update_value = tf.placeholder(tf.float32)
                update_op = _create_autosummary_var(name, update_value)
                _autosummary_immediate[name] = update_op, update_value
        update_op, update_value = _autosummary_immediate[name]
        run(update_op, {update_value: np.float32(value)})
        return value

# Create the necessary ops to include autosummaries in TensorBoard report.
# Note: This should be done only once per graph.
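Note the two code paths above: a TF expression is wired into the graph through control dependencies, while a plain Python value is pushed through a tf.placeholder that is created once and cached, so repeated calls add no new ops. A hedged usage sketch, with assumed tensor and summary names:

# Sketch of both call modes (assumed names; loss_tensor is a TF expression,
# 0.37 a plain Python float from the training loop):
loss_with_summary = autosummary('Loss/train', loss_tensor)  # wires the summary into the graph
autosummary('Loss/val', 0.37)                               # feeds the cached tf.placeholder immediately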
Example 4: binary_refinement

# Required import: import tensorflow [as alias]
# Or: from tensorflow import placeholder [as alias]
def binary_refinement(sess, Best_X_adv,
                      X_adv, Y, ALPHA, ub, lb, model, dataset='cifar'):
    num_samples = np.shape(X_adv)[0]
    print(dataset)
    if dataset == "mnist":
        X_place = tf.placeholder(tf.float32, shape=[1, 1, 28, 28])
    else:
        X_place = tf.placeholder(tf.float32, shape=[1, 3, 32, 32])

    pred = model(X_place)
    for i in range(num_samples):
        logits_op = sess.run(pred, feed_dict={X_place: X_adv[i:i+1, :, :, :]})
        if not np.argmax(logits_op) == np.argmax(Y[i, :]):
            # Success: still adversarial, so raise the lower bound (and thus alpha)
            Best_X_adv[i, :, :, :] = X_adv[i, :, :, :]
            lb[i] = ALPHA[i, 0]
        else:
            ub[i] = ALPHA[i, 0]
        ALPHA[i] = 0.5 * (lb[i] + ub[i])
    return ALPHA, Best_X_adv
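The function performs one bisection step per sample: lb rises while the example still fools the model, ub falls otherwise, and ALPHA is reset to the midpoint. A hypothetical outer loop might look like the sketch below; the craft helper and the step count are assumptions, not part of the original code:

for _ in range(10):  # assumed number of bisection steps
    X_adv = craft(X, ALPHA)  # hypothetical: rebuild adversarial examples at the current ALPHA
    ALPHA, Best_X_adv = binary_refinement(sess, Best_X_adv, X_adv, Y,
                                          ALPHA, ub, lb, model, dataset='cifar')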
Example 5: test_fprop

# Required import: import tensorflow [as alias]
# Or: from tensorflow import placeholder [as alias]
def test_fprop(self):
    import tensorflow as tf
    model = KerasModelWrapper(self.model)
    x = tf.placeholder(tf.float32, shape=(None, 100))
    out_dict = model.fprop(x)

    self.assertEqual(set(out_dict.keys()), set(['l1', 'l2', 'softmax']))
    # Test the dimension of the hidden representation
    self.assertEqual(int(out_dict['l1'].shape[1]), 20)
    self.assertEqual(int(out_dict['l2'].shape[1]), 10)

    # Test the caching
    x2 = tf.placeholder(tf.float32, shape=(None, 100))
    out_dict2 = model.fprop(x2)
    self.assertEqual(set(out_dict2.keys()), set(['l1', 'l2', 'softmax']))
    self.assertEqual(int(out_dict2['l1'].shape[1]), 20)
Example 6: test_generate_gives_adversarial_example

# Required import: import tensorflow [as alias]
# Or: from tensorflow import placeholder [as alias]
def test_generate_gives_adversarial_example(self):
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
    feed_labs = np.zeros((100, 2))
    feed_labs[np.arange(100), orig_labs] = 1
    x = tf.placeholder(tf.float32, x_val.shape)
    y = tf.placeholder(tf.float32, feed_labs.shape)

    x_adv_p = self.attack.generate(x, max_iterations=100,
                                   binary_search_steps=3,
                                   initial_const=1,
                                   clip_min=-5, clip_max=5,
                                   batch_size=100, y=y)
    self.assertEqual(x_val.shape, x_adv_p.shape)
    x_adv = self.sess.run(x_adv_p, {x: x_val, y: feed_labs})

    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
    self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
Example 7: test_generate_targeted_gives_adversarial_example

# Required import: import tensorflow [as alias]
# Or: from tensorflow import placeholder [as alias]
def test_generate_targeted_gives_adversarial_example(self):
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    feed_labs = np.zeros((100, 2))
    # randint's upper bound is exclusive; (0, 2) draws random targets from {0, 1}
    feed_labs[np.arange(100), np.random.randint(0, 2, 100)] = 1
    x = tf.placeholder(tf.float32, x_val.shape)
    y = tf.placeholder(tf.float32, feed_labs.shape)

    x_adv_p = self.attack.generate(x, max_iterations=100,
                                   binary_search_steps=3,
                                   initial_const=1,
                                   clip_min=-5, clip_max=5,
                                   batch_size=100, y_target=y)
    self.assertEqual(x_val.shape, x_adv_p.shape)
    x_adv = self.sess.run(x_adv_p, {x: x_val, y: feed_labs})

    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
    self.assertTrue(np.mean(np.argmax(feed_labs, axis=1) == new_labs) > 0.9)
Example 8: jacobian_graph

# Required import: import tensorflow [as alias]
# Or: from tensorflow import placeholder [as alias]
def jacobian_graph(predictions, x, nb_classes):
    """
    Create the Jacobian graph to be run later in a TF session
    :param predictions: the model's symbolic output (linear output,
                        pre-softmax)
    :param x: the input placeholder
    :param nb_classes: the number of classes the model has
    :return: a list of nb_classes gradient tensors
    """
    # This function will return a list of TF gradients
    list_derivatives = []

    # Define the TF graph elements to compute our derivatives for each class
    for class_ind in xrange(nb_classes):  # xrange comes from six.moves in the original source
        derivatives, = tf.gradients(predictions[:, class_ind], x)
        list_derivatives.append(derivatives)

    return list_derivatives
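A hedged usage sketch: evaluating the returned gradient list for a concrete batch yields one array per class, each shaped like the input. Here sess, logits, x, and x_batch are assumed to exist:

grads = jacobian_graph(logits, x, nb_classes=10)
# One gradient tensor per class; the result is a list of 10 arrays,
# each with the same shape as x_batch.
jacobian = sess.run(grads, feed_dict={x: x_batch})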
Example 9: network_surgery

# Required import: import tensorflow [as alias]
# Or: from tensorflow import placeholder [as alias]
def network_surgery():
    tf.reset_default_graph()
    inputs = tf.placeholder(tf.float32,
                            shape=(None, 131072, 4),
                            name='inputs')
    targets = tf.placeholder(tf.float32, shape=(None, 1024, 4229),
                             name='targets')
    targets_na = tf.placeholder(tf.bool, shape=(None, 1024), name="targets_na")
    preds_adhoc = tf.placeholder(tf.float32, shape=(None, 960, 4229), name="Placeholder_15")
    # input_map splices the new placeholders in place of the saved tensors
    saver = tf.train.import_meta_graph("model_files/model.tf.meta",
                                       input_map={'Placeholder_15:0': preds_adhoc,
                                                  'Placeholder:0': targets_na,
                                                  'inputs:0': inputs,
                                                  'targets:0': targets
                                                  })
    ops = tf.get_default_graph().get_operations()
    out = tf.train.export_meta_graph(filename='model_files/model.tf-modified.meta', as_text=True)
    return ops[:15]  # hand back the first few ops of the remapped graph for inspection
Example 10: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import placeholder [as alias]
def __init__(self,
             channel_1_num,
             channel_2_num,
             conv_size,
             hidden_size,
             pool_size,
             learning_rate,
             x_dim=784,
             y_dim=10):
    self.channel_1_num = channel_1_num
    self.channel_2_num = channel_2_num
    self.conv_size = conv_size
    self.hidden_size = hidden_size
    self.pool_size = pool_size
    self.learning_rate = learning_rate
    self.x_dim = x_dim
    self.y_dim = y_dim

    self.images = tf.placeholder(tf.float32, [None, self.x_dim], name='input_x')
    self.labels = tf.placeholder(tf.float32, [None, self.y_dim], name='input_y')
    self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')

    self.train_step = None
    self.accuracy = None
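A hypothetical training feed for these placeholders; net, net.train_step, and the MNIST batch helper are assumptions, not part of the original class:

batch_x, batch_y = mnist.train.next_batch(64)             # assumed MNIST input helper
sess.run(net.train_step, feed_dict={net.images: batch_x,
                                    net.labels: batch_y,
                                    net.keep_prob: 0.5})  # dropout active during training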
Example 11: createLinearModel

# Required import: import tensorflow [as alias]
# Or: from tensorflow import placeholder [as alias]
def createLinearModel(dimension):
    np.random.seed(1024)
    # Define x and y
    x = tf.placeholder(tf.float64, shape=[None, dimension], name='x')
    # Writing the model in matrix form greatly speeds up the computation
    y = tf.placeholder(tf.float64, shape=[None, 1], name='y')
    # Define the parameter estimate and the prediction
    betaPred = tf.Variable(np.random.random([dimension, 1]))
    yPred = tf.matmul(x, betaPred, name='y_pred')
    # Define the loss function
    loss = tf.reduce_mean(tf.square(yPred - y))
    model = {
        'loss_function': loss,
        'independent_variable': x,
        'dependent_variable': y,
        'prediction': yPred,
        'model_params': betaPred
    }
    return model
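A minimal training sketch around the returned model dict; the optimizer choice, learning rate, step count, and the arrays X, Y are assumptions:

model = createLinearModel(dimension=5)
train_op = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(model['loss_function'])
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(1000):
        sess.run(train_op, feed_dict={model['independent_variable']: X,  # X: [n_samples, 5]
                                      model['dependent_variable']: Y})   # Y: [n_samples, 1]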
Example 12: setup_graph

# Required import: import tensorflow [as alias]
# Or: from tensorflow import placeholder [as alias]
def setup_graph(self, input_audio_batch, target_phrase):
    batch_size = input_audio_batch.shape[0]
    weird = (input_audio_batch.shape[1] - 1) // 320
    logits_arg2 = np.tile(weird, batch_size)
    dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)), dtype=np.int32)
    dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size), dtype=np.int32)

    pass_in = np.clip(input_audio_batch, -2**15, 2**15 - 1)
    seq_len = np.tile(weird, batch_size).astype(np.int32)

    with tf.variable_scope('', reuse=tf.AUTO_REUSE):
        inputs = tf.placeholder(tf.float32, shape=pass_in.shape, name='a')
        len_batch = tf.placeholder(tf.float32, name='b')
        arg2_logits = tf.placeholder(tf.int32, shape=logits_arg2.shape, name='c')
        arg1_dense = tf.placeholder(tf.float32, shape=dense_arg1.shape, name='d')
        arg2_dense = tf.placeholder(tf.int32, shape=dense_arg2.shape, name='e')
        len_seq = tf.placeholder(tf.int32, shape=seq_len.shape, name='f')

        logits = get_logits(inputs, arg2_logits)
        target = ctc_label_dense_to_sparse(arg1_dense, arg2_dense, len_batch)
        ctcloss = tf.nn.ctc_loss(labels=tf.cast(target, tf.int32),
                                 inputs=logits, sequence_length=len_seq)
        decoded, _ = tf.nn.ctc_greedy_decoder(logits, arg2_logits, merge_repeated=True)

        sess = tf.Session()
        saver = tf.train.Saver(tf.global_variables())
        saver.restore(sess, "models/session_dump")

    func1 = lambda a, b, c, d, e, f: sess.run(ctcloss,
        feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
    func2 = lambda a, b, c, d, e, f: sess.run([ctcloss, decoded],
        feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
    return (func1, func2)
Example 13: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import placeholder [as alias]
def __init__(self, config):
    entity_total = config.entity
    relation_total = config.relation
    batch_size = config.batch_size
    size = config.hidden_size
    margin = config.margin

    self.pos_h = tf.placeholder(tf.int32, [None])
    self.pos_t = tf.placeholder(tf.int32, [None])
    self.pos_r = tf.placeholder(tf.int32, [None])

    self.neg_h = tf.placeholder(tf.int32, [None])
    self.neg_t = tf.placeholder(tf.int32, [None])
    self.neg_r = tf.placeholder(tf.int32, [None])

    with tf.name_scope("embedding"):
        self.ent_embeddings = tf.get_variable(name="ent_embedding", shape=[entity_total, size],
                                              initializer=tf.contrib.layers.xavier_initializer(uniform=False))
        self.rel_embeddings = tf.get_variable(name="rel_embedding", shape=[relation_total, size],
                                              initializer=tf.contrib.layers.xavier_initializer(uniform=False))
        pos_h_e = tf.nn.embedding_lookup(self.ent_embeddings, self.pos_h)
        pos_t_e = tf.nn.embedding_lookup(self.ent_embeddings, self.pos_t)
        pos_r_e = tf.nn.embedding_lookup(self.rel_embeddings, self.pos_r)
        neg_h_e = tf.nn.embedding_lookup(self.ent_embeddings, self.neg_h)
        neg_t_e = tf.nn.embedding_lookup(self.ent_embeddings, self.neg_t)
        neg_r_e = tf.nn.embedding_lookup(self.rel_embeddings, self.neg_r)

    if config.L1_flag:
        pos = tf.reduce_sum(abs(pos_h_e + pos_r_e - pos_t_e), 1, keep_dims=True)
        neg = tf.reduce_sum(abs(neg_h_e + neg_r_e - neg_t_e), 1, keep_dims=True)
        self.predict = pos
    else:
        pos = tf.reduce_sum((pos_h_e + pos_r_e - pos_t_e) ** 2, 1, keep_dims=True)
        neg = tf.reduce_sum((neg_h_e + neg_r_e - neg_t_e) ** 2, 1, keep_dims=True)
        self.predict = pos

    with tf.name_scope("output"):
        self.loss = tf.reduce_sum(tf.maximum(pos - neg + margin, 0))
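A hypothetical feed of positive and corrupted (negative) triples for this TransE-style model; m, train_op, and the id arrays are assumptions:

feed = {m.pos_h: pos_heads, m.pos_t: pos_tails, m.pos_r: pos_rels,   # int32 arrays, shape [batch]
        m.neg_h: neg_heads, m.neg_t: neg_tails, m.neg_r: neg_rels}
_, loss = sess.run([train_op, m.loss], feed_dict=feed)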
Example 14: build_input_graph

# Required import: import tensorflow [as alias]
# Or: from tensorflow import placeholder [as alias]
def build_input_graph(self, vocab_size, emb_size, word_vocab_size, word_emb_size, word_window_size):
    """
    Gather embeddings from lookup tables.
    """
    seq_ids = tf.placeholder(dtype=INT_TYPE, shape=[None, None], name='seq_ids')
    seq_word_ids = [tf.placeholder(dtype=INT_TYPE, shape=[None, None], name='seq_feature_%d_ids' % i)
                    for i in range(word_window_size)]
    embeddings = tf.get_variable('embeddings', [vocab_size, emb_size])
    embedding_output = tf.nn.embedding_lookup([embeddings], seq_ids)
    word_outputs = []
    word_embeddings = tf.get_variable('word_embeddings', [word_vocab_size, word_emb_size])
    for i in range(word_window_size):
        word_outputs.append(tf.nn.embedding_lookup([word_embeddings], seq_word_ids[i]))
    return seq_ids, seq_word_ids, tf.concat([embedding_output] + word_outputs, 2, 'inputs')
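Because seq_word_ids is a Python list of placeholders, each window position is fed separately. A hedged sketch, assuming the three return values are in scope and the batch arrays exist:

feed = {seq_ids: char_id_batch}           # [batch, seq_len] character ids
for i, ph in enumerate(seq_word_ids):
    feed[ph] = word_id_batches[i]         # one [batch, seq_len] array per window position
# features = sess.run(inputs, feed_dict=feed)  # concatenated embedding output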
Example 15: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import placeholder [as alias]
def __init__(
        self, sequence_length, vocab_size, embedding_size, hidden_units, l2_reg_lambda, batch_size, trainableEmbeddings):
    # Placeholders for input, output and dropout
    self.input_x1 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x1")
    self.input_x2 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x2")
    self.input_y = tf.placeholder(tf.float32, [None], name="input_y")
    self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

    # Keeping track of l2 regularization loss (optional)
    l2_loss = tf.constant(0.0, name="l2_loss")

    # Embedding layer
    with tf.name_scope("embedding"):
        self.W = tf.Variable(
            tf.constant(0.0, shape=[vocab_size, embedding_size]),
            trainable=trainableEmbeddings, name="W")
        self.embedded_words1 = tf.nn.embedding_lookup(self.W, self.input_x1)
        self.embedded_words2 = tf.nn.embedding_lookup(self.W, self.input_x2)
    print(self.embedded_words1)  # debug print of the embedded tensor

    # Run a stacked RNN "side" over each input sentence and compute the
    # normalized distance between the two outputs
    with tf.name_scope("output"):
        self.out1 = self.stackedRNN(self.embedded_words1, self.dropout_keep_prob, "side1",
                                    embedding_size, sequence_length, hidden_units)
        self.out2 = self.stackedRNN(self.embedded_words2, self.dropout_keep_prob, "side2",
                                    embedding_size, sequence_length, hidden_units)
        self.distance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(self.out1, self.out2)),
                                              1, keep_dims=True))
        self.distance = tf.div(self.distance,
                               tf.add(tf.sqrt(tf.reduce_sum(tf.square(self.out1), 1, keep_dims=True)),
                                      tf.sqrt(tf.reduce_sum(tf.square(self.out2), 1, keep_dims=True))))
        self.distance = tf.reshape(self.distance, [-1], name="distance")

    with tf.name_scope("loss"):
        self.loss = self.contrastive_loss(self.input_y, self.distance, batch_size)

    #### Accuracy computation is outside of this class.
    with tf.name_scope("accuracy"):
        self.temp_sim = tf.subtract(tf.ones_like(self.distance), tf.rint(self.distance),
                                    name="temp_sim")  # auto threshold 0.5
        correct_predictions = tf.equal(self.temp_sim, self.input_y)
        self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
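Finally, a hypothetical training feed for the Siamese network above; siamese, train_op, and the batch arrays are assumptions:

feed = {siamese.input_x1: x1_batch,       # [batch, sequence_length] word ids
        siamese.input_x2: x2_batch,
        siamese.input_y: y_batch,         # 1.0 for similar pairs, 0.0 otherwise
        siamese.dropout_keep_prob: 0.8}
_, loss, acc = sess.run([train_op, siamese.loss, siamese.accuracy], feed_dict=feed)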