This article collects typical usage examples of the Python function tensorflow.get_collection. If you have been wondering what get_collection actually does, how to call it, and what it looks like in real code, the hand-picked examples below should help.
Fifteen code examples of get_collection are shown, drawn from open-source projects and listed roughly in order of popularity.
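Before the examples, here is a minimal, self-contained sketch written for this article (it is not taken from any of the projects below; the scope names 'encoder' and 'decoder' are purely illustrative, and it assumes the TF 1.x graph-mode API that all of the examples use). It shows the two patterns that recur throughout: filtering a predefined GraphKeys collection by scope, and accumulating terms in a custom string-keyed collection.

import tensorflow as tf  # TF 1.x graph-mode API, as in every example below

# Two hypothetical variable scopes, only to illustrate scope filtering.
with tf.variable_scope('encoder'):
    w_enc = tf.get_variable('w', shape=[4, 4])
with tf.variable_scope('decoder'):
    w_dec = tf.get_variable('w', shape=[4, 4])

# Every trainable variable, and the subset whose names start with 'encoder'.
all_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
enc_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='encoder')

# Custom collections work the same way: add_to_collection appends a value,
# get_collection returns the list of everything stored under that key.
tf.add_to_collection('losses', tf.nn.l2_loss(w_enc))
total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')

The examples below use these same ideas: selecting a subset of variables for an optimizer by scope, collecting update ops (tf.GraphKeys.UPDATE_OPS), and summing a 'losses' collection with tf.add_n.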
Example 1: build_model
def build_model(self):
    self.x = tf.placeholder(tf.float32, [self.reader.vocab_size], name="input")
    self.x_idx = tf.placeholder(tf.int32, [None], name='x_idx')  # mask paddings
    self.build_encoder()
    self.build_generator()
    self.objective = self.kl + self.recons_loss
    # optimizers for the alternating update of encoder and generator
    optimizer1 = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
    optimizer2 = tf.train.AdamOptimizer(learning_rate=0.1)
    fullvars = tf.GraphKeys.TRAINABLE_VARIABLES
    print('fullvars:', fullvars)
    enc_vars = tf.get_collection(fullvars, scope='encoder')
    print(enc_vars)
    dec_vars = tf.get_collection(fullvars, scope='generator')
    print(dec_vars)
    self.lossL2_enc = tf.add_n([tf.nn.l2_loss(v) for v in enc_vars if 'bias' not in v.name]) * 0.0001
    self.lossL2_dec = tf.add_n([tf.nn.l2_loss(v) for v in dec_vars if 'bias' not in v.name])
    print('lossL2_enc:', self.lossL2_enc)
    print('lossL2_dec:', self.lossL2_dec)
    enc_grads = tf.gradients(self.kl + self.lossL2_enc, enc_vars)
    dec_grads = tf.gradients(self.recons_loss + self.lossL2_dec, dec_vars)
    self.optim_enc = optimizer1.apply_gradients(zip(enc_grads, enc_vars))
    self.optim_dec = optimizer2.apply_gradients(zip(dec_grads, dec_vars))
Example 2: loss
def loss(self, predicts, labels, objects_num):
    """Add Loss to all the trainable variables
    Args:
      predicts: 4-D tensor [batch_size, cell_size, cell_size, 5 * boxes_per_cell]
        ===> (num_classes, boxes_per_cell, 4 * boxes_per_cell)
      labels: 3-D tensor of [batch_size, max_objects, 5]
      objects_num: 1-D tensor [batch_size]
    """
    class_loss = tf.constant(0, tf.float32)
    object_loss = tf.constant(0, tf.float32)
    noobject_loss = tf.constant(0, tf.float32)
    coord_loss = tf.constant(0, tf.float32)
    loss = [0, 0, 0, 0]
    for i in range(self.batch_size):
        predict = predicts[i, :, :, :]
        label = labels[i, :, :]
        object_num = objects_num[i]
        nilboy = tf.ones([7, 7, 2])
        tuple_results = tf.while_loop(
            self.cond1, self.body1,
            [tf.constant(0), object_num,
             [class_loss, object_loss, noobject_loss, coord_loss],
             predict, label, nilboy])
        for j in range(4):
            loss[j] = loss[j] + tuple_results[2][j]
        nilboy = tuple_results[5]
    tf.add_to_collection('losses', (loss[0] + loss[1] + loss[2] + loss[3]) / self.batch_size)
    tf.summary.scalar('class_loss', loss[0] / self.batch_size)
    tf.summary.scalar('object_loss', loss[1] / self.batch_size)
    tf.summary.scalar('noobject_loss', loss[2] / self.batch_size)
    tf.summary.scalar('coord_loss', loss[3] / self.batch_size)
    tf.summary.scalar('weight_loss', tf.add_n(tf.get_collection('losses'))
                      - (loss[0] + loss[1] + loss[2] + loss[3]) / self.batch_size)
    return tf.add_n(tf.get_collection('losses'), name='total_loss'), nilboy
Example 3: optimize_test
def optimize_test(self):
    # Test-time optimizer to compare log-likelihood score of ZINB-WaVE
    update_ops_test = tf.get_collection(tf.GraphKeys.UPDATE_OPS, "variational")
    test_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "variational")
    optimizer_test = tf.train.AdamOptimizer(learning_rate=0.001, epsilon=0.1)
    with tf.control_dependencies(update_ops_test):
        self.test_step = optimizer_test.minimize(self.loss, var_list=test_vars)
Example 4: __init__
def __init__(self, params=params, dyn='FCC'):
    tf.reset_default_graph()
    data = self.sample_mog(params['batch_size'])
    noise = ds.Normal(tf.zeros(params['z_dim']),
                      tf.ones(params['z_dim'])).sample(params['batch_size'])
    # Construct generator and discriminator nets
    with slim.arg_scope([slim.fully_connected],
                        weights_initializer=tf.orthogonal_initializer(gain=1.4)):
        samples = self.generator(noise, output_dim=params['x_dim'])
        real_score = self.discriminator(data)
        fake_score = self.discriminator(samples, reuse=True)
    # Saddle objective
    loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=real_score, labels=tf.ones_like(real_score)) +
        tf.nn.sigmoid_cross_entropy_with_logits(logits=fake_score, labels=tf.zeros_like(fake_score)))
    gen_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "generator")
    disc_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "discriminator")
    gen_shapes = [tuple(v.get_shape().as_list()) for v in gen_vars]
    disc_shapes = [tuple(v.get_shape().as_list()) for v in disc_vars]
    # Generator gradient
    g_opt = tf.train.GradientDescentOptimizer(learning_rate=params['gen_learning_rate'])
    g_grads = g_opt.compute_gradients(-loss, var_list=gen_vars)
    # Discriminator gradient
    d_opt = tf.train.GradientDescentOptimizer(learning_rate=params['disc_learning_rate'])
    d_grads = d_opt.compute_gradients(loss, var_list=disc_vars)
    # Squared norm of gradient: d/dx 1/2||F||^2 = J^T F
    grads_norm_sep = [tf.reduce_sum(g[0]**2) for g in g_grads + d_grads]
    grads_norm = 0.5 * tf.reduce_sum(grads_norm_sep)
    # Gradient of squared norm
    JTF = tf.gradients(grads_norm, xs=gen_vars + disc_vars)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    self.params = params
    self.data = data
    self.samples = samples
    self.gen_vars = gen_vars
    self.disc_vars = disc_vars
    self.gen_shapes = gen_shapes
    self.disc_shapes = disc_shapes
    self.Fg = g_grads
    self.Fd = d_grads
    self.JTF = JTF
    self.sess = sess
    self.findiff_step = params['findiff_step']
    self.gamma = params['gamma']
    self.dyn = dyn
    if dyn == 'FCC':
        self.F = self.FCC
    else:
        self.F = self._F
Example 5: testAddWeight
def testAddWeight(self):
    with self.test_session():
        layer = base_layers._Layer(name='my_layer')

        # Test basic variable creation.
        variable = layer._add_weight('my_var', [2, 2],
                                     initializer=tf.zeros_initializer)
        self.assertEqual(variable.name, 'my_var:0')
        self.assertListEqual(layer.weights, [variable])
        self.assertListEqual(layer.trainable_weights, [variable])
        self.assertListEqual(layer.non_trainable_weights, [])
        self.assertListEqual(layer.weights,
                             tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES))

        # Test non-trainable variable creation.
        # layer._add_weight should work even outside `build` and `call`.
        variable_2 = layer._add_weight('non_trainable_var', [2, 2],
                                       initializer=tf.zeros_initializer,
                                       trainable=False)
        self.assertListEqual(layer.weights, [variable, variable_2])
        self.assertListEqual(layer.trainable_weights, [variable])
        self.assertListEqual(layer.non_trainable_weights, [variable_2])
        self.assertEqual(
            len(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)), 1)

        # Test with regularizer.
        regularizer = lambda x: tf.reduce_sum(x) * 1e-3
        variable = layer._add_weight('reg_var', [2, 2],
                                     initializer=tf.zeros_initializer,
                                     regularizer=regularizer)
        self.assertEqual(len(layer.losses), 1)
Example 6: loss
def loss(y, model_vars, Y, l2_reg, scope=None):
    """
    L2-loss model on top of the network raw output.
    Args:
      y: network output tensor
      model_vars: [w_conv, thresh, w_e, w_s, w_d]
      Y: ground truth tensor
      l2_reg: l2 regularization strength
      scope: unique prefix string identifying the tower, e.g. 'tower_00'
    Returns:
      total_loss: total loss Tensor
    """
    sq_loss = tf.nn.l2_loss(y - Y, name='sq_loss')
    tf.add_to_collection('losses', sq_loss)
    if l2_reg > 0:
        with tf.name_scope('l2_decay'):
            w_conv, thresh, w_e, w_s, w_d = model_vars
            for decay_var in [w_conv, w_e, w_s, w_d]:
                weight_decay = tf.mul(tf.nn.l2_loss(decay_var), l2_reg)
                tf.add_to_collection('losses', weight_decay)
    total_loss = tf.add_n(tf.get_collection('losses', scope=scope), name='total_loss')
    # Add loss summaries
    for loss in tf.get_collection('losses', scope=scope) + [total_loss]:
        loss_name = re.sub('%s_[0-9]*/' % FLAGS.tower_name, '', loss.op.name)
        tf.scalar_summary(loss_name, loss)
    return total_loss
Example 7: combined_loss_G
def combined_loss_G(self, batch_size_tf):
    """
    Calculates the sum of the combined adversarial, lp and GDL losses in the given proportion.
    Used for training the generative model.
    @param gen_frames: A list of tensors of the generated frames at each scale.
    @param gt_frames: A list of tensors of the ground truth frames at each scale.
    @param d_preds: A list of tensors of the classifications made by the discriminator model at each scale.
    @param lam_adv: The percentage of the adversarial loss to use in the combined loss.
    @param lam_lp: The percentage of the lp loss to use in the combined loss.
    @param lam_gdl: The percentage of the GDL loss to use in the combined loss.
    @param l_num: 1 or 2 for l1 and l2 loss, respectively.
    @param alpha: The power to which each gradient term is raised in the GDL loss.
    @return: The combined adversarial, lp and GDL losses.
    """
    diceterm = loss_dice(self.G, self.CT_GT, self.num_classes, batch_size_tf)
    fcnterm = lossfcn(self.G, self.CT_GT, self.num_classes, batch_size_tf, self.classweights)
    if self.adversarial:
        bceterm = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(self.D_logits_, tf.ones_like(self.D_)))
        loss_ = self.lam_dice * diceterm + self.lam_fcn * fcnterm + self.lam_adv * bceterm
        tf.add_to_collection('losses', loss_)
        loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
        return loss, diceterm, fcnterm, bceterm
    else:
        loss_ = self.lam_dice * diceterm + self.lam_fcn * fcnterm
        tf.add_to_collection('losses', loss_)
        loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
        return loss, self.lam_dice * diceterm, self.lam_fcn * fcnterm
Example 8: testCustomMainOp
def testCustomMainOp(self):
    export_dir = os.path.join(tf.test.get_temp_dir(), "test_main_op")
    builder = saved_model_builder.SavedModelBuilder(export_dir)

    with self.test_session(graph=tf.Graph()) as sess:
        # Add `v1` and `v2` variables to the graph.
        v1 = tf.Variable(1, name="v1")
        tf.add_to_collection("v", v1)
        v2 = tf.Variable(2, name="v2")
        tf.add_to_collection("v", v2)

        # Initialize another variable `v3` to 42.
        v3 = tf.Variable(42, name="v3")
        tf.add_to_collection("v", v3)

        # Set up an assignment op to be run as part of the main_op.
        with tf.control_dependencies([main_op.main_op()]):
            add_v1_v2 = tf.add(v1._ref(), v2._ref())
            custom_main_op = tf.group(tf.assign(v3, add_v1_v2))

        sess.run(custom_main_op)
        builder.add_meta_graph_and_variables(
            sess, ["foo"], main_op=custom_main_op)

    # Save the SavedModel to disk.
    builder.save()

    with self.test_session(graph=tf.Graph()) as sess:
        loader.load(sess, ["foo"], export_dir)
        self.assertEqual(1, tf.get_collection("v")[0].eval())
        self.assertEqual(2, tf.get_collection("v")[1].eval())
        # Evaluates to the sum of the first two variables and assigned as part of
        # the main_op, following a restore.
        self.assertEqual(3, tf.get_collection("v")[2].eval())
Example 9: __init_output
def __init_output(self):
    with tf.variable_scope('output'):
        # Losses
        self.regularization_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
        self.cross_entropy_loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y, name='loss'))
        self.loss = self.regularization_loss + self.cross_entropy_loss

        # Optimizer
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.optimizer = tf.train.AdamOptimizer(learning_rate=self.args.learning_rate)
            self.train_op = self.optimizer.minimize(self.loss)
            # This is for debugging NaNs. Check TensorFlow documentation.
            self.check_op = tf.add_check_numerics_ops()

        # Output and metrics
        self.y_out_softmax = tf.nn.softmax(self.logits)  # softmax-normalized class probabilities
        self.y_out_argmax = tf.argmax(self.y_out_softmax, axis=-1, output_type=tf.int32)  # argmax gives the predicted class
        self.accuracy = tf.reduce_mean(tf.cast(tf.equal(self.y, self.y_out_argmax), tf.float32))  # accuracy

    # Record summaries
    with tf.name_scope('train-summary-per-iteration'):
        tf.summary.scalar('loss', self.loss)
        tf.summary.scalar('acc', self.accuracy)
        self.summaries_merged = tf.summary.merge_all()
Example 10: testTags
def testTags(self):
    export_dir = os.path.join(tf.test.get_temp_dir(), "test_tags")
    builder = saved_model_builder.SavedModelBuilder(export_dir)

    # Graph with a single variable. SavedModel invoked to:
    # - add with weights.
    # - a single tag (from predefined constants).
    with self.test_session(graph=tf.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 42)
        builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING])

    # Graph that updates the single variable. SavedModel invoked to:
    # - simply add the model (weights are not updated).
    # - a single tag (from predefined constants).
    with self.test_session(graph=tf.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 43)
        builder.add_meta_graph([tag_constants.SERVING])

    # Graph that updates the single variable. SavedModel is invoked:
    # - to add the model (weights are not updated).
    # - multiple custom tags.
    with self.test_session(graph=tf.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 44)
        builder.add_meta_graph(["foo", "bar"])

    # Save the SavedModel to disk.
    builder.save()

    # Restore the graph with a single predefined tag whose variables were saved.
    with self.test_session(graph=tf.Graph()) as sess:
        loader.load(sess, [tag_constants.TRAINING], export_dir)
        self.assertEqual(
            42, tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)[0].eval())

    # Restore the graph with a single predefined tag whose variables were not
    # saved.
    with self.test_session(graph=tf.Graph()) as sess:
        loader.load(sess, [tag_constants.SERVING], export_dir)
        self.assertEqual(
            42, tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)[0].eval())

    # Restore the graph with multiple tags. Provide duplicate tags to test set
    # semantics.
    with self.test_session(graph=tf.Graph()) as sess:
        loader.load(sess, ["foo", "bar", "foo"], export_dir)
        self.assertEqual(
            42, tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)[0].eval())

    # Try restoring a graph with a non-existent tag. This should yield a runtime
    # error.
    with self.test_session(graph=tf.Graph()) as sess:
        self.assertRaises(RuntimeError, loader.load, sess, ["INVALID"],
                          export_dir)

    # Try restoring a graph where a subset of the tags match. Since tag matching
    # for meta graph defs follows "all" semantics, this should yield a runtime
    # error.
    with self.test_session(graph=tf.Graph()) as sess:
        self.assertRaises(RuntimeError, loader.load, sess, ["foo", "baz"],
                          export_dir)
Example 11: testSaveAsText
def testSaveAsText(self):
    export_dir = os.path.join(tf.test.get_temp_dir(), "test_astext")
    builder = saved_model_builder.SavedModelBuilder(export_dir)

    # Graph with a single variable. SavedModel invoked to:
    # - add with weights.
    with self.test_session(graph=tf.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 42)
        builder.add_meta_graph_and_variables(sess, ["foo"])

    # Graph with the same single variable. SavedModel invoked to:
    # - simply add the model (weights are not updated).
    with self.test_session(graph=tf.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 43)
        builder.add_meta_graph(["bar"])

    # Save the SavedModel to disk in text format.
    builder.save(as_text=True)

    # Restore the graph with tag "foo", whose variables were saved.
    with self.test_session(graph=tf.Graph()) as sess:
        loader.load(sess, ["foo"], export_dir)
        self.assertEqual(
            42, tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)[0].eval())

    # Restore the graph with tag "bar", whose variables were not saved.
    with self.test_session(graph=tf.Graph()) as sess:
        loader.load(sess, ["bar"], export_dir)
        self.assertEqual(
            42, tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)[0].eval())
Example 12: _update_network
def _update_network(self, trainer):
    self.actions = tf.placeholder(shape=[None], dtype=tf.int32)
    self.actions_onehot = tf.one_hot(
        self.actions, self.a_dim, dtype=tf.float32)
    self.target_v = tf.placeholder(shape=[None], dtype=tf.float32)
    self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)
    self.outputs = tf.reduce_sum(
        self.policy * self.actions_onehot, [1])

    # loss
    self.value_loss = 0.5 * tf.reduce_sum(tf.square(
        self.target_v - tf.reshape(self.value, [-1])))
    # higher entropy -> lower loss -> encourage exploration
    self.entropy = -tf.reduce_sum(self.policy * tf.log(self.policy))
    self.policy_loss = -tf.reduce_sum(
        tf.log(self.outputs) * self.advantages)
    self.loss = 0.5 * self.value_loss \
        + self.policy_loss - 0.01 * self.entropy

    # local gradients
    local_vars = tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
    self.gradients = tf.gradients(self.loss, local_vars)
    self.var_norms = tf.global_norm(local_vars)
    # grads[i] * clip_norm / max(global_norm, clip_norm)
    grads, self.grad_norms = tf.clip_by_global_norm(self.gradients, 40.0)

    # apply gradients to the global network
    global_vars = tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES, 'global')
    self.apply_grads = trainer.apply_gradients(zip(grads, global_vars))
Example 13: testmodel1
def testmodel1():
    print("\033[1;31mbegin load model>>>\033[0m")
    with tf.Session() as sess:
        new_saver = tf.train.import_meta_graph("saver_checkpoint.meta")  # restore the graph definition
        new_saver.restore(sess, "saver_checkpoint")                      # restore the saved variable values
        # tf.get_collection() returns a list. In this example we only want the first one.
        predict = tf.get_collection("predict")[0]
        x = tf.get_collection("x")[0]
        y_ = tf.get_collection("y_")[0]
        keep_prob = tf.get_collection("keep_prob")[0]

        # single-sample prediction
        print("\033[1;31msingle predict\033[0m")
        test = test_samples[1,].reshape(1, 784)
        mark = np.diag([1] * 4)
        prev = sess.run(predict, feed_dict={x: test, y_: mark, keep_prob: 1.0})
        print(u"[prev]:", chr(prev.tolist().index(1) + 65))

        # confusion-matrix test over the whole batch
        print("\033[1;31mbatch matrix\033[0m")
        pre_labels = []
        for sample in test_samples.tolist():
            sample = np.array(sample)
            pre_label = sess.run(predict, feed_dict={x: sample.reshape(1, 784), y_: mark, keep_prob: 1.0})
            pre_labels.append(pre_label)
        pre_char_labels = [chr(l.tolist().index(True) + 65) for l in pre_labels]   # predictions are True/False one-hot
        test_char_labels = [chr(l.tolist().index(1) + 65) for l in test_labels]    # ground truth is 0/1 one-hot
        print(metrics.confusion_matrix(pre_char_labels, test_char_labels))
        print("\033[1;31mpredict done!\033[0m")
Example 14: __init__
def __init__(self,
             n_actions,
             n_features,
             learning_rate=0.01,
             reward_decay=0.9,         # gamma
             epsilon_greedy=0.9,       # epsilon
             epsilon_increment=0.001,
             replace_target_iter=300,  # number of steps between target-network updates
             buffer_size=500,          # replay (experience) buffer size
             batch_size=32,
             ):
    self.n_actions = n_actions
    self.n_features = n_features
    self.lr = learning_rate
    self.gamma = reward_decay
    self.epsilon_max = epsilon_greedy
    self.replace_target_iter = replace_target_iter
    self.buffer_size = buffer_size
    self.buffer_counter = 0  # counts how many transitions have entered the buffer so far
    self.batch_size = batch_size
    self.epsilon = 0 if epsilon_increment is not None else epsilon_greedy
    self.epsilon_max = epsilon_greedy
    self.epsilon_increment = epsilon_increment
    self.learn_step_counter = 0  # learning-step counter
    self.buffer = np.zeros((self.buffer_size, n_features * 2 + 2))  # initialize the experience buffer, one row per [s, a, r, s_]
    self.build_net()

    # copy all parameters from the eval network into the target network
    target_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_net')
    eval_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='eval_net')
    with tf.variable_scope('soft_replacement'):
        self.target_replace_op = [tf.assign(t, e) for t, e in zip(target_params, eval_params)]

    self.sess = tf.Session()
    tf.summary.FileWriter('logs/', self.sess.graph)
    self.sess.run(tf.global_variables_initializer())
Example 15: testCheckStatsDouble
def testCheckStatsDouble(self, dtype):
    """The correct statistics are being computed for double connection.

    Connected in parallel, it's ill-defined what order the updates will happen
    in. A double update could happen, or two sequential updates. E.g. if
    decay_rate is 0.9, the start value is 1.0, and the target value is 0.0, the
    value could progress as

      1.00 -> 0.90 -> 0.81,

    if the second update uses the fresh second value. Or as

      1.00 -> 0.90 -> 0.80

    if the second update uses the stale first value.

    We fix this here by running them in sequential run calls to ensure that
    this test is deterministic.

    The two situations are minimally different, especially if decay_rate is
    close to one (e.g. the default of 0.999).

    Args:
      dtype: TensorFlow datatype of input test batch.
    """
    v, _, inputs = self._get_inputs(dtype)

    bn = snt.BatchNorm(offset=False, scale=False, decay_rate=0.9)

    with tf.name_scope("net1"):
        bn(inputs, is_training=True)
    with tf.name_scope("net2"):
        bn(inputs, is_training=True)

    update_ops_1 = tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS, "net1"))
    self.assertEqual(len(update_ops_1), 2)
    update_ops_2 = tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS, "net2"))
    self.assertEqual(len(update_ops_2), 2)

    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())

        mm, mv = sess.run([bn.moving_mean, bn.moving_variance])
        self.assertAllClose(np.zeros([1, 6]), mm)
        self.assertAllClose(np.ones([1, 6]), mv)

        sess.run(update_ops_1)
        sess.run(update_ops_2)

        mm, mv = sess.run([bn.moving_mean, bn.moving_variance])

        correct_mm = (1.0 - bn._decay_rate) * v
        correct_mm = (1.0 - bn._decay_rate) * v + bn._decay_rate * correct_mm
        correct_mv = np.ones([1, 6]) * bn._decay_rate**2

        self.assertAllClose(np.reshape(correct_mm, [1, 6]), mm)
        self.assertAllClose(np.reshape(correct_mv, [1, 6]), mv)