This article collects typical usage examples of the Python method tensorflow.trainable_variables. If you have been wondering what exactly tensorflow.trainable_variables does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the tensorflow module that the method belongs to.
The following presents 15 code examples of tensorflow.trainable_variables, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
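Before diving into the examples, here is a minimal sketch of what the method does, assuming TensorFlow 1.x (where tf.trainable_variables is available at the top level of the tf namespace); the variable names are purely illustrative:

import tensorflow as tf

# Variables are trainable by default, which places them in the
# GraphKeys.TRAINABLE_VARIABLES collection at creation time.
w = tf.get_variable("w", shape=[3, 3])

# Passing trainable=False keeps a variable out of that collection.
step = tf.get_variable("step", shape=[], dtype=tf.int64,
                       initializer=tf.zeros_initializer(),
                       trainable=False)

# tf.trainable_variables() returns the contents of the collection,
# i.e. the variables an optimizer will update by default.
for v in tf.trainable_variables():
    print(v.name, v.shape)  # prints "w:0", but not "step"

An optional scope argument restricts the result to variables whose names match a given prefix, as Example 15 below demonstrates.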
Example 1: test_adam
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import trainable_variables [as alias]
def test_adam(self):
  with self.test_session() as sess:
    w = tf.get_variable(
        "w",
        shape=[3],
        initializer=tf.constant_initializer([0.1, -0.2, -0.1]))
    x = tf.constant([0.4, 0.2, -0.5])
    loss = tf.reduce_mean(tf.square(x - w))
    tvars = tf.trainable_variables()
    grads = tf.gradients(loss, tvars)
    global_step = tf.train.get_or_create_global_step()
    optimizer = optimization.AdamWeightDecayOptimizer(learning_rate=0.2)
    train_op = optimizer.apply_gradients(zip(grads, tvars), global_step)
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    sess.run(init_op)
    for _ in range(100):
      sess.run(train_op)
    w_np = sess.run(w)
    self.assertAllClose(w_np.flat, [0.4, 0.2, -0.5], rtol=1e-2, atol=1e-2)
Example 2: _decay
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import trainable_variables [as alias]
def _decay(self):
  """L2 weight decay loss."""
  if self.decay_cost is not None:
    return self.decay_cost

  costs = []
  if self.device_name is None:
    for var in tf.trainable_variables():
      if var.op.name.find(r'DW') > 0:
        costs.append(tf.nn.l2_loss(var))
  else:
    for layer in self.layers:
      for var in layer.params_device[self.device_name].values():
        if (isinstance(var, tf.Variable) and
            var.op.name.find(r'DW') > 0):
          costs.append(tf.nn.l2_loss(var))

  self.decay_cost = tf.multiply(self.hps.weight_decay_rate,
                                tf.add_n(costs))
  return self.decay_cost
Example 3: testCreateLogisticClassifier
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import trainable_variables [as alias]
def testCreateLogisticClassifier(self):
  g = tf.Graph()
  with g.as_default():
    tf.set_random_seed(0)
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    model_fn = LogisticClassifier
    clone_args = (tf_inputs, tf_labels)
    deploy_config = model_deploy.DeploymentConfig(num_clones=1)

    self.assertEqual(slim.get_variables(), [])
    clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
    self.assertEqual(len(slim.get_variables()), 2)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    self.assertEqual(update_ops, [])

    optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
    total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                              optimizer)
    self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
    self.assertEqual(total_loss.op.name, 'total_loss')
    for g, v in grads_and_vars:
      self.assertDeviceEqual(g.device, 'GPU:0')
      self.assertDeviceEqual(v.device, 'CPU:0')
Example 4: testCreateSingleclone
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import trainable_variables [as alias]
def testCreateSingleclone(self):
  g = tf.Graph()
  with g.as_default():
    tf.set_random_seed(0)
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    model_fn = BatchNormClassifier
    clone_args = (tf_inputs, tf_labels)
    deploy_config = model_deploy.DeploymentConfig(num_clones=1)

    self.assertEqual(slim.get_variables(), [])
    clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
    self.assertEqual(len(slim.get_variables()), 5)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    self.assertEqual(len(update_ops), 2)

    optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
    total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                              optimizer)
    self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
    self.assertEqual(total_loss.op.name, 'total_loss')
    for g, v in grads_and_vars:
      self.assertDeviceEqual(g.device, 'GPU:0')
      self.assertDeviceEqual(v.device, 'CPU:0')
Example 5: _add_train_op
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import trainable_variables [as alias]
def _add_train_op(self):
  """Sets self._train_op, op to run for training."""
  hps = self._hps

  self._lr_rate = tf.maximum(
      hps.min_lr,  # min_lr_rate.
      tf.train.exponential_decay(hps.lr, self.global_step, 30000, 0.98))

  tvars = tf.trainable_variables()
  with tf.device(self._get_gpu(self._num_gpus-1)):
    grads, global_norm = tf.clip_by_global_norm(
        tf.gradients(self._loss, tvars), hps.max_grad_norm)
  tf.summary.scalar('global_norm', global_norm)
  optimizer = tf.train.GradientDescentOptimizer(self._lr_rate)
  tf.summary.scalar('learning rate', self._lr_rate)
  self._train_op = optimizer.apply_gradients(
      zip(grads, tvars), global_step=self.global_step, name='train_step')
Example 6: _build_train_op
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import trainable_variables [as alias]
def _build_train_op(self):
  """Build training specific ops for the graph."""
  self.lrn_rate = tf.constant(self.hps.lrn_rate, tf.float32)
  tf.summary.scalar('learning_rate', self.lrn_rate)

  trainable_variables = tf.trainable_variables()
  grads = tf.gradients(self.cost, trainable_variables)

  if self.hps.optimizer == 'sgd':
    optimizer = tf.train.GradientDescentOptimizer(self.lrn_rate)
  elif self.hps.optimizer == 'mom':
    optimizer = tf.train.MomentumOptimizer(self.lrn_rate, 0.9)

  apply_op = optimizer.apply_gradients(
      zip(grads, trainable_variables),
      global_step=self.global_step, name='train_step')
  train_ops = [apply_op] + self._extra_train_ops
  self.train_op = tf.group(*train_ops)

# TODO(xpan): Consider batch_norm in contrib/layers/python/layers/layers.py
Example 7: print_vectors
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import trainable_variables [as alias]
def print_vectors(embedding_key, vocab_path, word_vector_file):
  """Print vectors from the given variable."""
  _, rev_vocab = wmt.initialize_vocabulary(vocab_path)
  vectors_variable = [v for v in tf.trainable_variables()
                      if embedding_key == v.name]
  if len(vectors_variable) != 1:
    data.print_out("Word vector variable not found or too many.")
    sys.exit(1)
  vectors_variable = vectors_variable[0]
  vectors = vectors_variable.eval()
  l, s = vectors.shape[0], vectors.shape[1]
  data.print_out("Printing %d word vectors from %s to %s."
                 % (l, embedding_key, word_vector_file))
  with tf.gfile.GFile(word_vector_file, mode="w") as f:
    # Lines have format: dog 0.045123 -0.61323 0.413667 ...
    for i in xrange(l):
      f.write(rev_vocab[i])
      for j in xrange(s):
        f.write(" %.8f" % vectors[i][j])
      f.write("\n")
Example 8: weight_decay_and_noise
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import trainable_variables [as alias]
def weight_decay_and_noise(loss, hparams, learning_rate, var_list=None):
  """Apply weight decay and weight noise."""
  if var_list is None:
    var_list = tf.trainable_variables()

  decay_vars = [v for v in var_list]
  noise_vars = [v for v in var_list if "/body/" in v.name]

  weight_decay_loss = weight_decay(hparams.weight_decay, decay_vars)
  if hparams.weight_decay:
    tf.summary.scalar("losses/weight_decay", weight_decay_loss)
  weight_noise_ops = weight_noise(hparams.weight_noise, learning_rate,
                                  noise_vars)

  with tf.control_dependencies(weight_noise_ops):
    loss = tf.identity(loss)

  loss += weight_decay_loss
  return loss
Example 9: __init__
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import trainable_variables [as alias]
def __init__(self, ob_dim, ac_dim): #pylint: disable=W0613
    X = tf.placeholder(tf.float32, shape=[None, ob_dim*2+ac_dim*2+2]) # batch of observations
    vtarg_n = tf.placeholder(tf.float32, shape=[None], name='vtarg')
    wd_dict = {}
    h1 = tf.nn.elu(dense(X, 64, "h1", weight_init=U.normc_initializer(1.0), bias_init=0, weight_loss_dict=wd_dict))
    h2 = tf.nn.elu(dense(h1, 64, "h2", weight_init=U.normc_initializer(1.0), bias_init=0, weight_loss_dict=wd_dict))
    vpred_n = dense(h2, 1, "hfinal", weight_init=U.normc_initializer(1.0), bias_init=0, weight_loss_dict=wd_dict)[:,0]
    sample_vpred_n = vpred_n + tf.random_normal(tf.shape(vpred_n))
    wd_loss = tf.get_collection("vf_losses", None)
    loss = tf.reduce_mean(tf.square(vpred_n - vtarg_n)) + tf.add_n(wd_loss)
    loss_sampled = tf.reduce_mean(tf.square(vpred_n - tf.stop_gradient(sample_vpred_n)))
    self._predict = U.function([X], vpred_n)
    optim = kfac.KfacOptimizer(learning_rate=0.001, cold_lr=0.001*(1-0.9), momentum=0.9,
                               clip_kl=0.3, epsilon=0.1, stats_decay=0.95,
                               async=1, kfac_update=2, cold_iter=50,
                               weight_decay_dict=wd_dict, max_grad_norm=None)
    vf_var_list = []
    for var in tf.trainable_variables():
        if "vf" in var.name:
            vf_var_list.append(var)

    update_op, self.q_runner = optim.minimize(loss, loss_sampled, var_list=vf_var_list)
    self.do_update = U.function([X, vtarg_n], update_op) #pylint: disable=E1101
    U.initialize() # Initialize uninitialized TF variables
Example 10: _add_train_op
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import trainable_variables [as alias]
def _add_train_op(self):
    # In regression, the objective loss is Mean Squared Error (MSE).
    self.loss = tf.losses.mean_squared_error(labels=self._y, predictions=self.output)

    tvars = tf.trainable_variables()
    gradients = tf.gradients(self.loss, tvars, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE)

    # Clip the gradients
    with tf.device("/gpu:{}".format(self._hps.dqn_gpu_num)):
        grads, global_norm = tf.clip_by_global_norm(gradients, self._hps.max_grad_norm)

    # Add a summary
    tf.summary.scalar('global_norm', global_norm)

    # Apply the Adam optimizer
    optimizer = tf.train.AdamOptimizer(self._hps.lr)
    with tf.device("/gpu:{}".format(self._hps.dqn_gpu_num)):
        self.train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=self.global_step, name='train_step')

    self.variable_summaries('dqn_loss', self.loss)
Example 11: chpt_to_dict_arrays_simple
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import trainable_variables [as alias]
def chpt_to_dict_arrays_simple(file_name):
    """
    Convert a checkpoint into a dictionary of numpy arrays
    for later use in the TensorRT NMT sample.
    """
    config = tf.ConfigProto(allow_soft_placement=True)
    sess = tf.Session(config=config)

    saver = tf.train.import_meta_graph(file_name)
    dir_name = os.path.dirname(os.path.abspath(file_name))
    saver.restore(sess, tf.train.latest_checkpoint(dir_name))

    params = {}
    print('\nFound the following trainable variables:')
    with sess.as_default():
        variables = tf.trainable_variables()
        for v in variables:
            params[v.name] = v.eval(session=sess)
            print("{0} {1}".format(v.name, params[v.name].shape))

    # use default value
    params["forget_bias"] = 1.0
    return params
Example 12: _build_policy_net
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import trainable_variables [as alias]
def _build_policy_net(self):
    """Build policy network"""
    with tf.variable_scope(self.scope):
        self.state_input = tf.placeholder(tf.float32, [None, self.state_size])
        self.action = tf.placeholder(tf.int32, [None])
        self.target = tf.placeholder(tf.float32, [None])

        layer_1 = tf_utils.fc(self.state_input, self.n_hidden_1, tf.nn.relu)
        layer_2 = tf_utils.fc(layer_1, self.n_hidden_2, tf.nn.relu)

        self.action_values = tf_utils.fc(layer_2, self.action_size)
        action_mask = tf.one_hot(self.action, self.action_size, 1.0, 0.0)
        self.action_prob = tf.nn.softmax(self.action_values)
        self.action_value_pred = tf.reduce_sum(self.action_prob * action_mask, 1)

        # l2 regularization
        self.l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])
        self.pg_loss = tf.reduce_mean(-tf.log(self.action_value_pred) * self.target)

        self.loss = self.pg_loss + 0.002 * self.l2_loss
        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
        self.train_op = self.optimizer.minimize(self.loss, global_step=tf.contrib.framework.get_global_step())
Example 13: _build_policy_net
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import trainable_variables [as alias]
def _build_policy_net(self):
    """Build policy network"""
    with tf.variable_scope(self.scope):
        self.state_input = tf.placeholder(tf.float32, [None, self.state_size])
        self.action = tf.placeholder(tf.int32, [None])
        self.target = tf.placeholder(tf.float32, [None])

        layer_1 = tf_utils.fc(self.state_input, self.n_hidden_1, tf.nn.relu)
        layer_2 = tf_utils.fc(layer_1, self.n_hidden_2, tf.nn.relu)

        self.value = tf_utils.fc(layer_2, 1)

        self.action_values = tf_utils.fc(layer_2, self.action_size)
        action_mask = tf.one_hot(self.action, self.action_size, 1.0, 0.0)
        self.action_value_pred = tf.reduce_sum(tf.nn.softmax(self.action_values) * action_mask, 1)
        self.action_probs = tf.nn.softmax(self.action_values)

        self.value_loss = tf.reduce_mean(tf.square(self.target - self.value))
        self.pg_loss = tf.reduce_mean(-tf.log(self.action_value_pred) * (self.target - self.value))
        self.l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])
        self.loss = self.pg_loss + 5*self.value_loss + 0.002 * self.l2_loss

        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
        self.train_op = self.optimizer.minimize(self.loss, global_step=tf.contrib.framework.get_global_step())
Example 14: _get_variables_to_train
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import trainable_variables [as alias]
def _get_variables_to_train():
  """Returns a list of variables to train.

  Returns:
    A list of variables to train by the optimizer.
  """
  if FLAGS.trainable_scopes is None:
    return tf.trainable_variables()
  else:
    scopes = [scope.strip() for scope in FLAGS.trainable_scopes.split(',')]

  variables_to_train = []
  for scope in scopes:
    variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
    variables_to_train.extend(variables)
  return variables_to_train
Example 15: _init_graph
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import trainable_variables [as alias]
def _init_graph(self):
    # Collect inputs.
    self.input_names = []
    for param in inspect.signature(self._build_func).parameters.values():
        if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty:
            self.input_names.append(param.name)
    self.num_inputs = len(self.input_names)
    assert self.num_inputs >= 1

    # Choose name and scope.
    if self.name is None:
        self.name = self._build_func_name
    self.scope = tf.get_default_graph().unique_name(self.name.replace('/', '_'), mark_as_used=False)

    # Build template graph.
    with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
        assert tf.get_variable_scope().name == self.scope
        with absolute_name_scope(self.scope):  # ignore surrounding name_scope
            with tf.control_dependencies(None):  # ignore surrounding control_dependencies
                self.input_templates = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
                out_expr = self._build_func(*self.input_templates, is_template_graph=True, **self.static_kwargs)

    # Collect outputs.
    assert is_tf_expression(out_expr) or isinstance(out_expr, tuple)
    self.output_templates = [out_expr] if is_tf_expression(out_expr) else list(out_expr)
    self.output_names = [t.name.split('/')[-1].split(':')[0] for t in self.output_templates]
    self.num_outputs = len(self.output_templates)
    assert self.num_outputs >= 1

    # Populate remaining fields.
    self.input_shapes = [shape_to_list(t.shape) for t in self.input_templates]
    self.output_shapes = [shape_to_list(t.shape) for t in self.output_templates]
    self.input_shape = self.input_shapes[0]
    self.output_shape = self.output_shapes[0]
    self.vars = OrderedDict([(self.get_var_localname(var), var) for var in tf.global_variables(self.scope + '/')])
    self.trainables = OrderedDict([(self.get_var_localname(var), var) for var in tf.trainable_variables(self.scope + '/')])

    # Run initializers for all variables defined by this network.