This article collects typical usage examples of the Python method tensorflow.all_variables. If you are wondering what tensorflow.all_variables does, how to call it, or where to find usage examples, the curated code samples below may help. You can also explore further usage examples from the tensorflow module it belongs to.
The following 15 code examples of tensorflow.all_variables are shown, sorted by popularity by default.
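Note: tf.all_variables belongs to the TensorFlow 1.x API and was deprecated in TensorFlow 1.0 in favor of tf.global_variables; its companion tf.initialize_all_variables was likewise replaced by tf.global_variables_initializer. A minimal sketch of the old and new spellings, assuming a TF 1.x installation:

import tensorflow as tf  # assumes TensorFlow 1.x

a = tf.Variable(1.0, name="a")
b = tf.Variable(2.0, name="b")

old_vars = tf.all_variables()     # deprecated spelling used throughout the examples below
new_vars = tf.global_variables()  # the same list under the non-deprecated name

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print([v.name for v in new_vars])  # ['a:0', 'b:0']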
Example 1: restore_best_model
# Required import: import tensorflow [as alias]
# Or: from tensorflow import all_variables [as alias]
def restore_best_model(self):
    """Load bestmodel file from eval directory, add variables for adagrad, and save to train directory."""
    tf.logging.info("Restoring bestmodel for training...")

    # Initialize all vars in the model.
    sess = tf.Session(config=util.get_config())
    print("Initializing all variables...")
    sess.run(tf.initialize_all_variables())

    # Restore the best model from the eval dir.
    saver = tf.train.Saver([v for v in tf.all_variables() if "Adagrad" not in v.name])
    print("Restoring all non-adagrad variables from best model in eval dir...")
    curr_ckpt = util.load_ckpt(saver, sess, "eval")
    print("Restored %s." % curr_ckpt)

    # Save this model to the train dir and quit.
    new_model_name = curr_ckpt.split("/")[-1].replace("bestmodel", "model")
    new_fname = os.path.join(FLAGS.log_root, "train", new_model_name)
    print("Saving model to %s..." % new_fname)
    new_saver = tf.train.Saver()  # this saver saves all variables that now exist, including Adagrad variables
    new_saver.save(sess, new_fname)
    print("Saved.")
    exit()
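The pattern above (restore everything except the optimizer's accumulator variables, then re-save with a Saver that covers all variables) is worth isolating. A minimal sketch of the same filter with the non-deprecated TF 1.x names; the toy variable and loss are illustrative:

import tensorflow as tf  # TF 1.x

w = tf.Variable(tf.zeros([3]), name="w")
loss = tf.reduce_sum(tf.square(w))
train_op = tf.train.AdagradOptimizer(0.1).minimize(loss)  # creates a "w/Adagrad" slot variable

# Restore only the model weights: Adagrad slot variables carry "Adagrad" in their names.
model_vars = [v for v in tf.global_variables() if "Adagrad" not in v.name]
restore_saver = tf.train.Saver(model_vars)

# A no-argument Saver covers every variable that now exists, slots included.
full_saver = tf.train.Saver()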
Example 2: build_model
# Required import: import tensorflow [as alias]
# Or: from tensorflow import all_variables [as alias]
def build_model(sess, embedding_dim, batch_size):
    model = CondGAN(
        lr_imsize=cfg.TEST.LR_IMSIZE,
        hr_lr_ratio=int(cfg.TEST.HR_IMSIZE / cfg.TEST.LR_IMSIZE))

    embeddings = tf.placeholder(
        tf.float32, [batch_size, embedding_dim],
        name='conditional_embeddings')
    with pt.defaults_scope(phase=pt.Phase.test):
        with tf.variable_scope("g_net"):
            c = sample_encoded_context(embeddings, model)
            z = tf.random_normal([batch_size, cfg.Z_DIM])
            fake_images = model.get_generator(tf.concat(1, [c, z]))
        with tf.variable_scope("hr_g_net"):
            hr_c = sample_encoded_context(embeddings, model)
            hr_fake_images = model.hr_get_generator(fake_images, hr_c)

    ckt_path = cfg.TEST.PRETRAINED_MODEL
    if ckt_path.find('.ckpt') != -1:
        print("Reading model parameters from %s" % ckt_path)
        saver = tf.train.Saver(tf.all_variables())
        saver.restore(sess, ckt_path)
    else:
        print("Input a valid model path.")
    return embeddings, fake_images, hr_fake_images
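One portability note on the block above: tf.concat(1, [c, z]) uses the pre-1.0 argument order with the axis first. From TensorFlow 1.0 on, the values come first and the axis second. A minimal sketch of the two spellings (shapes are illustrative):

import tensorflow as tf

c = tf.zeros([4, 8])
z = tf.zeros([4, 16])

# TensorFlow <= 0.12, as in the example above:
#   cz = tf.concat(1, [c, z])
# TensorFlow >= 1.0:
cz = tf.concat([c, z], axis=1)  # shape (4, 24)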
Example 3: build_model
# Required import: import tensorflow [as alias]
# Or: from tensorflow import all_variables [as alias]
def build_model(self, sess):
    self.init_opt()
    sess.run(tf.initialize_all_variables())

    if len(self.model_path) > 0:
        print("Reading model parameters from %s" % self.model_path)
        restore_vars = tf.all_variables()
        # all_vars = tf.all_variables()
        # restore_vars = [var for var in all_vars if
        #                 var.name.startswith('g_') or
        #                 var.name.startswith('d_')]
        saver = tf.train.Saver(restore_vars)
        saver.restore(sess, self.model_path)

        istart = self.model_path.rfind('_') + 1
        iend = self.model_path.rfind('.')
        counter = self.model_path[istart:iend]
        counter = int(counter)
    else:
        print("Created model with fresh parameters.")
        counter = 0
    return counter
Example 4: build_model
# Required import: import tensorflow [as alias]
# Or: from tensorflow import all_variables [as alias]
def build_model(self, sess):
    self.init_opt()
    sess.run(tf.initialize_all_variables())

    if len(self.model_path) > 0:
        print("Reading model parameters from %s" % self.model_path)
        all_vars = tf.trainable_variables()
        # all_vars = tf.all_variables()
        restore_vars = []
        for var in all_vars:
            if var.name.startswith('g_') or var.name.startswith('d_'):
                restore_vars.append(var)
                # print(var.name)
        saver = tf.train.Saver(restore_vars)
        saver.restore(sess, self.model_path)

        istart = self.model_path.rfind('_') + 1
        iend = self.model_path.rfind('.')
        counter = self.model_path[istart:iend]
        counter = int(counter)
    else:
        print("Created model with fresh parameters.")
        counter = 0
    return counter
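Examples 3 and 4 illustrate the same trade-off: pass tf.all_variables() to the Saver to restore everything, or pass a filtered list to restore only some scopes. A short sketch of the filtered variant using tf.get_collection with a scope argument, which avoids testing v.name.startswith(...) by hand (a TF 1.x sketch; the scope names 'g_net' and 'd_net' are illustrative):

import tensorflow as tf

with tf.variable_scope("g_net"):
    g_w = tf.get_variable("w", shape=[4, 4])
with tf.variable_scope("d_net"):
    d_w = tf.get_variable("w", shape=[4, 4])

# Collect variables by scope instead of parsing names manually.
g_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="g_net")
d_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="d_net")

saver = tf.train.Saver(g_vars + d_vars)  # restores only the two scopes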
Example 5: typeAndWikiDescBasedColdEmbExp
# Required import: import tensorflow [as alias]
# Or: from tensorflow import all_variables [as alias]
def typeAndWikiDescBasedColdEmbExp(self, ckptName="FigerModel-20001"):
    '''Train cold embeddings using the wiki description loss.'''
    saver = tf.train.Saver(var_list=tf.all_variables())
    print("Loading Model ... ")
    if ckptName is None:
        print("No CKPT name given. Exiting.")
        sys.exit()
    else:
        load_status = self.fm.loadSpecificCKPT(
            saver=saver, checkpoint_dir=self.fm.checkpoint_dir,
            ckptName=ckptName, attrs=self.fm._attrs)
        if not load_status:
            print("No model to load. Exiting.")
            sys.exit(0)
    self._makeDescLossGraph()
    self.fm.sess.run(tf.initialize_variables(self.allcoldvars))
    self._trainColdEmbFromTypesAndDesc(epochsToTrain=5)
    self.runEval()
    # Evaluation for cold start when initializing cold embeddings from the wiki desc encoding.
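The key call in Example 5 is tf.initialize_variables(self.allcoldvars): it initializes only the freshly created variables, so values restored from the checkpoint are left untouched. The function was renamed tf.variables_initializer in TensorFlow 1.0. A minimal sketch of the pattern (the variable names are illustrative):

import tensorflow as tf

restored_var = tf.Variable(1.0, name="restored_from_ckpt")
cold_var = tf.Variable(0.0, name="fresh_cold_embedding")

with tf.Session() as sess:
    # Suppose restored_var was just loaded by a Saver; initialize only
    # cold_var so the restored value is not overwritten.
    sess.run(tf.variables_initializer([cold_var]))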
Example 6: restore_map
# Required import: import tensorflow [as alias]
# Or: from tensorflow import all_variables [as alias]
def restore_map(self, from_detection_checkpoint=True):
    """Returns a map of variables to load from a foreign checkpoint.

    See parent class for details.

    Args:
      from_detection_checkpoint: whether to restore from a full detection
        checkpoint (with compatible variable names) or to restore from a
        classification checkpoint for initialization prior to training.

    Returns:
      A dict mapping variable names (to load from a checkpoint) to variables in
      the model graph.
    """
    variables_to_restore = {}
    for variable in tf.all_variables():
        if variable.op.name.startswith(self._extract_features_scope):
            var_name = variable.op.name
            if not from_detection_checkpoint:
                var_name = re.split('^' + self._extract_features_scope + '/',
                                    var_name)[-1]
            variables_to_restore[var_name] = variable
    return variables_to_restore
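The dict returned by restore_map is meant to be handed to tf.train.Saver, whose var_list argument may be a dict mapping checkpoint variable names to variables in the current graph. A hedged sketch of the consuming side; the model object and the checkpoint path are assumptions, not part of the example above:

import tensorflow as tf

# `model` is assumed to be an instance of the class defining restore_map above.
variables_to_restore = model.restore_map(from_detection_checkpoint=False)
init_saver = tf.train.Saver(variables_to_restore)  # dict: ckpt name -> graph variable

with tf.Session() as sess:
    init_saver.restore(sess, "/path/to/classification.ckpt")  # hypothetical path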
Example 7: train_neural_network
# Required import: import tensorflow [as alias]
# Or: from tensorflow import all_variables [as alias]
def train_neural_network():
    logits, last_state, _, _, _ = neural_network()
    targets = tf.reshape(output_targets, [-1])

    loss = tf.nn.seq2seq.sequence_loss_by_example(
        [logits], [targets],
        [tf.ones_like(targets, dtype=tf.float32)], len(words))
    cost = tf.reduce_mean(loss)
    learning_rate = tf.Variable(0.0, trainable=False)
    tvars = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), 5)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    train_op = optimizer.apply_gradients(zip(grads, tvars))

    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        saver = tf.train.Saver(tf.all_variables())

        for epoch in range(50):
            sess.run(tf.assign(learning_rate, 0.002 * (0.97 ** epoch)))
            n = 0
            for batch in range(n_chunk):
                train_loss, _, _ = sess.run(
                    [cost, last_state, train_op],
                    feed_dict={input_data: x_batches[n], output_targets: y_batches[n]})
                n += 1
                print(epoch, batch, train_loss)
            if epoch % 7 == 0:
                saver.save(sess, 'poetry.module', global_step=epoch)
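A compatibility note for the block above: tf.nn.seq2seq.sequence_loss_by_example is the pre-1.0 path; in TensorFlow 1.x the same helper lives at tf.contrib.legacy_seq2seq.sequence_loss_by_example, and in the 1.x signature the trailing vocabulary-size argument (len(words) above) is gone. A minimal sketch of the renamed call (shapes are illustrative):

import tensorflow as tf

logits = tf.zeros([8, 1000])             # [batch * time, vocab]
targets = tf.zeros([8], dtype=tf.int32)  # [batch * time]
weights = tf.ones_like(targets, dtype=tf.float32)

loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
    [logits], [targets], [weights])
cost = tf.reduce_mean(loss)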
Example 8: _testScope
# Required import: import tensorflow [as alias]
# Or: from tensorflow import all_variables [as alias]
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    with self.test_session(use_gpu=True, graph=tf.Graph()):
        if use_outer_scope:
            with tf.variable_scope(prefix) as scope:
                factory(scope)
        else:
            factory(prefix)

        # Check that all the variable names start with the proper scope.
        tf.global_variables_initializer()
        all_vars = tf.all_variables()
        prefix = prefix or "RNN"
        scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
        tf.logging.info("RNN with scope: %s (%s)"
                        % (prefix, "scope" if use_outer_scope else "str"))
        for v in scope_vars:
            tf.logging.info(v.name)
        self.assertEqual(len(scope_vars), len(all_vars))
Example 9: testLocalInitOp
# Required import: import tensorflow [as alias]
# Or: from tensorflow import all_variables [as alias]
def testLocalInitOp(self):
    logdir = _test_dir("default_local_init_op")
    with tf.Graph().as_default():
        # A local variable.
        v = tf.Variable([1.0, 2.0, 3.0],
                        trainable=False,
                        collections=[tf.GraphKeys.LOCAL_VARIABLES])

        # An entity which is initialized through a TABLE_INITIALIZER.
        w = tf.Variable([4, 5, 6], trainable=False, collections=[])
        tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, w.initializer)

        # This shouldn't add a variable to the VARIABLES collection responsible
        # for variables that are saved/restored from checkpoints.
        self.assertEquals(len(tf.all_variables()), 0)

        # Suppress normal variable inits to make sure the local one is
        # initialized via local_init_op.
        sv = tf.train.Supervisor(logdir=logdir, init_op=None)
        sess = sv.prepare_or_wait_for_session("")
        self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
        self.assertAllClose([4, 5, 6], sess.run(w))
        sv.stop()
Example 10: testLocalInitOpForNonChief
# Required import: import tensorflow [as alias]
# Or: from tensorflow import all_variables [as alias]
def testLocalInitOpForNonChief(self):
    logdir = _test_dir("default_local_init_op_non_chief")
    with tf.Graph().as_default():
        with tf.device("/job:localhost"):
            # A local variable.
            v = tf.Variable([1.0, 2.0, 3.0],
                            trainable=False,
                            collections=[tf.GraphKeys.LOCAL_VARIABLES])
            # This shouldn't add a variable to the VARIABLES collection responsible
            # for variables that are saved/restored from checkpoints.
            self.assertEquals(len(tf.all_variables()), 0)

        # Suppress normal variable inits to make sure the local one is
        # initialized via local_init_op.
        sv = tf.train.Supervisor(logdir=logdir, init_op=None, is_chief=False)
        sess = sv.prepare_or_wait_for_session("")
        self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
        sv.stop()
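The two Supervisor tests above turn on the split between the GLOBAL (checkpointed) and LOCAL (per-process) variable collections: a variable created with collections=[tf.GraphKeys.LOCAL_VARIABLES] never appears in tf.all_variables(), which is exactly what the assertEquals(..., 0) checks. A minimal sketch of that split (TF 1.x):

import tensorflow as tf

g = tf.Variable(0, name="g")  # default collection: GLOBAL_VARIABLES
l = tf.Variable(0, name="l", trainable=False,
                collections=[tf.GraphKeys.LOCAL_VARIABLES])

print(len(tf.global_variables()))  # 1: only g is checkpointed
print(len(tf.local_variables()))   # 1: only l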
Example 11: testPrepareSessionWithReadyNotReadyForLocal
# Required import: import tensorflow [as alias]
# Or: from tensorflow import all_variables [as alias]
def testPrepareSessionWithReadyNotReadyForLocal(self):
    with tf.Graph().as_default():
        v = tf.Variable(1, name="v")
        w = tf.Variable(
            v,
            trainable=False,
            collections=[tf.GraphKeys.LOCAL_VARIABLES],
            name="w")
        with self.test_session():
            self.assertEqual(False, tf.is_variable_initialized(v).eval())
            self.assertEqual(False, tf.is_variable_initialized(w).eval())
            sm2 = tf.train.SessionManager(
                ready_op=tf.report_uninitialized_variables(),
                ready_for_local_init_op=tf.report_uninitialized_variables(
                    tf.all_variables()),
                local_init_op=w.initializer)
            with self.assertRaisesRegexp(
                    RuntimeError,
                    "Init operations did not make model ready for local_init"):
                sm2.prepare_session("", init_op=None)
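The readiness ops in Example 11 rely on tf.report_uninitialized_variables, which returns a 1-D string tensor holding the names of any listed variables that are not yet initialized; an empty result means "ready". A minimal sketch (TF 1.x):

import tensorflow as tf

v = tf.Variable(1.0, name="v")
with tf.Session() as sess:
    print(sess.run(tf.report_uninitialized_variables()))  # [b'v']
    sess.run(tf.global_variables_initializer())
    print(sess.run(tf.report_uninitialized_variables()))  # []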
Example 12: _testScope
# Required import: import tensorflow [as alias]
# Or: from tensorflow import all_variables [as alias]
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    # REMARKS: factory(scope) is a function accepting a scope as an
    # argument; the scope can be None, a string, or a VariableScope instance.
    with self.test_session(use_gpu=True, graph=tf.Graph()):
        if use_outer_scope:
            with tf.variable_scope(prefix) as scope:
                factory(scope)
        else:
            factory(prefix)

        # Check that all the variable names start with the proper scope.
        tf.global_variables_initializer()
        all_vars = tf.all_variables()
        prefix = prefix or "StackRNN"
        scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
        tf.logging.info("StackRNN with scope: %s (%s)"
                        % (prefix, "scope" if use_outer_scope else "str"))
        for v in scope_vars:
            tf.logging.info(v.name)
        self.assertEqual(len(scope_vars), len(all_vars))
Example 13: testAdaptiveGradientClip
# Required import: import tensorflow [as alias]
# Or: from tensorflow import all_variables [as alias]
def testAdaptiveGradientClip(self):
    with self.test_session() as session:
        x, var, loss, global_step = _setup_model()
        clip_gradients = tf.contrib.layers.adaptive_clipping_fn()
        train = tf.contrib.layers.optimize_loss(loss,
                                                global_step,
                                                learning_rate=0.1,
                                                optimizer="SGD",
                                                clip_gradients=clip_gradients)
        tf.global_variables_initializer().run()
        session.run(train, feed_dict={x: 5})
        var_value, global_step_value = session.run([var, global_step])
        self.assertAlmostEqual(var_value, 9.8916, 4)
        self.assertEqual(global_step_value, 1)

        var_count = 0
        for var in tf.all_variables():
            if var.name.startswith("OptimizeLoss/AdaptiveMaxNorm"):
                var_count += 1
        self.assertEqual(2, var_count)
Example 14: testStochasticVariablesWithConstantInitializer
# Required import: import tensorflow [as alias]
# Or: from tensorflow import all_variables [as alias]
def testStochasticVariablesWithConstantInitializer(self):
    shape = (10, 20)
    with tf.variable_scope(
            "stochastic_variables",
            custom_getter=sv.make_stochastic_variable_getter(
                dist_cls=dist.NormalWithSoftplusSigma,
                dist_kwargs={"validate_args": True},
                param_initializers={
                    "mu": np.ones(shape) * 4.,
                    "sigma": np.ones(shape) * 2.
                })):
        v = tf.get_variable("sv")

    for var in tf.all_variables():
        if "mu" in var.name:
            mu_var = var
        if "sigma" in var.name:
            sigma_var = var

    v = tf.convert_to_tensor(v)
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        self.assertAllEqual(np.ones(shape) * 4., sess.run(mu_var))
        self.assertAllEqual(np.ones(shape) * 2., sess.run(sigma_var))
        self.assertEqual(shape, sess.run(v).shape)
Example 15: restore_best_model
# Required import: import tensorflow [as alias]
# Or: from tensorflow import all_variables [as alias]
def restore_best_model():
    """Load bestmodel file from eval directory, add variables for adagrad, and save to train directory."""
    tf.logging.info("Restoring best model for training...")

    # Initialize all vars in the model.
    sess = tf.Session(config=util.get_config())
    print("Initializing all variables...")
    sess.run(tf.initialize_all_variables())

    # Restore the best model from the eval dir.
    saver = tf.train.Saver([v for v in tf.all_variables() if "Adagrad" not in v.name])
    print("Restoring all non-adagrad variables from best model in eval dir...")
    curr_ckpt = util.load_ckpt(saver, sess, "eval")
    print("Restored %s." % curr_ckpt)

    # Save this model to the train dir and quit.
    new_model_name = curr_ckpt.split("/")[-1].replace("bestmodel", "model")
    new_fname = os.path.join(FLAGS.log_root, "train", new_model_name)
    print("Saving model to %s..." % new_fname)
    new_saver = tf.train.Saver()  # this saver saves all variables that now exist, including Adagrad variables
    new_saver.save(sess, new_fname)
    print("Saved.")
    exit()