This article collects and summarizes typical usage examples of the Python function tensorflow.all_variables: what all_variables does, and how it is used in practice. The curated code examples below should help answer those questions.
The following presents 15 code examples of the all_variables function, sorted by popularity by default.
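All of the examples use the pre-1.0 TensorFlow API: tf.all_variables was deprecated and replaced by tf.global_variables in TensorFlow 1.0 (likewise tf.initialize_all_variables became tf.global_variables_initializer). As a minimal sketch of what the function returns, assuming that old API:

import tensorflow as tf

w = tf.Variable(tf.zeros([2, 2]), name='w')
b = tf.Variable(tf.zeros([2]), name='b')

# tf.all_variables() returns every variable in the default graph's
# VARIABLES collection, in creation order.
for v in tf.all_variables():
    print(v.name, v.get_shape())
# w:0 (2, 2)
# b:0 (2,)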
Example 1: reset_module
def reset_module(self, module):
    temp = set(tf.all_variables())
    module.backward(module.loss)
    self.sess.run(tf.initialize_variables(set(tf.all_variables()) - temp))
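The idiom above snapshots the variable collection before building new ops, then initializes only the variables created afterwards (the set difference), so variables that already hold trained values are left untouched.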
Example 2: __init__
def __init__(self, network_architecture, transfer_fct=tf.nn.softplus,
             learning_rate=0.001, batch_size=100, load_model=False,
             checkpoint_folder='./vae_checkpoints'):
    self.network_architecture = network_architecture
    self.transfer_fct = transfer_fct
    self.learning_rate = learning_rate
    self.batch_size = batch_size
    # tf Graph input
    self.x = tf.placeholder(tf.float32, [None, network_architecture["n_input"]])
    # Launch the session
    self.sess = tf.InteractiveSession()
    #self.saver = tf.train.Saver(tf.all_variables())
    # Create the autoencoder network
    self._create_network()
    # Define the loss function based on the variational upper bound and the
    # corresponding optimizer
    self._create_loss_optimizer()
    print(len(tf.all_variables()))
    self.saver = tf.train.Saver(var_list=tf.all_variables())
    if not load_model:
        # Initialize the TensorFlow variables
        init = tf.initialize_all_variables()
        self.sess.run(init)
    else:
        ckpt = tf.train.get_checkpoint_state(checkpoint_folder)
        self.saver.restore(self.sess, ckpt.model_checkpoint_path)
        print("Loaded model:", ckpt.model_checkpoint_path)
        self.sess.run(tf.all_variables())
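Two details worth noting: tf.train.Saver() with no arguments already defaults to saving all saveable variables, so var_list=tf.all_variables() simply makes that explicit; and the final self.sess.run(tf.all_variables()) only fetches the restored values, it does not modify them.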
Example 3: _discriminator_model
def _discriminator_model(sess, features, disc_input):
    # Fully convolutional model
    mapsize = 3
    layers = [64, 128, 256, 512]
    old_vars = tf.all_variables()
    model = Model('DIS', 2 * disc_input - 1)
    for layer in range(len(layers)):
        nunits = layers[layer]
        stddev_factor = 2.0
        model.add_conv2d(nunits, mapsize=mapsize, stride=2, stddev_factor=stddev_factor)
        model.add_batch_norm()
        model.add_relu()
    # Finalization a la "all convolutional net"
    model.add_conv2d(nunits, mapsize=mapsize, stride=1, stddev_factor=stddev_factor)
    model.add_batch_norm()
    model.add_relu()
    model.add_conv2d(nunits, mapsize=1, stride=1, stddev_factor=stddev_factor)
    model.add_batch_norm()
    model.add_relu()
    # Linearly map to real/fake and return average score
    # (softmax will be applied later)
    model.add_conv2d(1, mapsize=1, stride=1, stddev_factor=stddev_factor)
    model.add_mean()
    new_vars = tf.all_variables()
    disc_vars = list(set(new_vars) - set(old_vars))
    return model.get_output(), disc_vars
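Capturing tf.all_variables() before and after building the discriminator and taking the set difference is a common pre-1.0 way to collect exactly the variables a sub-network owns. A hedged sketch of how the returned disc_vars would typically be consumed, where d_loss is a hypothetical discriminator loss:

d_train_op = tf.train.AdamOptimizer(1e-4).minimize(d_loss, var_list=disc_vars)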
Example 4: train_dnn
def train_dnn(data_folder, model_file):
    # Output of dnn using input x
    y = DNN(x)
    print("Loading training pickles...")
    train_set = import_data.load_dataset(data_folder + '/train_data.pickle',
                                         data_folder + '/train_labels.pickle',
                                         context_frames=context_frames)
    # Create the dir for the model
    if not os.path.isdir('%s/models/%s' % (save_loc, start_date)):
        try:
            os.makedirs('%s/models/%s' % (save_loc, start_date))
        except OSError:
            if not os.path.isdir('%s/models/%s' % (save_loc, start_date)):
                raise
    # Create the session
    global sess
    sess = tf.InteractiveSession()
    global summary_op
    global train_writer
    global saver
    saver = tf.train.Saver()
    # Op for merging all summaries
    summary_op = tf.merge_all_summaries()
    # Summary writer
    train_writer = tf.train.SummaryWriter('%ssummaries/%s' % (save_loc, start_date), sess.graph)
    # Cost function
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, y_))
    # Optimizer
    # For gradient descent, learning rate = 0.002 (see Hinton et al.)
    # For AdamOptimizer, learning rate = 0.0001 (better than default (exp 1.2))
    if optimizer_name == 'Adam':
        # Hacky solution for making sure that the beta2_power var
        # is always initialized
        temp = set(tf.all_variables())
        optimizer = tf.train.AdamOptimizer(1e-4).minimize(cost)
        sess.run(tf.initialize_variables(set(tf.all_variables()) - temp))
    else:
        optimizer = tf.train.GradientDescentOptimizer(0.02).minimize(cost)
    if model_file:
        saver.restore(sess, model_file)
        print("Model restored")
    else:
        # Initialization
        init_op = tf.initialize_all_variables()
        sess.run(init_op)
    print("Training network. Date: %s" % start_date)
    train(train_set, y, cost, optimizer)
    save_path = saver.save(sess, "%s/models/%s/model.ckpt" % (save_loc, start_date))
    print("Model saved in file: %s" % save_path)
    print("Summaries written to summaries/%s" % start_date)
    evaluate_dnn(data_folder, y)
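The "hacky solution" comment refers to the fact that AdamOptimizer creates its internal bookkeeping variables (beta1_power, beta2_power, and per-variable slots) only when minimize() is called. Since the saver here was built before the optimizer, those slots are not in its var_list and would not be restored from a checkpoint, so the same snapshot-and-diff trick as in Example 1 initializes exactly the late-created variables.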
Example 5: sample
def sample(self, args):
    if self.model is None:
        # Allow sample to be usable outside of main()
        with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:
            saved_args = cPickle.load(f)
        with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'rb') as f:
            self.chars, self.vocab = cPickle.load(f)
        self.model = Model(saved_args, True)
        with tf.Session() as sess:
            tf.initialize_all_variables().run()
            saver = tf.train.Saver(tf.all_variables())
            ckpt = tf.train.get_checkpoint_state(args.save_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                return self.model.sample(sess, self.chars, self.vocab, args.n, args.prime)
    else:
        with tf.Session() as sess:
            tf.initialize_all_variables().run()
            saver = tf.train.Saver(tf.all_variables())
            ckpt = tf.train.get_checkpoint_state(args.save_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                return self.model.sample(sess, self.chars, self.vocab, args.n, args.prime)
    return None
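In both branches, tf.train.get_checkpoint_state reads the checkpoint state file in args.save_dir and returns the path of the most recent checkpoint, which is why the restore is guarded on ckpt and ckpt.model_checkpoint_path.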
Example 6: _create_initializers
def _create_initializers(self):
    if self._var_count != len(tf.all_variables()):
        self._saver = tf.train.Saver(tf.all_variables(), max_to_keep=5)
        self._init = tf.initialize_all_variables()
        self._check_inited = tf.assert_variables_initialized()
        self._var_count = len(tf.all_variables())
        if self._summary_writer:
            self._summaries = tf.merge_all_summaries()
            self._summary_writer.add_graph(tf.get_default_graph().as_graph_def())
Example 7: _create_initializers
def _create_initializers(self):
    if self._var_count != len(tf.all_variables()):
        save_dir = os.path.dirname(self._save_path) if self._save_path else None
        if save_dir and not tf.gfile.IsDirectory(save_dir):
            tf.gfile.MakeDirs(save_dir)
        self._saver = tf.train.Saver(tf.all_variables(), max_to_keep=5)
        self._init = tf.initialize_all_variables()
        self._check_inited = tf.assert_variables_initialized()
        self._var_count = len(tf.all_variables())
        if self._summary_writer:
            self._summaries = tf.merge_all_summaries()
            self._summary_writer.add_graph(tf.get_default_graph())
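Examples 6 and 7 show the same lazy-rebuild pattern: the saver, init op, and summaries are recreated only when len(tf.all_variables()) has changed since the last call, so newly added variables are always covered without rebuilding these ops on every step.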
Example 8: testGraphMatchesImmediate
def testGraphMatchesImmediate(self):
    """Ensures that the vars line up between the two modes."""
    with tf.Graph().as_default():
        input_pt = prettytensor.wrap(self.input)
        self.BuildLargishGraph(input_pt)
        normal_names = sorted([v.name for v in tf.all_variables()])
    with tf.Graph().as_default():
        template = prettytensor.template('input')
        self.BuildLargishGraph(template).construct(
            input=prettytensor.wrap(self.input))
        template_names = sorted([v.name for v in tf.all_variables()])
    self.assertSequenceEqual(normal_names, template_names)
Example 9: register_all_variables_and_grards
def register_all_variables_and_grards(y):
    all_vars = tf.all_variables()
    for v in tf.all_variables():
        tf.histogram_summary('hist_' + v.name, v)
        if v.get_shape() == []:
            tf.scalar_summary('scal_' + v.name, v)
    grad_vars = opt.compute_gradients(y, all_vars)  # [(T(gradient), variable)]
    for (dldw, v) in grad_vars:
        if dldw is not None:
            tf.histogram_summary('hist_' + v.name + 'dW', dldw)
            if v.get_shape() == [] or dldw.get_shape() == []:
                tf.scalar_summary('scal_' + v.name + 'dW', dldw)
            l2norm_dldw = tf.reduce_mean(tf.square(dldw))
            tf.scalar_summary('scal_' + v.name + 'dW_l2_norm', l2norm_dldw)
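tf.histogram_summary and tf.scalar_summary are the pre-1.0 names; in TensorFlow 1.0 they became tf.summary.histogram and tf.summary.scalar. Note also that opt (the optimizer) is assumed to exist in the enclosing scope, and that compute_gradients returns (gradient, variable) pairs in which the gradient is None for variables that y does not depend on, hence the is-not-None guard.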
Example 10: add_aux_layer
def add_aux_layer(self, aux_attrs):
    layer_name = aux_attrs['layer_name']
    with tf.variable_scope(layer_name):
        init_op = tf.initialize_all_variables()
        saver = tf.train.Saver(tf.all_variables())
    tensors_dict = {'%s_init_op' % layer_name: init_op,
                    '%s_saver_op' % layer_name: saver}
    return tensors_dict
Example 11: guarantee_initialized_variables
def guarantee_initialized_variables(self, session, list_of_variables=None):
    if list_of_variables is None:
        list_of_variables = tf.all_variables()
    # report_uninitialized_variables returns variable *names* (byte strings),
    # so map them back to the corresponding variable objects.
    uninitialized_names = set(
        name.decode('utf-8') for name in
        session.run(tf.report_uninitialized_variables(list_of_variables)))
    uninitialized_variables = [v for v in list_of_variables
                               if v.op.name in uninitialized_names]
    session.run(tf.initialize_variables(uninitialized_variables))
    return uninitialized_variables
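This helper is useful when a graph mixes restored and freshly created variables: instead of re-initializing everything (and clobbering restored weights), it asks the session which variables are still uninitialized and initializes only those.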
Example 12: train_model
def train_model(args):
    data_loader = InputHandler(args.data_dir, args.batch_size, args.result_length)
    args.vocabulary_size = data_loader.vocabulary_size
    # Save the original files, so that we can load the model when sampling
    with open(os.path.join(args.snapshots_dir, CONFIGURATION_FILE), 'wb') as f:
        cPickle.dump(args, f)
    with open(os.path.join(args.snapshots_dir, WORDS_VOCABULARY_FILE), 'wb') as f:
        cPickle.dump((data_loader.words, data_loader.vocabulary), f)
    model = RNNModel(args.rnn_size, args.network_depth, args.batch_size, args.result_length,
                     args.vocabulary_size, args.gradient)
    with tf.Session() as session:
        tf.initialize_all_variables().run()
        saver = tf.train.Saver(tf.all_variables())
        for e in range(args.num_epochs):
            session.run(tf.assign(model.lr, args.training_rate * (args.decay_rate ** e)))
            data_loader.set_batch_pointer_to_zero()
            state = model.initial_state.eval()
            for b in range(data_loader.num_batches):
                x, y = data_loader.get_next_batch()
                feed = {model.input_data: x, model.targets: y, model.initial_state: state}
                train_loss, state, _ = session.run([model.cost, model.final_state, model.train_op], feed)
                if ((e * data_loader.num_batches + b) % args.snapshot == 0
                        or (e == args.num_epochs - 1 and b == data_loader.num_batches - 1)):
                    # also save for the last result
                    snapshot_path = os.path.join(args.snapshots_dir, 'model.ckpt')
                    saver.save(session, snapshot_path, global_step=e * data_loader.num_batches + b)
                    print("Model snapshot was taken to {}".format(snapshot_path))
Example 13: run_model_image
def run_model_image(checkpoint_file, image):
    """
    Run an image through the trained model and visualize its activations
    :param checkpoint_file: The saved model parameters for the basic model
    :param image: The supplied image (same dimensions as training).
    """
    with tf.Graph().as_default():
        image = tf.reshape(image, [IMAGE_SIZE, IMAGE_SIZE, 1])
        image = tf.image.per_image_whitening(image)
        image = tf.reshape(image, [1, IMAGE_SIZE, IMAGE_SIZE, 1])
        image = tf.cast(image, tf.float32)
        relu1, relu2, relu3 = inference(train=False, images=image, visualize=True)
        saver = tf.train.Saver(tf.all_variables())
        sess = tf.Session()
        saver.restore(sess=sess, save_path=checkpoint_file)
        units = relu1.eval(session=sess)
        plotNNFilter(units)
        units = relu2.eval(session=sess)
        plotNNFilter(units)
        units = relu3.eval(session=sess)
        plotNNFilter(units)
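tf.image.per_image_whitening is another pre-1.0 name; in TensorFlow 1.0 it was renamed tf.image.per_image_standardization. It scales the image to zero mean and unit variance so inference sees the same preprocessing as training.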
Example 14: restore_fn
def restore_fn(self, checkpoint_path, from_detection_checkpoint=True):
    """Return callable for loading a checkpoint into the tensorflow graph.

    Args:
      checkpoint_path: path to checkpoint to restore.
      from_detection_checkpoint: whether to restore from a full detection
        checkpoint (with compatible variable names) or to restore from a
        classification checkpoint for initialization prior to training.

    Returns:
      a callable which takes a tf.Session as input and loads a checkpoint when
      run.
    """
    variables_to_restore = {}
    for variable in tf.all_variables():
        if variable.op.name.startswith(self._extract_features_scope):
            var_name = variable.op.name
            if not from_detection_checkpoint:
                var_name = (
                    re.split('^' + self._extract_features_scope + '/', var_name)[-1])
            variables_to_restore[var_name] = variable
    # TODO: Load variables selectively using scopes.
    variables_to_restore = (
        variables_helper.get_variables_available_in_checkpoint(
            variables_to_restore, checkpoint_path))
    saver = tf.train.Saver(variables_to_restore)

    def restore(sess):
        saver.restore(sess, checkpoint_path)

    return restore
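Passing a dict as var_list to tf.train.Saver maps names in the checkpoint file (the keys) to variables in the current graph (the values), which is what lets a classification checkpoint with un-prefixed names initialize variables living under the feature-extractor scope.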
Example 15: testPrepareSessionWithReadyForLocalInitOp
def testPrepareSessionWithReadyForLocalInitOp(self):
    with tf.Graph().as_default():
        v = tf.Variable(1, name="v")
        w = tf.Variable(
            v,
            trainable=False,
            collections=[tf.GraphKeys.LOCAL_VARIABLES],
            name="w")
        with self.test_session():
            self.assertEqual(False, tf.is_variable_initialized(v).eval())
            self.assertEqual(False, tf.is_variable_initialized(w).eval())
        sm2 = tf.train.SessionManager(
            ready_op=tf.report_uninitialized_variables(),
            ready_for_local_init_op=tf.report_uninitialized_variables(
                tf.all_variables()),
            local_init_op=w.initializer)
        sess = sm2.prepare_session("", init_op=v.initializer)
        self.assertEqual(
            True,
            tf.is_variable_initialized(
                sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
        self.assertEqual(
            True,
            tf.is_variable_initialized(
                sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
        self.assertEqual(1, sess.run(v))
        self.assertEqual(1, sess.run(w))