This article collects typical usage examples of the Python function tensorflow.variables_initializer: what the function does and how it is called in practice. Fifteen code examples of variables_initializer are shown below, sorted by popularity by default.
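Before the examples, a minimal sketch of the basic pattern may help (TF1-style graph mode; the variable names are illustrative, not taken from the examples below). tf.variables_initializer(var_list, name='init') returns an op that initializes exactly the variables in var_list, unlike tf.global_variables_initializer(), which targets all global variables:

import tensorflow as tf

a = tf.Variable(1.0, name="a")
b = tf.Variable(2.0, name="b")

# An op that initializes only `a`; `b` stays uninitialized.
init_a = tf.variables_initializer([a], name="init_a")

with tf.Session() as sess:
    sess.run(init_a)
    print(sess.run(a))  # 1.0 -- reading `b` here would raise FailedPreconditionError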
Example 1: __model_gradients

def __model_gradients(variable_scope: tf.VariableScope,
                      transformation_variable_scope: tf.VariableScope,
                      output: tf.Tensor, output_gradient: tf.Tensor):
    trainable_variables = tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES,
        transformation_variable_scope.name)
    gradients = tf.gradients(output, trainable_variables, output_gradient)

    for gradient in gradients:
        # One accumulator variable per gradient, starting at zero.
        gradient_accumulator = tf.Variable(tf.zeros(
            gradient.get_shape(), gradient.dtype), name="gradient_accumulator")

        tf.add_to_collection(
            '{}/model_gradients'.format(variable_scope.name),
            gradient)
        tf.add_to_collection(
            '{}/model_gradient_accumulators'.format(variable_scope.name),
            gradient_accumulator)
        tf.add_to_collection(
            '{}/update_model_gradient_accumulators'.format(
                variable_scope.name),
            tf.assign_add(gradient_accumulator, gradient).op)

    with tf.control_dependencies(tf.get_collection(
            "{}/update_model_gradient_accumulators".format(variable_scope.name))):
        # tf.add acts as a named trigger op for the accumulator updates
        # (tf.no_op would serve the same purpose).
        tf.add(1, 1, "update_model_gradient_accumulators")

    # Named op that resets all accumulators back to zero.
    tf.variables_initializer(
        tf.get_collection(
            '{}/model_gradient_accumulators'.format(variable_scope.name)),
        'zero_model_gradient_accumulators')
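The pattern above — accumulate gradients into dedicated variables, then reset them through a named variables_initializer op — reduces to the following sketch (all names here are illustrative):

import tensorflow as tf

acc = tf.Variable(tf.zeros([3]), name="grad_accumulator")
grad = tf.constant([1.0, 2.0, 3.0])
accumulate = tf.assign_add(acc, grad)
zero_acc = tf.variables_initializer([acc], name="zero_accumulators")

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(accumulate)
    sess.run(zero_acc)  # resets acc back to its initial zeros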
Example 2: __init__

def __init__(self, model_architecture, policy_architecture, batch_size, n_particles, n_timesteps,
             model_path_to_load_variables, model_path_to_save_variables,
             policy_path_to_load_variables, policy_path_to_save_variables,
             tb_path):

    self.batch_size = batch_size

    # Build graph:
    #  - define all the variables
    #  - start the session
    #  - initialize the variables or load them
    #  - later: save the variables

    # Define model
    print('Defining model..')
    self.model = model_(model_architecture, batch_size=batch_size, n_particles=n_particles)

    # Define policy
    print('Defining policy..')
    self.policy = policy_(policy_architecture, model=self.model, batch_size=batch_size,
                          n_particles=n_particles, n_timesteps=n_timesteps)

    # Start session
    self.sess = tf.Session()

    # For TensorBoard
    writer = tf.summary.FileWriter(tb_path, graph=tf.get_default_graph())

    # Initialize the optimizer params. It is unclear whether this resets all the
    # other params as well; needs checking by loading params.
    self.sess.run(tf.global_variables_initializer())

    # Initialize the variables or load them
    # Model
    print('Initializing model..')
    saver = tf.train.Saver(self.model.params_dict)
    if model_path_to_load_variables == '':
        self.sess.run(tf.variables_initializer(self.model.params_list))
    else:
        saver.restore(self.sess, model_path_to_load_variables)
        print('loaded model variables ' + model_path_to_load_variables)

    # Policy
    print('Initializing policy..')
    saver = tf.train.Saver(self.policy.params_dict)
    if policy_path_to_load_variables == '':
        self.sess.run(tf.variables_initializer(self.policy.params_list))
    else:
        saver.restore(self.sess, policy_path_to_load_variables)
        print('loaded policy variables ' + policy_path_to_load_variables)

    self.model_path_to_save_variables = model_path_to_save_variables
    self.policy_path_to_save_variables = policy_path_to_save_variables

    print('Init Complete')
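Stripped of the surrounding class, the initialize-or-restore decision in this example comes down to the following sketch (variable and path names are illustrative):

import tensorflow as tf

w = tf.Variable(tf.zeros([2]), name="model/w")
saver = tf.train.Saver([w])
path_to_load = ''  # illustrative; '' means "start from scratch"

with tf.Session() as sess:
    if path_to_load == '':
        sess.run(tf.variables_initializer([w]))   # fresh random/zero init
    else:
        saver.restore(sess, path_to_load)          # values come from the checkpoint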
Example 3: test_variable
def test_variable(self):
    with self.test_session() as sess:
        x = tf.Variable(2.0, name="CustomName")
        y = tf.constant(3.0)
        z = x * y
        z_new = ed.copy(z)
        tf.variables_initializer([x]).run()
        self.assertEqual(z_new.eval(), 6.0)
Example 4: test_swap_tensor_variable
def test_swap_tensor_variable(self):
    with self.test_session() as sess:
        x = tf.constant(2.0)
        y = tf.constant(3.0)
        z = x * y
        qx = tf.Variable(4.0, name="CustomName")
        z_new = ed.copy(z, {x: qx})
        tf.variables_initializer([qx]).run()
        self.assertEqual(z_new.eval(), 12.0)
Example 5: test_local_variable
def test_local_variable(self):
    with self.test_session() as sess:
        self.assertEquals([], tf.local_variables())
        value0 = 42
        tf.contrib.framework.local_variable(value0)
        value1 = 43
        tf.contrib.framework.local_variable(value1)
        variables = tf.local_variables()
        self.assertEquals(2, len(variables))
        self.assertRaises(tf.OpError, sess.run, variables)
        tf.variables_initializer(variables).run()
        self.assertAllEqual(set([value0, value1]), set(sess.run(variables)))
Example 6: test_scan_gradients
def test_scan_gradients(self):
    with self.test_session() as sess:
        a = tf.Variable([1.0, 2.0, 3.0])
        op = tf.scan(lambda a, x: a + x, a)
        copy_op = ed.copy(op)
        gradient = tf.gradients(op, [a])[0]
        copy_gradient = tf.gradients(copy_op, [a])[0]
        tf.variables_initializer([a]).run()
        result_copy, result = sess.run([copy_gradient, gradient])
        self.assertAllClose(result, [3.0, 2.0, 1.0])
        self.assertAllClose(result_copy, [3.0, 2.0, 1.0])
Example 7: run_session
def run_session(self, *args):
    (sess_device,
     model_params,) = args

    graphTf = tf.Graph()

    with graphTf.as_default():
        with graphTf.device(sess_device):  # Throws an error if GPU is specified but not available.
            self._log.print3("=========== Making the CNN graph... ===============")
            cnn3d = Cnn3d()
            with tf.variable_scope("net"):
                cnn3d.make_cnn_model(*model_params.get_args_for_arch())  # Creates the network's graph (without optimizer).

        self._log.print3("=========== Compiling the Testing Function ============")
        self._log.print3("=======================================================\n")
        cnn3d.setup_ops_n_feeds_to_test(self._log,
                                        self._params.indices_fms_per_pathtype_per_layer_to_save)
        # Create the saver
        saver_all = tf.train.Saver()  # saver_net would suffice

    with tf.Session(graph=graphTf,
                    config=tf.ConfigProto(log_device_placement=False,
                                          device_count={'CPU': 999, 'GPU': 99})) as sessionTf:
        file_to_load_params_from = self._params.get_path_to_load_model_from()
        if file_to_load_params_from is not None:  # Load params
            self._log.print3("=========== Loading parameters from specified saved model ===============")
            chkpt_fname = tf.train.latest_checkpoint(file_to_load_params_from) \
                if os.path.isdir(file_to_load_params_from) else file_to_load_params_from
            self._log.print3("Loading parameters from:" + str(chkpt_fname))
            try:
                saver_all.restore(sessionTf, chkpt_fname)
                self._log.print3("Parameters were loaded.")
            except Exception as e:
                handle_exception_tf_restore(self._log, e)
        else:
            self._ask_user_if_test_with_random()  # Asks user whether to continue with randomly initialized model. It exits if "no" is given.
            self._log.print3("")
            self._log.print3("=========== Initializing network variables ===============")
            tf.variables_initializer(
                var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="net")).run()
            self._log.print3("Model variables were initialized.")

        self._log.print3("")
        self._log.print3("======================================================")
        self._log.print3("=========== Testing with the CNN model ===============")
        self._log.print3("======================================================\n")
        res_code = inferenceWholeVolumes(*([sessionTf, cnn3d] + self._params.get_args_for_testing()))
Example 8: load_prior

def load_prior(config, sess, saver):
    logging.info('Loading prior model parameters from file ' + os.path.abspath(config.prior_model))
    saver.restore(sess, os.path.abspath(config.prior_model))

    # Fill prior variables with the loaded values.
    prior_variables = tf.get_collection_ref('prior_variables')
    prior_variables_dict = dict([(v.name, v) for v in prior_variables])
    assign_tensors = []
    with tf.variable_scope('prior'):
        for v in tf.trainable_variables():
            prior_name = 'loss/prior/' + v.name
            prior_variable = prior_variables_dict[prior_name]
            assign_tensors.append(prior_variable.assign(v))
    # Note: this builds an initializer op for the prior variables but never runs
    # it; the assign ops below are what actually set their values.
    tf.variables_initializer(prior_variables)
    sess.run(assign_tensors)
Example 9: yolo_non_max_suppression

def yolo_non_max_suppression(scores, boxes, classes, max_boxes=10, iou_threshold=0.5):
    """
    Applies non-max suppression (NMS) to a set of boxes.

    Arguments:
    scores -- tensor of shape (None,), output of yolo_filter_boxes()
    boxes -- tensor of shape (None, 4), output of yolo_filter_boxes() that have been scaled to the image size (see later)
    classes -- tensor of shape (None,), output of yolo_filter_boxes()
    max_boxes -- integer, maximum number of predicted boxes you'd like
    iou_threshold -- real value, "intersection over union" threshold used for NMS filtering

    Returns:
    scores -- tensor of shape (None,), predicted score for each kept box
    boxes -- tensor of shape (None, 4), predicted box coordinates for each kept box
    classes -- tensor of shape (None,), predicted class for each kept box
    """
    # Tensor to be used in tf.image.non_max_suppression()
    max_boxes_tensor = K.variable(max_boxes, dtype='int32')
    # Initialize the variable max_boxes_tensor
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))

    # Use tf.image.non_max_suppression() to get the list of indices corresponding to the boxes to keep
    nms_indices = tf.image.non_max_suppression(boxes, scores, max_boxes_tensor, iou_threshold=iou_threshold)

    # Use K.gather() to select only nms_indices from scores, boxes and classes
    scores = K.gather(scores, nms_indices)
    boxes = K.gather(boxes, nms_indices)
    classes = K.gather(classes, nms_indices)

    return scores, boxes, classes
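A hypothetical smoke test for the function above, using toy tensors (assumes the Keras backend imported as K, as in the example; the expected result follows from the boxes' overlaps):

import tensorflow as tf
from keras import backend as K

scores = tf.constant([0.9, 0.8, 0.3])
boxes = tf.constant([[0., 0., 10., 10.],   # overlaps heavily with the next box
                     [1., 1., 11., 11.],
                     [50., 50., 60., 60.]])
classes = tf.constant([0, 0, 1])

s, b, c = yolo_non_max_suppression(scores, boxes, classes, max_boxes=2)
# Box 1 is suppressed (IoU with box 0 is ~0.68 > 0.5), so boxes 0 and 2 are kept.
print(K.get_session().run([s, b, c]))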
Example 10: adam_variables_initializer
def adam_variables_initializer(opt, var_list):
    # Collect the optimizer's slot variables (Adam's "m" and "v") for each var.
    adam_vars = [opt.get_slot(var, name)
                 for name in opt.get_slot_names()
                 for var in var_list]
    if isinstance(opt, tf.train.AdamOptimizer):
        # Also reset the beta1/beta2 power accumulators (private API).
        adam_vars.extend(list(opt._get_beta_accumulators()))
    return tf.variables_initializer(adam_vars)
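A sketch of how such a helper might be used: resetting Adam's state between training phases without touching the model weights (the variable, loss, and optimizer below are illustrative):

import tensorflow as tf

w = tf.Variable([1.0, 2.0], name="w")
loss = tf.reduce_sum(tf.square(w))
opt = tf.train.AdamOptimizer(0.1)
train_op = opt.minimize(loss, var_list=[w])  # creates the "m" and "v" slots

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)
    # Reset only Adam's moment estimates and beta-power accumulators;
    # `w` keeps its trained value.
    sess.run(adam_variables_initializer(opt, [w]))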
Example 11: initializeOrRestore
def initializeOrRestore(self):
    self.ckptDir = os.path.join(self.checkpoint_dir, self.dataset.name)
    self.ckptPrefix = os.path.join(self.ckptDir, self.name, self.name)
    vgg_ckpt_file = os.path.join(self.ckptDir, 'vgg_16', 'vgg_16.ckpt')
    mt_ckpt_file = layers.latest_checkpoint(os.path.join(self.ckptDir, 'mt'))
    globalVars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)

    if vgg_ckpt_file is not None and tf.train.checkpoint_exists(vgg_ckpt_file):
        varsInCkpt, varsNotInCkpt = layers.scan_checkpoint_for_vars(vgg_ckpt_file, globalVars)
        if len(varsInCkpt) != 0:
            restorationSaver = tf.train.Saver(varsInCkpt)
            self.sess.run(tf.report_uninitialized_variables(var_list=varsInCkpt))
            restorationSaver.restore(self.sess, vgg_ckpt_file)
    else:
        varsNotInCkpt = globalVars

    if mt_ckpt_file is not None and tf.train.checkpoint_exists(mt_ckpt_file):
        varsInCkpt, varsNotInCkpt = layers.scan_checkpoint_for_vars(mt_ckpt_file, varsNotInCkpt)
        varsInCkpt, varsNotInCkpt = layers.replaceVarInListsByName(varsInCkpt, varsNotInCkpt, 'fc6')
        if len(varsInCkpt) != 0:
            restorationSaver = tf.train.Saver(varsInCkpt)
            self.sess.run(tf.report_uninitialized_variables(var_list=varsInCkpt))
            restorationSaver.restore(self.sess, mt_ckpt_file)
    else:
        varsNotInCkpt = globalVars

    self.saver = tf.train.Saver()
    # Initialize everything that was not restored from a checkpoint.
    self.sess.run(tf.group(tf.variables_initializer(varsNotInCkpt), tf.local_variables_initializer()))
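A related pattern worth knowing (a sketch, not from the repository above): after a partial restore, initialize only the variables that are still uninitialized, instead of tracking varsNotInCkpt by hand:

import tensorflow as tf

def initialize_uninitialized(sess):
    # Names come back as bytes without the ":0" suffix, e.g. b'net/w'.
    uninit_names = set(sess.run(tf.report_uninitialized_variables()))
    uninit_vars = [v for v in tf.global_variables()
                   if v.name.split(':')[0].encode() in uninit_names]
    sess.run(tf.variables_initializer(uninit_vars))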
Example 12: style_transfer_train

def style_transfer_train(loss, img_var, initial_lr=3.0, decayed_lr=0.1, decay_lr_at=180, max_iter=200, print_every=50):
    # Create and initialize the Adam optimizer
    lr_var = tf.Variable(initial_lr, name="lr")
    # Create train_op that updates the generated image when run
    with tf.variable_scope("optimizer") as opt_scope:
        train_op = tf.train.AdamOptimizer(lr_var).minimize(loss, var_list=[img_var])
    # Initialize the generated image and optimization variables
    opt_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=opt_scope.name)
    sess.run(tf.variables_initializer([lr_var, img_var] + opt_vars))
    # Create an op that will clamp the image values when run
    clamp_image_op = tf.assign(img_var, tf.clip_by_value(img_var, -1.5, 1.5))

    imgs_in_process = []

    for t in range(max_iter):
        # Take an optimization step to update img_var
        sess.run(train_op)
        if t < decay_lr_at:
            sess.run(clamp_image_op)
        if t == decay_lr_at:
            sess.run(tf.assign(lr_var, decayed_lr))
        if t % print_every == 0:
            print("train step: %d" % t)
            img = sess.run(img_var)
            imgs_in_process.append(img[0])

    print("train step: %d" % t)
    final_img = sess.run(img_var)[0]
    return imgs_in_process, final_img
Example 13: _get_ece
def _get_ece(self, ece_op, update_op):
    """Return scalar expected calibration error."""
    with self.test_session() as sess:
        metrics_vars = tf.get_collection(tf.GraphKeys.METRIC_VARIABLES)
        sess.run(tf.variables_initializer(var_list=metrics_vars))
        _ = sess.run(update_op)
        return sess.run(ece_op)
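The same reset trick applies to any streaming metric, since metric state lives in local variables that global initialization does not cover; a minimal sketch using tf.metrics.mean (the values and session setup are illustrative):

import tensorflow as tf

values = tf.placeholder(tf.float32, shape=[None])
mean_op, update_op = tf.metrics.mean(values)

with tf.Session() as sess:
    # The metric's total/count accumulators are local variables.
    sess.run(tf.variables_initializer(tf.local_variables()))
    sess.run(update_op, feed_dict={values: [1.0, 2.0, 3.0]})
    print(sess.run(mean_op))  # 2.0
    # Running the initializer again resets the accumulated state to zero.
    sess.run(tf.variables_initializer(tf.local_variables()))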
Example 14: run
def run(self, variables=None, use_coordinator=True, *args, **kwargs):
    """A simple wrapper to run inference.

    1. Initialize algorithm via `initialize`.
    2. (Optional) Build a TensorFlow summary writer for TensorBoard.
    3. (Optional) Initialize TensorFlow variables.
    4. (Optional) Start queue runners.
    5. Run `update` for `self.n_iter` iterations.
    6. While running, `print_progress`.
    7. Finalize algorithm via `finalize`.
    8. (Optional) Stop queue runners.

    To customize the way inference is run, run these steps individually.

    Args:
      variables: list, optional.
        A list of TensorFlow variables to initialize during inference.
        Default is to initialize all variables (this includes
        reinitializing variables that were already initialized). To
        avoid initializing any variables, pass in an empty list.
      use_coordinator: bool, optional.
        Whether to start and stop queue runners during inference using a
        TensorFlow coordinator. For example, queue runners are necessary
        for batch training with file readers.
      *args:
        Passed into `initialize`.
      **kwargs:
        Passed into `initialize`.
    """
    self.initialize(*args, **kwargs)

    if variables is None:
        init = tf.global_variables_initializer()
    else:
        init = tf.variables_initializer(variables)

    # Feed placeholders in case initialization depends on them.
    feed_dict = {}
    for key, value in six.iteritems(self.data):
        if isinstance(key, tf.Tensor) and "Placeholder" in key.op.type:
            feed_dict[key] = value

    init.run(feed_dict)

    if use_coordinator:
        # Start input enqueue threads.
        self.coord = tf.train.Coordinator()
        self.threads = tf.train.start_queue_runners(coord=self.coord)

    for _ in range(self.n_iter):
        info_dict = self.update()
        self.print_progress(info_dict)

    self.finalize()

    if use_coordinator:
        # Ask threads to stop.
        self.coord.request_stop()
        self.coord.join(self.threads)
Example 15: initialize
def initialize(self, sess):
    # Initial file lists are empty
    np_paths = []
    ss_paths = []
    variables = tf.global_variables()
    # Initialize all variables first
    sess.run(tf.variables_initializer(variables, name='init'))

    if self.pretrained_model is not None:
        if self.pretrained_model.endswith('.ckpt'):
            # Fresh train directly from ImageNet weights
            print('Loading initial model weights from {:s}'.format(self.pretrained_model))
            var_keep_dic = self.get_variables_in_checkpoint_file(self.pretrained_model)
            # Get the variables to restore, ignoring the variables to fix
            variables_to_restore = self.net.get_variables_to_restore(variables, var_keep_dic)
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_model)
            print('Loaded.')
        else:
            # Restore from checkpoint and meta file
            self.restore_ckpt_from_dir(sess, self.net, self.pretrained_model)
            print('Loaded.')

    last_snapshot_iter = 0
    rate = cfg.TRAIN.LEARNING_RATE
    stepsizes = list(cfg.TRAIN.STEPSIZE)

    return rate, last_snapshot_iter, stepsizes, np_paths, ss_paths