This article collects typical usage examples of the Python method tensorflow.contrib.slim.assign_from_checkpoint. If you are wondering what slim.assign_from_checkpoint does, how to call it, or what real-world usage looks like, the curated code samples below may help. You can also explore the enclosing module, tensorflow.contrib.slim, for more usage examples.
The following presents 6 code examples of slim.assign_from_checkpoint, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
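Before the examples, here is a minimal sketch of the basic pattern, assuming the model graph has already been built and using a placeholder checkpoint path: slim.assign_from_checkpoint returns an assign op plus a feed dict of checkpoint values, which you run once in a session.

import tensorflow as tf
from tensorflow.contrib import slim

# Select the variables to restore after the model graph has been built.
variables_to_restore = slim.get_variables_to_restore()

# assign_from_checkpoint returns an assign op and a feed dict of checkpoint
# values; '/tmp/model.ckpt' is a placeholder path.
assign_op, feed_dict = slim.assign_from_checkpoint(
    '/tmp/model.ckpt', variables_to_restore)

with tf.Session() as sess:
    sess.run(assign_op, feed_dict=feed_dict)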
Example 1: create_init_fn_to_restore
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import assign_from_checkpoint [as alias]
def create_init_fn_to_restore(self, master_checkpoint, inception_checkpoint):
  """Creates init operations to restore weights from various checkpoints.

  Args:
    master_checkpoint: path to a checkpoint which contains all weights for
      the whole model.
    inception_checkpoint: path to a checkpoint which contains weights for the
      inception part only.

  Returns:
    a function to run initialization ops.
  """
  all_assign_ops = []
  all_feed_dict = {}

  def assign_from_checkpoint(variables, checkpoint):
    logging.info('Request to re-store %d weights from %s',
                 len(variables), checkpoint)
    if not variables:
      logging.error('Can\'t find any variables to restore.')
      sys.exit(1)
    assign_op, feed_dict = slim.assign_from_checkpoint(checkpoint, variables)
    all_assign_ops.append(assign_op)
    all_feed_dict.update(feed_dict)

  if master_checkpoint:
    assign_from_checkpoint(utils.variables_to_restore(), master_checkpoint)

  if inception_checkpoint:
    variables = utils.variables_to_restore(
        'AttentionOcr_v1/conv_tower_fn/INCE', strip_scope=True)
    assign_from_checkpoint(variables, inception_checkpoint)

  def init_assign_fn(sess):
    logging.info('Restoring checkpoint(s)')
    sess.run(all_assign_ops, all_feed_dict)

  return init_assign_fn
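As a hedged usage sketch, the returned init_assign_fn is typically passed as the init_fn argument of slim.learning.train so the restore runs once when the training session is created; the model object, train_op and FLAGS values below are assumptions, not part of the example above.

# Hypothetical wiring; model, train_op and the FLAGS values are assumptions.
init_fn = model.create_init_fn_to_restore(FLAGS.checkpoint,
                                          FLAGS.checkpoint_inception)
slim.learning.train(train_op, logdir='/tmp/train_logs', init_fn=init_fn)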
Example 2: load_ckpt
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import assign_from_checkpoint [as alias]
def load_ckpt(sess, model_dir, variables_to_restore=None):
  # Look up the latest checkpoint recorded in the directory's state file.
  ckpt = tf.train.get_checkpoint_state(model_dir)
  model_path = ckpt.model_checkpoint_path
  if variables_to_restore is None:
    variables_to_restore = slim.get_variables_to_restore()
  restore_op, restore_fd = slim.assign_from_checkpoint(
      model_path, variables_to_restore)
  sess.run(restore_op, feed_dict=restore_fd)
  print(f'{model_path} loaded')
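For context, a hedged call site is shown below; the session setup and the checkpoint directory are assumptions, and the graph that produced the checkpoint must already be built in this session.

# Hypothetical usage; '/tmp/model_dir' is a placeholder directory.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    load_ckpt(sess, '/tmp/model_dir')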
Example 3: create_init_fn_to_restore
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import assign_from_checkpoint [as alias]
def create_init_fn_to_restore(self, master_checkpoint,
                              inception_checkpoint=None):
  """Creates init operations to restore weights from various checkpoints.

  Args:
    master_checkpoint: path to a checkpoint which contains all weights for
      the whole model.
    inception_checkpoint: path to a checkpoint which contains weights for the
      inception part only.

  Returns:
    a function to run initialization ops.
  """
  all_assign_ops = []
  all_feed_dict = {}

  def assign_from_checkpoint(variables, checkpoint):
    logging.info('Request to re-store %d weights from %s',
                 len(variables), checkpoint)
    if not variables:
      logging.error('Can\'t find any variables to restore.')
      sys.exit(1)
    assign_op, feed_dict = slim.assign_from_checkpoint(checkpoint, variables)
    all_assign_ops.append(assign_op)
    all_feed_dict.update(feed_dict)

  if master_checkpoint:
    assign_from_checkpoint(utils.variables_to_restore(), master_checkpoint)

  if inception_checkpoint:
    variables = utils.variables_to_restore(
        'AttentionOcr_v1/conv_tower_fn/INCE', strip_scope=True)
    assign_from_checkpoint(variables, inception_checkpoint)

  def init_assign_fn(sess):
    logging.info('Restoring checkpoint(s)')
    sess.run(all_assign_ops, all_feed_dict)

  return init_assign_fn
Example 4: load
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import assign_from_checkpoint [as alias]
def load(self, sess):
  model_saver = self.get_saver()
  ckpt = tf.train.latest_checkpoint(str(self.exp_dir),
                                    latest_filename='%s_ckpt' % self.scope)
  if ckpt is None:
    print('[ %s ] No ckpt found...' % self.scope)
    return
  print('Loading %s' % str(ckpt))
  # ignore_missing_vars=True skips variables that are absent from the
  # checkpoint instead of raising an error.
  init_op, init_feed = slim.assign_from_checkpoint(
      model_path=ckpt, var_list=self.vars(), ignore_missing_vars=True)
  sess.run(init_op, init_feed)
  # model_saver.restore(sess, ckpt)
  return
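The ignore_missing_vars flag used above also makes partial restores straightforward, for example restoring only one variable scope from a larger checkpoint. A minimal sketch, assuming a hypothetical 'encoder' scope and a placeholder checkpoint path:

# Restore only variables under a hypothetical 'encoder' scope, tolerating
# variables missing from the checkpoint; scope name and path are assumptions.
encoder_vars = slim.get_variables_to_restore(include=['encoder'])
init_op, init_feed = slim.assign_from_checkpoint(
    '/tmp/model.ckpt', encoder_vars, ignore_missing_vars=True)
sess.run(init_op, init_feed)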
Example 5: _create_encoder
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import assign_from_checkpoint [as alias]
def _create_encoder(preprocess_fn, network_factory, image_shape, batch_size=32,
                    session=None, checkpoint_path=None, read_from_file=False):
  if read_from_file:
    # Inputs are filenames; decode and resize the JPEG images on the fly.
    num_channels = image_shape[-1] if len(image_shape) == 3 else 1
    input_var = tf.placeholder(tf.string, (None, ))
    image_var = tf.map_fn(
        lambda x: tf.image.decode_jpeg(
            tf.read_file(x), channels=num_channels),
        input_var, back_prop=False, dtype=tf.uint8)
    image_var = tf.image.resize_images(image_var, image_shape[:2])
  else:
    # Inputs are already-decoded uint8 images.
    input_var = tf.placeholder(tf.uint8, (None, ) + image_shape)
    image_var = input_var

  preprocessed_image_var = tf.map_fn(
      lambda x: preprocess_fn(x, is_training=False),
      image_var, back_prop=False, dtype=tf.float32)

  feature_var, _ = network_factory(preprocessed_image_var)
  feature_dim = feature_var.get_shape().as_list()[-1]

  if session is None:
    session = tf.Session()
  if checkpoint_path is not None:
    tf.train.get_or_create_global_step()
    init_assign_op, init_feed_dict = slim.assign_from_checkpoint(
        checkpoint_path, slim.get_model_variables())
    session.run(init_assign_op, feed_dict=init_feed_dict)

  def encoder(data_x):
    out = np.zeros((len(data_x), feature_dim), np.float32)
    queued_trainer.run_in_batches(
        lambda x: session.run(feature_var, feed_dict=x),
        {input_var: data_x}, out, batch_size)
    return out

  return encoder
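A hedged sketch of calling the resulting encoder; preprocess_fn, network_factory, the image shape and the images array are all assumptions here, not defined by the example above.

# Hypothetical usage: images is a uint8 array of shape (N, 128, 64, 3).
encoder = _create_encoder(
    preprocess_fn, network_factory, image_shape=(128, 64, 3),
    checkpoint_path='/tmp/model.ckpt')
features = encoder(images)  # float32 array of shape (N, feature_dim)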
Example 6: create_init_fn_to_restore
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import assign_from_checkpoint [as alias]
def create_init_fn_to_restore(self, master_checkpoint,
                              inception_checkpoint=None):
  """Creates init operations to restore weights from various checkpoints.

  Args:
    master_checkpoint: path to a checkpoint which contains all weights for
      the whole model.
    inception_checkpoint: path to a checkpoint which contains weights for the
      inception part only.

  Returns:
    a function to run initialization ops.
  """
  all_assign_ops = []
  all_feed_dict = {}

  def assign_from_checkpoint(variables, checkpoint):
    logging.info('Request to re-store %d weights from %s',
                 len(variables), checkpoint)
    if not variables:
      logging.error('Can\'t find any variables to restore.')
      sys.exit(1)
    assign_op, feed_dict = slim.assign_from_checkpoint(checkpoint, variables)
    all_assign_ops.append(assign_op)
    all_feed_dict.update(feed_dict)

  logging.info('variables_to_restore:\n%s' % utils.variables_to_restore().keys())
  logging.info('moving_average_variables:\n%s' %
               [v.op.name for v in tf.moving_average_variables()])
  logging.info('trainable_variables:\n%s' %
               [v.op.name for v in tf.trainable_variables()])

  if master_checkpoint:
    assign_from_checkpoint(utils.variables_to_restore(), master_checkpoint)

  if inception_checkpoint:
    variables = utils.variables_to_restore(
        'AttentionOcr_v1/conv_tower_fn/INCE', strip_scope=True)
    assign_from_checkpoint(variables, inception_checkpoint)

  def init_assign_fn(sess):
    logging.info('Restoring checkpoint(s)')
    sess.run(all_assign_ops, all_feed_dict)

  return init_assign_fn