This article collects typical usage examples of the Python method tensorflow.contrib.slim.model_variable. If you have been wondering what slim.model_variable does, how to call it, or where to find worked examples, the curated code samples below may help. You can also explore further usage examples of the module it belongs to, tensorflow.contrib.slim.
Listed below are 11 code examples of slim.model_variable, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
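Before diving into the examples: slim.model_variable is a thin wrapper around tf.get_variable that additionally registers the created variable in the tf.GraphKeys.MODEL_VARIABLES collection, i.e. the set of variables that belong in a checkpoint whether or not they are trained. A minimal sketch of the call, assuming TensorFlow 1.x with tf.contrib available (the variable name and shape are illustrative):

import tensorflow as tf
import tensorflow.contrib.slim as slim

# Create a regularized model variable pinned to the CPU.
weights = slim.model_variable(
    'weights', shape=[10, 10, 3, 3],
    initializer=tf.truncated_normal_initializer(stddev=0.1),
    regularizer=slim.l2_regularizer(0.05),
    device='/CPU:0')

# model_variable registers the variable with slim's model-variable tracking.
assert weights in slim.get_model_variables()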
Example 1: _add_pggan_kwargs
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import model_variable [as alias]
def _add_pggan_kwargs(data_batched, sources, targets, alpha_grow, generator_kwargs, discriminator_kwargs):
    additional_kwargs = {'is_growing': FLAGS.is_growing, 'alpha_grow': alpha_grow,
                         'do_self_attention': FLAGS.do_self_attention,
                         'self_attention_hw': FLAGS.self_attention_hw}
    generator_kwargs.update(**additional_kwargs)
    discriminator_kwargs.update(**additional_kwargs)
    generator_kwargs['do_pixel_norm'] = FLAGS.do_pixel_norm
    generator_kwargs['dtype'] = targets.dtype
    if FLAGS.use_gdrop:
        discriminator_kwargs[GDROP_STRENGTH_VAR_NAME] = slim.model_variable(
            GDROP_STRENGTH_VAR_NAME, shape=[],
            dtype=targets.dtype,
            initializer=tf.zeros_initializer(),
            trainable=False)
    else:
        discriminator_kwargs['do_dgrop'] = False
    # Conditional-label related params.
    if FLAGS.use_conditional_labels:
        conditional_labels = data_batched.get('conditional_labels', None)
        if conditional_labels is not None:
            generator_kwargs['arg_scope_fn'] = functools.partial(
                pggan.conditional_progressive_gan_generator_arg_scope,
                conditional_layer=conditional_labels)
            source_embed = GanModel._embed_one_hot(conditional_labels, FLAGS.conditional_embed_dim)
            discriminator_kwargs['conditional_embed'] = source_embed
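Note the slim.model_variable usage here: the gdrop strength is a non-trainable scalar, so it is saved and restored with the model but driven by the training loop (e.g. on a schedule) rather than by the optimizer. A minimal sketch of updating such a knob, assuming TensorFlow 1.x; the variable name and the 0.2 value are illustrative:

import tensorflow as tf
import tensorflow.contrib.slim as slim

strength = slim.model_variable('gdrop_strength', shape=[],
                               initializer=tf.zeros_initializer(),
                               trainable=False)
update_strength = tf.assign(strength, 0.2)  # e.g. scheduled from the training loop

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(update_strength)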
Example 2: __init__
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import model_variable [as alias]
def __init__(self, net, labels_one_hot, model_params, method_params):
    """Stores arguments in member variables for further use.

    Args:
      net: A tensor with shape [batch_size, num_features, feature_size] which
        contains some extracted image features.
      labels_one_hot: Optional (may be None) ground truth labels for the input
        features, as a tensor with shape
        [batch_size, seq_length, num_char_classes].
      model_params: A namedtuple with model parameters (model.ModelParams).
      method_params: A SequenceLayerParams instance.
    """
    self._params = model_params
    self._mparams = method_params
    self._net = net
    self._labels_one_hot = labels_one_hot
    self._batch_size = net.get_shape().dims[0].value
    # Initialize parameters for char logits which will be computed on the fly
    # inside an LSTM decoder.
    self._char_logits = {}
    regularizer = slim.l2_regularizer(self._mparams.weight_decay)
    self._softmax_w = slim.model_variable(
        'softmax_w',
        [self._mparams.num_lstm_units, self._params.num_char_classes],
        initializer=orthogonal_initializer,
        regularizer=regularizer)
    self._softmax_b = slim.model_variable(
        'softmax_b', [self._params.num_char_classes],
        initializer=tf.zeros_initializer(),
        regularizer=regularizer)
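For context, these two model variables implement the affine projection from an LSTM state to per-character class scores. A minimal sketch of that projection, assuming an LSTM output of shape [batch_size, num_lstm_units]; the function name char_logit is illustrative, not part of the snippet above:

import tensorflow as tf

def char_logit(lstm_output, softmax_w, softmax_b):
    # [batch, num_lstm_units] x [num_lstm_units, num_char_classes] + bias
    # -> [batch, num_char_classes]
    return tf.nn.xw_plus_b(lstm_output, softmax_w, softmax_b)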
Example 3: vlad
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import model_variable [as alias]
def vlad(feature_map, config, is_training):
    with tf.variable_scope('vlad'):
        training = config['train_vlad'] and is_training
        if config['intermediate_proj']:
            with slim.arg_scope([slim.conv2d, slim.batch_norm], trainable=training):
                with slim.arg_scope([slim.batch_norm], is_training=training):
                    feature_map = slim.conv2d(
                        feature_map, config['intermediate_proj'], 1, rate=1,
                        activation_fn=None, normalizer_fn=slim.batch_norm,
                        weights_initializer=slim.initializers.xavier_initializer(),
                        trainable=training, scope='pre_proj')

        batch_size = tf.shape(feature_map)[0]
        feature_dim = feature_map.shape[-1]

        with slim.arg_scope([slim.batch_norm], trainable=training, is_training=training):
            memberships = slim.conv2d(
                feature_map, config['n_clusters'], 1, rate=1,
                activation_fn=None, normalizer_fn=slim.batch_norm,
                weights_initializer=slim.initializers.xavier_initializer(),
                trainable=training, scope='memberships')
            memberships = tf.nn.softmax(memberships, axis=-1)

        clusters = slim.model_variable(
            'clusters', shape=[1, 1, 1, config['n_clusters'], feature_dim],
            initializer=slim.initializers.xavier_initializer(), trainable=training)
        residuals = clusters - tf.expand_dims(feature_map, axis=3)
        residuals *= tf.expand_dims(memberships, axis=-1)
        descriptor = tf.reduce_sum(residuals, axis=[1, 2])
        descriptor = tf.nn.l2_normalize(descriptor, axis=1)  # intra-normalization
        descriptor = tf.reshape(descriptor,
                                [batch_size, feature_dim * config['n_clusters']])
        descriptor = tf.nn.l2_normalize(descriptor, axis=1)
        return descriptor
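A quick usage sketch, assuming a NetVLAD-style config dict with exactly the keys the function reads ('train_vlad', 'intermediate_proj', 'n_clusters'); the feature-map shape is illustrative:

import tensorflow as tf

config = {'train_vlad': True, 'intermediate_proj': 256, 'n_clusters': 64}
feature_map = tf.placeholder(tf.float32, [None, 30, 40, 512])  # [B, H, W, D]
descriptor = vlad(feature_map, config, is_training=True)
# After the 1x1 projection to 256 dims, the flattened VLAD descriptor
# has shape [B, 256 * 64].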
Example 4: _make_graph
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import model_variable [as alias]
def _make_graph(self):
    self.logger.info("Generating testing graph on {} GPUs ...".format(self.cfg.nr_gpus))

    with tf.variable_scope(tf.get_variable_scope()):
        for i in range(self.cfg.nr_gpus):
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('tower_%d' % i) as name_scope:
                    with slim.arg_scope([slim.model_variable, slim.variable], device='/device:CPU:0'):
                        self.net.make_network(is_train=False)
                        self._input_list.append(self.net.get_inputs())
                        self._output_list.append(self.net.get_outputs())
                    tf.get_variable_scope().reuse_variables()

    self._outputs = aggregate_batch(self._output_list)

    # run_meta = tf.RunMetadata()
    # opts = tf.profiler.ProfileOptionBuilder.float_operation()
    # flops = tf.profiler.profile(self.sess.graph, run_meta=run_meta, cmd='op', options=opts)
    #
    # opts = tf.profiler.ProfileOptionBuilder.trainable_variables_parameter()
    # params = tf.profiler.profile(self.sess.graph, run_meta=run_meta, cmd='op', options=opts)
    # print("{:,} --- {:,}".format(flops.total_float_ops, params.total_parameters))
    # from IPython import embed; embed()

    return self._outputs
Example 5: _scope
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import model_variable [as alias]
def _scope(self, layer_node, params):
    # Scopes to be applied around each layer.
    scope_list = []
    # Pin variables on the CPU.
    cpu_context = slim.arg_scope([slim.model_variable], device='/cpu:0')
    scope_list.append(cpu_context)
    # Variable scope with a custom getter for overriders.
    self._add_var_scope(layer_node, params, scope_list)
    # Custom nested scope.
    return self._scope_functional(scope_list)
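Examples 4 and 5 (and Examples 7 and 8 below) all use the same idiom: wrap variable creation in slim.arg_scope([slim.model_variable], device=...) so that parameters live on one device, usually the CPU, while each GPU tower only holds the computation. A minimal sketch of the effect, assuming TensorFlow 1.x:

import tensorflow as tf
import tensorflow.contrib.slim as slim

with slim.arg_scope([slim.model_variable, slim.variable], device='/cpu:0'):
    # Every variable created in this scope is placed on the CPU, so all
    # GPU towers read the single shared copy instead of keeping replicas.
    w = slim.model_variable('w', shape=[128, 10],
                            initializer=tf.truncated_normal_initializer(stddev=0.01))

print(w.device)  # /device:CPU:0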
Example 6: _add_pggan_kwargs
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import model_variable [as alias]
def _add_pggan_kwargs(data_batched, sources, targets, alpha_grow, generator_kwargs, discriminator_kwargs):
    """Adds pggan-related function parameters to generator, encoder, and discriminator kwargs."""
    additional_kwargs = {'is_growing': FLAGS.is_growing, 'alpha_grow': alpha_grow,
                         'do_self_attention': FLAGS.do_self_attention,
                         'self_attention_hw': FLAGS.self_attention_hw}
    generator_kwargs.update(**additional_kwargs)
    generator_kwargs['do_pixel_norm'] = FLAGS.do_pixel_norm
    assert targets.dtype == sources.dtype, 'Source and target dtype should be the same.'
    generator_kwargs['dtype'] = targets.dtype if targets is not None else None
    generator_source_kwargs = copy.copy(generator_kwargs)
    generator_source_kwargs['target_shape'] = sources.shape
    generator_target_kwargs = copy.copy(generator_kwargs)
    generator_target_kwargs['target_shape'] = targets.shape
    encoder_kwargs = copy.copy(generator_kwargs)
    discriminator_kwargs.update(**additional_kwargs)
    if FLAGS.use_gdrop:
        discriminator_kwargs[GDROP_STRENGTH_VAR_NAME] = slim.model_variable(
            GDROP_STRENGTH_VAR_NAME, shape=[],
            dtype=targets.dtype,
            initializer=tf.zeros_initializer(),
            trainable=False)
    else:
        discriminator_kwargs['do_dgrop'] = False
    discriminator_source_kwargs = copy.copy(discriminator_kwargs)
    discriminator_target_kwargs = copy.copy(discriminator_kwargs)
    if FLAGS.use_conditional_labels:
        raise NotImplementedError('TwinGAN does not support the `use_conditional_labels` flag yet.')
    return (encoder_kwargs, generator_source_kwargs, generator_target_kwargs,
            discriminator_source_kwargs, discriminator_target_kwargs)
Example 7: _make_graph
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import model_variable [as alias]
def _make_graph(self):
    self.logger.info("Generating testing graph on {} GPUs ...".format(self.cfg.num_gpus))

    with tf.variable_scope(tf.get_variable_scope()):
        for i in range(self.cfg.num_gpus):
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('tower_%d' % i) as name_scope:
                    with slim.arg_scope([slim.model_variable, slim.variable], device='/device:CPU:0'):
                        self.net.make_network(is_train=False)
                        self._input_list.append(self.net.get_inputs())
                        self._output_list.append(self.net.get_outputs())
                    tf.get_variable_scope().reuse_variables()

    self._outputs = aggregate_batch(self._output_list)

    # run_meta = tf.RunMetadata()
    # opts = tf.profiler.ProfileOptionBuilder.float_operation()
    # flops = tf.profiler.profile(self.sess.graph, run_meta=run_meta, cmd='op', options=opts)
    #
    # opts = tf.profiler.ProfileOptionBuilder.trainable_variables_parameter()
    # params = tf.profiler.profile(self.sess.graph, run_meta=run_meta, cmd='op', options=opts)
    # print("{:,} --- {:,}".format(flops.total_float_ops, params.total_parameters))
    # from IPython import embed; embed()

    return self._outputs
Example 8: __init__
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import model_variable [as alias]
def __init__(self, name, inputs, tower_setup, for_imagenet_classification=False):
    super(ResNet50, self).__init__()
    # For now, always freeze the batch norm of the ResNet.
    inp, n_features_inp = prepare_input(inputs)
    # For grayscale inputs, replicate the single channel to get three.
    if n_features_inp == 1:
        inp = tf.concat([inp, inp, inp], axis=-1)
    else:
        assert n_features_inp == 3
    # To keep the preprocessing consistent with our usual preprocessing,
    # revert the std normalization.
    from ReID_net.datasets.Util.Normalization import IMAGENET_RGB_STD
    # Double-checked: this seems to be the right preprocessing.
    inp = inp * IMAGENET_RGB_STD * 255
    num_classes = 1000 if for_imagenet_classification else None
    # Note that we do not add the name to the variable scope at the moment, so
    # using multiple ResNets in the same network would throw an error. But if we
    # added the name, loading the pretrained weights would be difficult.
    with slim.arg_scope(slim.nets.resnet_v1.resnet_arg_scope()):
        with slim.arg_scope([slim.model_variable, slim.variable], device=tower_setup.variable_device):
            logits, end_points = slim.nets.resnet_v1.resnet_v1_50(
                inp, num_classes=num_classes, is_training=False)
    # Mapping from https://github.com/wuzheng-sjtu/FastFPN/blob/master/libs/nets/pyramid_network.py
    mapping = {"C1": "resnet_v1_50/conv1/Relu:0",
               "C2": "resnet_v1_50/block1/unit_2/bottleneck_v1",
               "C3": "resnet_v1_50/block2/unit_3/bottleneck_v1",
               "C4": "resnet_v1_50/block3/unit_5/bottleneck_v1",
               "C5": "resnet_v1_50/block4/unit_3/bottleneck_v1"}
    if for_imagenet_classification:
        self.outputs = [tf.nn.softmax(logits)]
    else:
        # Use C3 up to C5.
        self.outputs = [end_points[mapping[c]] for c in ["C3", "C4", "C5"]]
    self.n_params = 25600000  # roughly 25.6M
Example 9: prelu
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import model_variable [as alias]
def prelu(inputs, data_format='NHWC', scope=None):
    with tf.variable_scope(scope, default_name='prelu'):
        channel_dim = 1 if data_format == 'NCHW' else 3
        inputs_shape = inputs.get_shape().as_list()
        alpha_shape = [1 for i in range(len(inputs_shape))]
        alpha_shape[channel_dim] = inputs_shape[channel_dim]
        alpha = slim.model_variable(
            'weights', alpha_shape,
            initializer=tf.constant_initializer(0.25))
        outputs = tf.where(inputs > 0, inputs, inputs * alpha)
        return outputs
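A quick usage sketch, assuming an NHWC activation tensor (the shapes are illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 32, 32, 64])  # NHWC
y = prelu(x, data_format='NHWC', scope='prelu_1')
# alpha has shape [1, 1, 1, 64]: one learnable slope per channel,
# broadcast over the batch and spatial dimensions.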
Example 10: vlad
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import model_variable [as alias]
def vlad(feature_map, config, training, mask=None):
    with tf.variable_scope('vlad'):
        if config['intermediate_proj']:
            with slim.arg_scope([slim.conv2d, slim.batch_norm], trainable=training):
                with slim.arg_scope([slim.batch_norm], is_training=training):
                    feature_map = slim.conv2d(
                        feature_map, config['intermediate_proj'], 1, rate=1,
                        activation_fn=None, normalizer_fn=slim.batch_norm,
                        weights_initializer=slim.initializers.xavier_initializer(),
                        trainable=training, scope='pre_proj')

        batch_size = tf.shape(feature_map)[0]
        feature_dim = feature_map.shape[-1]

        with slim.arg_scope([slim.batch_norm], trainable=training, is_training=training):
            memberships = slim.conv2d(
                feature_map, config['n_clusters'], 1, rate=1,
                activation_fn=None, normalizer_fn=slim.batch_norm,
                weights_initializer=slim.initializers.xavier_initializer(),
                trainable=training, scope='memberships')
            memberships = tf.nn.softmax(memberships, axis=-1)

        clusters = slim.model_variable(
            'clusters', shape=[1, 1, 1, config['n_clusters'], feature_dim],
            initializer=slim.initializers.xavier_initializer(), trainable=training)
        residuals = clusters - tf.expand_dims(feature_map, axis=3)
        residuals *= tf.expand_dims(memberships, axis=-1)
        if mask is not None:
            residuals *= tf.to_float(mask)[..., tf.newaxis, tf.newaxis]
        descriptor = tf.reduce_sum(residuals, axis=[1, 2])
        descriptor = tf.nn.l2_normalize(descriptor, axis=1)  # intra-normalization
        descriptor = tf.reshape(descriptor,
                                [batch_size, feature_dim * config['n_clusters']])
        descriptor = tf.nn.l2_normalize(descriptor, axis=1)
        return descriptor
Example 11: compute_votes
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import model_variable [as alias]
def compute_votes(poses_i, o, regularizer, tag=False):
    """Compute the votes by multiplying input poses by the transformation matrix.

    Multiply the poses of layer i by the transform matrix to compute the votes
    for layer j.

    Author:
      Ashley Gritzman 19/10/2018
    Credit:
      Suofei Zhang's implementation on GitHub, "Matrix-Capsules-EM-Tensorflow"
      https://github.com/www0wwwjs1/Matrix-Capsules-EM-Tensorflow
    Args:
      poses_i:
        poses in layer i tiled according to the kernel
        (N*OH*OW, kh*kw*i, 16)
        (64*5*5, 9*8, 16)
      o: number of output capsules, also called "parent_caps"
      regularizer: regularizer applied to the transformation weights
    Returns:
      votes:
        (N*OH*OW, kh*kw*i, o, 16)
        (64*5*5, 9*8, 32, 16)
    """
    batch_size = int(poses_i.get_shape()[0])  # 64*5*5
    kh_kw_i = int(poses_i.get_shape()[1])  # 9*8

    # (64*5*5, 9*8, 16) -> (64*5*5, 9*8, 1, 4, 4)
    output = tf.reshape(poses_i, shape=[batch_size, kh_kw_i, 1, 4, 4])

    # The output of a capsule is miu (the mean of a Gaussian) plus an activation
    # (the sum of probabilities); it has no relationship with the absolute values
    # of w and votes. Using weights with a bigger stddev helps numerical stability.
    w = slim.model_variable('w', shape=[1, kh_kw_i, o, 4, 4],
                            dtype=tf.float32,
                            initializer=tf.truncated_normal_initializer(
                                mean=0.0,
                                stddev=1.0),
                            regularizer=regularizer)

    # (1, 9*8, 32, 4, 4) -> (64*5*5, 9*8, 32, 4, 4)
    w = tf.tile(w, [batch_size, 1, 1, 1, 1])
    # (64*5*5, 9*8, 1, 4, 4) -> (64*5*5, 9*8, 32, 4, 4)
    output = tf.tile(output, [1, 1, o, 1, 1])
    # (64*5*5, 9*8, 32, 4, 4) x (64*5*5, 9*8, 32, 4, 4)
    # -> (64*5*5, 9*8, 32, 4, 4)
    mult = tf.matmul(output, w)
    # (64*5*5, 9*8, 32, 4, 4) -> (64*5*5, 9*8, 32, 16)
    votes = tf.reshape(mult, [batch_size, kh_kw_i, o, 16])
    # tf.summary.histogram('w', w)
    return votes
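A shape-checking sketch of the call, using the example dimensions from the docstring (a batch of 64, a 5x5 output grid, a 3x3 kernel over 8 input capsule types, and 32 parent capsules); the l2 regularizer weight is illustrative:

import tensorflow as tf
import tensorflow.contrib.slim as slim

poses_i = tf.zeros([64 * 5 * 5, 9 * 8, 16])  # (N*OH*OW, kh*kw*i, 16)
regularizer = slim.l2_regularizer(5e-4)
with tf.variable_scope('votes'):
    votes = compute_votes(poses_i, o=32, regularizer=regularizer)
print(votes.shape)  # (1600, 72, 32, 16)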