This article collects typical usage examples of the Python method tensorflow.contrib.slim.flatten. If you are wondering what slim.flatten does and how it is used in practice, the curated code examples below may help. You can also explore further usage examples from the module it belongs to, tensorflow.contrib.slim.
The following 15 code examples of slim.flatten are shown, sorted by popularity by default.
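Before the examples, here is a minimal sketch of what slim.flatten itself does (TF 1.x, assuming tensorflow.contrib is available): it keeps the batch dimension and collapses every remaining dimension into one.

import tensorflow as tf
import tensorflow.contrib.slim as slim

# A 4-D activation map: (batch, height, width, channels).
images = tf.placeholder(tf.float32, [None, 8, 8, 16])

# slim.flatten preserves dim 0 and collapses the rest:
# (batch, 8, 8, 16) -> (batch, 8 * 8 * 16) = (batch, 1024).
flat = slim.flatten(images)
print(flat.get_shape())  # (?, 1024)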
Example 1: combine_setup
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def combine_setup(name, combine_type, embed_img, embed_goal,
                  num_img_neurons=None, num_goal_neurons=None):
    with tf.name_scope(name + '_' + combine_type):
        if combine_type == 'add':
            # Add the image and goal features elementwise.
            out = embed_img + embed_goal
        elif combine_type == 'multiply':
            # Batched matrix-vector product: view the image embedding as
            # blocks of num_goal_neurons units and dot each with the goal.
            re_embed_img = tf.reshape(
                embed_img, shape=[-1, num_img_neurons // num_goal_neurons,
                                  num_goal_neurons])
            re_embed_goal = tf.reshape(embed_goal, shape=[-1, num_goal_neurons, 1])
            x = tf.matmul(re_embed_img, re_embed_goal, transpose_a=False, transpose_b=False)
            out = slim.flatten(x)
        elif combine_type == 'none' or combine_type == 'imgonly':
            out = embed_img
        elif combine_type == 'goalonly':
            out = embed_goal
        else:
            logging.fatal('Undefined combine_type: %s', combine_type)
    return out
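As a sanity check on the 'multiply' branch, here is a self-contained sketch with hypothetical sizes (512 image neurons, 64 goal neurons): the image embedding is viewed as 512 // 64 = 8 blocks of 64 units, and each block takes a dot product with the goal embedding.

import tensorflow as tf
import tensorflow.contrib.slim as slim

embed_img = tf.zeros([4, 512])   # (batch, num_img_neurons)
embed_goal = tf.zeros([4, 64])   # (batch, num_goal_neurons)

re_img = tf.reshape(embed_img, [-1, 512 // 64, 64])  # (4, 8, 64)
re_goal = tf.reshape(embed_goal, [-1, 64, 1])        # (4, 64, 1)
out = slim.flatten(tf.matmul(re_img, re_goal))       # (4, 8, 1) -> (4, 8)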
Example 2: encoder
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def encoder(self, images, is_training):
    activation_fn = leaky_relu  # tf.nn.relu
    weight_decay = 0.0
    with tf.variable_scope('encoder'):
        with slim.arg_scope([slim.batch_norm],
                            is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                weights_regularizer=slim.l2_regularizer(weight_decay),
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.batch_norm_params):
                net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
                net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
                net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
                net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
                net = slim.conv2d(net, 512, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_5')
                net = slim.flatten(net)
                fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
    return fc1, fc2
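Judging by the latent_variable_dim naming, the two heads presumably parameterize a VAE posterior; treating fc1 as the mean and fc2 as the log-variance is an assumption, not something stated in the snippet. A minimal reparameterization sketch with stand-in placeholders:

import tensorflow as tf

# Stand-ins for fc1/fc2 above; 100 is a hypothetical latent size.
mu = tf.placeholder(tf.float32, [None, 100])       # fc1: posterior mean (assumed)
log_var = tf.placeholder(tf.float32, [None, 100])  # fc2: posterior log-variance (assumed)
z = mu + tf.exp(0.5 * log_var) * tf.random_normal(tf.shape(mu))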
Example 3: encoder
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def encoder(self, images, is_training):
    activation_fn = leaky_relu  # tf.nn.relu
    weight_decay = 0.0
    with tf.variable_scope('encoder'):
        with slim.arg_scope([slim.batch_norm],
                            is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                weights_regularizer=slim.l2_regularizer(weight_decay),
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.batch_norm_params):
                net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
                net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
                net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
                net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
                net = slim.flatten(net)
                fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
    return fc1, fc2
Example 4: gradient_penalty
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def gradient_penalty(f, real, fake=None):
    def _interpolate(a, b=None):
        with tf.name_scope('interpolate'):
            if b is None:  # interpolation in DRAGAN
                beta = tf.random_uniform(shape=tf.shape(a), minval=0., maxval=1.)
                _, variance = tf.nn.moments(a, list(range(a.shape.ndims)))
                b = a + 0.5 * tf.sqrt(variance) * beta
            shape = [tf.shape(a)[0]] + [1] * (a.shape.ndims - 1)
            alpha = tf.random_uniform(shape=shape, minval=0., maxval=1.)
            inter = a + alpha * (b - a)
            inter.set_shape(a.get_shape().as_list())
            return inter

    with tf.name_scope('gradient_penalty'):
        x = _interpolate(real, fake)
        pred = f(x)
        if isinstance(pred, tuple):
            pred = pred[0]
        grad = tf.gradients(pred, x)[0]
        norm = tf.norm(slim.flatten(grad), axis=1)
        gp = tf.reduce_mean((norm - 1.)**2)
        return gp
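A self-contained usage sketch; the toy critic below is for illustration only (a real critic would share variables between the real/fake and interpolated passes via tf.variable_scope(..., reuse=...)):

import tensorflow as tf
import tensorflow.contrib.slim as slim

def critic(x):
    # Toy critic: any callable mapping x to a per-sample score works here.
    return slim.fully_connected(slim.flatten(x), 1, activation_fn=None)

real = tf.random_normal([8, 32, 32, 3])
fake = tf.random_normal([8, 32, 32, 3])

gp_wgan = gradient_penalty(critic, real, fake)  # WGAN-GP: interpolate real and fake
gp_dragan = gradient_penalty(critic, real)      # DRAGAN: perturb around real only
# Either penalty is typically added to the critic loss with a weight of about 10.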
Example 5: se_module
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def se_module(input_net, ratio=16, reuse=None, scope=None):
    with tf.variable_scope(scope, 'SE', [input_net], reuse=reuse):
        h, w, c = tuple([dim.value for dim in input_net.shape[1:4]])
        assert c % ratio == 0
        hidden_units = int(c / ratio)
        squeeze = slim.avg_pool2d(input_net, [h, w], padding='VALID')
        excitation = slim.flatten(squeeze)
        excitation = slim.fully_connected(excitation, hidden_units, scope='se_fc1',
                                          weights_regularizer=None,
                                          weights_initializer=slim.xavier_initializer(),
                                          activation_fn=tf.nn.relu)
        excitation = slim.fully_connected(excitation, c, scope='se_fc2',
                                          weights_regularizer=None,
                                          weights_initializer=slim.xavier_initializer(),
                                          activation_fn=tf.nn.sigmoid)
        excitation = tf.reshape(excitation, [-1, 1, 1, c])
        output_net = input_net * excitation
        return output_net
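A short usage sketch for the block above, with hypothetical shapes; note that the assert requires the channel count to be divisible by ratio.

import tensorflow as tf
import tensorflow.contrib.slim as slim

inputs = tf.random_normal([2, 56, 56, 64])
net = slim.conv2d(inputs, 64, [3, 3], scope='conv1')
# Recalibrates the 64 channels; 64 % 16 == 0 satisfies the assert above.
net = se_module(net, ratio=16, scope='se1')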
Example 6: content_extractor
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def content_extractor(self, images, reuse=False):
    # images: (batch, 32, 32, 3) or (batch, 32, 32, 1)
    if images.get_shape()[3] == 1:
        # For the MNIST dataset, replicate the grayscale channel 3 times.
        images = tf.image.grayscale_to_rgb(images)
    with tf.variable_scope('content_extractor', reuse=reuse):
        with slim.arg_scope([slim.conv2d], padding='SAME', activation_fn=None,
                            stride=2, weights_initializer=tf.contrib.layers.xavier_initializer()):
            with slim.arg_scope([slim.batch_norm], decay=0.95, center=True, scale=True,
                                activation_fn=tf.nn.relu,
                                is_training=(self.mode == 'train' or self.mode == 'pretrain')):
                net = slim.conv2d(images, 64, [3, 3], scope='conv1')   # (batch_size, 16, 16, 64)
                net = slim.batch_norm(net, scope='bn1')
                net = slim.conv2d(net, 128, [3, 3], scope='conv2')     # (batch_size, 8, 8, 128)
                net = slim.batch_norm(net, scope='bn2')
                net = slim.conv2d(net, 256, [3, 3], scope='conv3')     # (batch_size, 4, 4, 256)
                net = slim.batch_norm(net, scope='bn3')
                net = slim.conv2d(net, 128, [4, 4], padding='VALID', scope='conv4')  # (batch_size, 1, 1, 128)
                net = slim.batch_norm(net, activation_fn=tf.nn.tanh, scope='bn4')
                if self.mode == 'pretrain':
                    net = slim.conv2d(net, 10, [1, 1], padding='VALID', scope='out')
                    net = slim.flatten(net)
                return net
Example 7: discriminator
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def discriminator(self, images, reuse=False):
    # images: (batch, 32, 32, 1)
    with tf.variable_scope('discriminator', reuse=reuse):
        with slim.arg_scope([slim.conv2d], padding='SAME', activation_fn=None,
                            stride=2, weights_initializer=tf.contrib.layers.xavier_initializer()):
            with slim.arg_scope([slim.batch_norm], decay=0.95, center=True, scale=True,
                                activation_fn=tf.nn.relu, is_training=(self.mode == 'train')):
                net = slim.conv2d(images, 128, [3, 3], activation_fn=tf.nn.relu, scope='conv1')  # (batch_size, 16, 16, 128)
                net = slim.batch_norm(net, scope='bn1')
                net = slim.conv2d(net, 256, [3, 3], scope='conv2')   # (batch_size, 8, 8, 256)
                net = slim.batch_norm(net, scope='bn2')
                net = slim.conv2d(net, 512, [3, 3], scope='conv3')   # (batch_size, 4, 4, 512)
                net = slim.batch_norm(net, scope='bn3')
                net = slim.conv2d(net, 1, [4, 4], padding='VALID', scope='conv4')  # (batch_size, 1, 1, 1)
                net = slim.flatten(net)
                return net
Example 8: _fc_iter
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def _fc_iter(self, mem_pool5, is_training, name, iter):
    xavier = tf.contrib.layers.variance_scaling_initializer()
    with tf.variable_scope(name):
        mem_fc7 = slim.flatten(mem_pool5, scope='flatten')
        with slim.arg_scope([slim.fully_connected],
                            activation_fn=tf.nn.relu,
                            trainable=is_training,
                            weights_initializer=xavier,
                            biases_initializer=tf.constant_initializer(0.0)):
            for i in range(cfg.MEM.FC_L):
                mem_fc7 = slim.fully_connected(mem_fc7,
                                               cfg.MEM.FC_C,
                                               scope="mem_fc%d" % (i + 6))
                self._act_summaries.append(mem_fc7)
                self._score_summaries[iter].append(mem_fc7)
    return mem_fc7
Example 9: build_predictions
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def build_predictions(self, net, rois, is_training, initializer, initializer_bbox):
    # Crop image ROIs
    pool5 = self._crop_pool_layer(net, rois, "pool5")
    pool5_flat = slim.flatten(pool5, scope='flatten')

    # Fully connected layers
    fc6 = slim.fully_connected(pool5_flat, 4096, scope='fc6')
    if is_training:
        fc6 = slim.dropout(fc6, keep_prob=0.5, is_training=True, scope='dropout6')
    fc7 = slim.fully_connected(fc6, 4096, scope='fc7')
    if is_training:
        fc7 = slim.dropout(fc7, keep_prob=0.5, is_training=True, scope='dropout7')

    # Scores and predictions
    cls_score = slim.fully_connected(fc7, self._num_classes,
                                     weights_initializer=initializer,
                                     trainable=is_training,
                                     activation_fn=None, scope='cls_score')
    cls_prob = self._softmax_layer(cls_score, "cls_prob")
    bbox_prediction = slim.fully_connected(fc7, self._num_classes * 4,
                                           weights_initializer=initializer_bbox,
                                           trainable=is_training,
                                           activation_fn=None, scope='bbox_pred')
    return cls_score, cls_prob, bbox_prediction
Example 10: __call__
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def __call__(self, inputs, state, scope=None):
    batch_size = tf.shape(inputs)[0]
    if self._apply_to == 'input':
        inputs = slim.flatten(inputs) if self._shape == -1 else tf.reshape(inputs, [batch_size] + self._shape)
        return self._cell(inputs, state)
    elif self._apply_to == 'output':
        output, res_state = self._cell(inputs, state)
        output = slim.flatten(output) if self._shape == -1 else tf.reshape(output, [batch_size] + self._shape)
        return output, res_state
    elif self._apply_to == 'state':
        output, res_state = self._cell(inputs, state)
        res_state = slim.flatten(res_state) if self._shape == -1 else tf.reshape(res_state, [batch_size] + self._shape)
        return output, res_state
    else:
        raise ValueError('Unknown apply_to: "{}"'.format(self._apply_to))
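The class around this __call__ is not shown; the shell below is a hypothetical reconstruction (the name FlattenWrapper and the constructor are assumptions, chosen to match the fields the method uses).

import tensorflow as tf

class FlattenWrapper(tf.nn.rnn_cell.RNNCell):
    # Hypothetical shell; __call__ is exactly the method shown above.
    def __init__(self, cell, apply_to='input', shape=-1):
        self._cell = cell
        self._apply_to = apply_to
        self._shape = shape

    @property
    def state_size(self):
        return self._cell.state_size

    @property
    def output_size(self):
        return self._cell.output_size

# Flatten each input step before it reaches the wrapped LSTM:
cell = FlattenWrapper(tf.nn.rnn_cell.BasicLSTMCell(256), apply_to='input', shape=-1)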
Example 11: forward
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def forward(self):
    # Transpose to NCHW so the flattened elements come out in channel-major order.
    temp = tf.transpose(self.inp.out, [0, 3, 1, 2])
    self.out = slim.flatten(temp, scope=self.scope)
Example 12: encoder
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def encoder(self, x):
    """Convolutional variational encoder that maps images to a low-dimensional latent code.

    If config.conv == False it is an MLP VAE. If config.use_vae == False, it is a plain
    (non-variational) encoder.
    :param x: sequence of images
    :return: a_seq, a_mu, a_var
    """
    with tf.variable_scope('vae/encoder'):
        if self.config.conv:
            x_flat_conv = tf.reshape(x, (-1, self.d1, self.d2, 1))
            enc_hidden = slim.stack(x_flat_conv,
                                    slim.conv2d,
                                    self.num_filters,
                                    kernel_size=self.config.filter_size,
                                    stride=2,
                                    activation_fn=self.activation_fn,
                                    padding='SAME')
            enc_flat = slim.flatten(enc_hidden)
            self.enc_shape = enc_hidden.get_shape().as_list()[1:]
        else:
            x_flat = tf.reshape(x, (-1, self.d1 * self.d2))
            enc_flat = slim.repeat(x_flat, self.config.num_layers, slim.fully_connected,
                                   self.config.vae_num_units, self.activation_fn)

        a_mu = slim.fully_connected(enc_flat, self.config.dim_a, activation_fn=None)
        if self.config.use_vae:
            a_var = slim.fully_connected(enc_flat, self.config.dim_a, activation_fn=tf.nn.sigmoid)
            a_var = self.config.noise_emission * a_var
            a = simple_sample(a_mu, a_var)
        else:
            a_var = tf.constant(1., dtype=tf.float32, shape=())
            a = a_mu
        a_seq = tf.reshape(a, tf.stack((-1, self.ph_steps, self.config.dim_a)))
    return a_seq, a_mu, a_var
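simple_sample is not defined in the snippet; presumably it draws from N(a_mu, a_var) via the reparameterization trick, roughly as follows (an assumed implementation, not the repository's actual helper):

import tensorflow as tf

def simple_sample(mu, var):
    # Assumed helper: reparameterized draw z = mu + sqrt(var) * eps, eps ~ N(0, I).
    return mu + tf.sqrt(var) * tf.random_normal(tf.shape(mu))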
Example 13: encoder
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def encoder(self, images, is_training):
    activation_fn = leaky_relu  # tf.nn.relu
    weight_decay = 0.0
    with tf.variable_scope('encoder'):
        with slim.arg_scope([slim.batch_norm],
                            is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                weights_regularizer=slim.l2_regularizer(weight_decay),
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.batch_norm_params):
                net = images
                net = slim.conv2d(net, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_1b')
                net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_2b')
                net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_3b')
                net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 256, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_4b')
                net = slim.flatten(net)
                fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
    return fc1, fc2
Example 14: _head_to_tail
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def _head_to_tail(self, pool5, is_training, reuse=None):
    with tf.variable_scope(self._scope, self._scope, reuse=reuse):
        pool5_flat = slim.flatten(pool5, scope='flatten')
        fc6 = slim.fully_connected(pool5_flat, 4096, scope='fc6')
        if is_training:
            fc6 = slim.dropout(fc6, keep_prob=0.5, is_training=True,
                               scope='dropout6')
        fc7 = slim.fully_connected(fc6, 4096, scope='fc7')
        if is_training:
            fc7 = slim.dropout(fc7, keep_prob=0.5, is_training=True,
                               scope='dropout7')
    return fc7
Example 15: l2_batch_normalize
# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import flatten [as alias]
def l2_batch_normalize(x, epsilon=1e-12, scope=None):
    """
    Helper function to l2-normalize a batch of vectors.
    :param x: the input placeholder
    :param epsilon: stabilizes division
    :param scope: optional name scope
    :return: the batch of l2-normalized vectors
    """
    with tf.name_scope(scope, "l2_batch_normalize") as scope:
        x_shape = tf.shape(x)
        x = slim.flatten(x)
        # Pre-scale by the per-sample max for numerical stability.
        x /= (epsilon + tf.reduce_max(tf.abs(x), 1, keep_dims=True))
        square_sum = tf.reduce_sum(tf.square(x), 1, keep_dims=True)
        x_inv_norm = tf.rsqrt(np.sqrt(epsilon) + square_sum)
        x_norm = tf.multiply(x, x_inv_norm)
        return tf.reshape(x_norm, x_shape, scope)
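A quick self-contained check of the helper; the pre-scaling by the per-sample max makes this more numerically stable than a bare division by the norm.

import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim

x = tf.random_normal([8, 32, 32, 3])
x_norm = l2_batch_normalize(x)
# Every sample comes out with (approximately) unit L2 norm:
norms = tf.norm(slim.flatten(x_norm), axis=1)  # ~1.0 per sample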