This page collects and organizes typical usage examples of the tensorflow.contrib.layers.linear method in Python. If you have been wondering what exactly layers.linear does, how to call it, or what real uses of it look like, the curated code examples below should help. You can also explore further usage examples from tensorflow.contrib.layers, the module this method belongs to.
Fifteen code examples of layers.linear are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
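Before diving into the examples, note that layers.linear is simply a fully connected layer with no activation function: it is tf.contrib.layers.fully_connected with activation_fn=None, computing y = xW + b. A minimal, self-contained sketch of a call (the tensor shapes are illustrative):

import tensorflow as tf
from tensorflow.contrib import layers

inputs = tf.placeholder(tf.float32, [None, 64])          # a batch of 64-d features
outputs = layers.linear(inputs, 10, scope='projection')  # [None, 10], no activation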
Example 1: _get_filter
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import linear [as alias]
def _get_filter(self, data, grid, scope=None):
    """ Generate an attention filter """
    with tf.variable_scope(scope, 'filter', [data]):
        x_offset, y_offset, log_stride, log_scale, log_gamma = tf.split(
            layers.linear(data, 5, scope='parameters'), 5, axis=1)

        center = self._get_center(grid, (x_offset, y_offset), tf.exp(log_stride))
        scale = tf.expand_dims(tf.maximum(tf.exp(log_scale), self.epsilon), -1)

        filter_x = 1 + tf.square((self.data_x - center[0]) / tf.maximum(scale, self.epsilon))
        filter_y = 1 + tf.square((self.data_y - center[1]) / tf.maximum(scale, self.epsilon))
        filter_x = tf.reciprocal(tf.maximum(pi * scale * filter_x, self.epsilon))
        filter_y = tf.reciprocal(tf.maximum(pi * scale * filter_y, self.epsilon))

        return filter_x, filter_y, tf.exp(log_gamma)
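The Cauchy-shaped filter banks returned above are typically applied to a 2-D input as a pair of separable matrix multiplications, as in DRAW-style read attention. A minimal sketch of that application step, which is an assumption and not part of the original class (read_glimpse is a hypothetical helper; filter_y is assumed to have shape [batch, N, height], filter_x shape [batch, N, width], and image shape [batch, height, width]):

import tensorflow as tf

def read_glimpse(image, filter_x, filter_y, gamma):
    # Apply the separable attention filters:
    # [batch, N, height] x [batch, height, width] x [batch, width, N] -> [batch, N, N]
    glimpse = tf.matmul(tf.matmul(filter_y, image), filter_x, transpose_b=True)
    # Scale the extracted glimpse by the learned intensity gamma
    return glimpse * tf.reshape(gamma, [-1, 1, 1])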
Example 2: approximate_posterior
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import linear [as alias]
def approximate_posterior(self, tensor, scope='posterior'):
    """ Calculate the approximate posterior given the tensor """
    # Generate mu and sigma of the Gaussian for the approximate posterior
    with tf.variable_scope(scope, 'posterior', [tensor]):
        mean = layers.linear(tensor, self.sample_size, scope='mean')

        # Use the log of sigma for numerical stability
        log_sigma = layers.linear(tensor, self.sample_size, scope='log_sigma')

        # Create the Gaussian distribution
        sigma = tf.exp(log_sigma)
        posterior = distributions.Normal(mean, sigma, name='posterior')

        self.collect_named_outputs(posterior.loc)
        self.collect_named_outputs(posterior.scale)
        self.posteriors.append(posterior)

        return posterior
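Once the posterior is built, a VAE-style model typically draws a reparameterized sample from it and penalizes its KL divergence from a standard-normal prior. A minimal sketch of that downstream wiring, assuming posterior is the distributions.Normal returned above (the prior and the loss term are assumptions, not part of the original snippet):

import tensorflow as tf
from tensorflow.contrib import distributions

prior = distributions.Normal(tf.zeros_like(posterior.loc),
                             tf.ones_like(posterior.scale))
z = posterior.sample()  # reparameterized latent sample
kl_loss = tf.reduce_sum(distributions.kl_divergence(posterior, prior), -1)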
Example 3: discriminator_res
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import linear [as alias]
def discriminator_res(H, opt, dropout, prefix='', num_outputs=1, is_reuse=None):
    # last layer must be linear
    # H = tf.squeeze(H, [1,2])
    # pdb.set_trace()
    biasInit = tf.constant_initializer(0.001, dtype=tf.float32)
    H_dis_0 = layers.fully_connected(tf.nn.dropout(H, keep_prob=dropout), num_outputs=opt.embed_size,
                                     biases_initializer=biasInit, activation_fn=None, scope=prefix + 'dis_1',
                                     reuse=is_reuse)
    H_dis_0n = tf.nn.relu(H_dis_0)
    H_dis_1 = layers.fully_connected(tf.nn.dropout(H_dis_0n, keep_prob=dropout), num_outputs=opt.embed_size,
                                     biases_initializer=biasInit, activation_fn=None, scope=prefix + 'dis_2',
                                     reuse=is_reuse)
    H_dis_1n = tf.nn.relu(H_dis_1) + H_dis_0
    H_dis_2 = layers.fully_connected(tf.nn.dropout(H_dis_1n, keep_prob=dropout), num_outputs=opt.embed_size,
                                     biases_initializer=biasInit, activation_fn=None, scope=prefix + 'dis_3',
                                     reuse=is_reuse)
    H_dis_2n = tf.nn.relu(H_dis_2) + H_dis_1
    H_dis_3 = layers.fully_connected(tf.nn.dropout(H_dis_2n, keep_prob=dropout), num_outputs=opt.embed_size,
                                     biases_initializer=biasInit, activation_fn=None, scope=prefix + 'dis_4',
                                     reuse=is_reuse)
    logits = layers.linear(tf.nn.dropout(H_dis_3, keep_prob=dropout), num_outputs=num_outputs,
                           biases_initializer=biasInit, scope=prefix + 'dis_10', reuse=is_reuse)
    return logits
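All of the discriminator variants on this page share the same calling convention: a feature tensor H, an options object carrying layer sizes, and a dropout keep probability. A minimal usage sketch, assuming discriminator_res from this example is in scope (the Options namedtuple and all sizes are hypothetical, chosen only for illustration):

import collections
import tensorflow as tf

Options = collections.namedtuple('Options', ['embed_size', 'H_dis'])
opt = Options(embed_size=128, H_dis=128)

H = tf.placeholder(tf.float32, [None, 128])        # batch of features
keep_prob = tf.placeholder_with_default(1.0, [])   # dropout keep probability
logits = discriminator_res(H, opt, keep_prob, prefix='d_')  # [None, 1]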
Example 4: write
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import linear [as alias]
def write(self, data):
    """ Do a write given the data """
    return layers.linear(data, self.output_size, scope='write')
Example 5: read
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import linear [as alias]
def read(self, data, focus):
    """ Do a read given the data """
    focus = layers.linear(focus, data.get_shape().as_list()[-1])
    focused = tf.expand_dims(self.focus_fn(focus, name='focus'), 1)

    return layers.flatten(focused * data)
Example 6: read_multiple
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import linear [as alias]
def read_multiple(self, data_list, focus):
    """ Do a filtered read for multiple tensors using the same focus """
    focus = layers.linear(focus, data_list[0].get_shape().as_list()[-1])
    focused = tf.expand_dims(self.focus_fn(focus, name='focus'), 1)

    focus_list = []
    for data in data_list:
        focus_list.append(layers.flatten(focused * data))

    return tf.concat(focus_list, 1)
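Both read and read_multiple implement the same soft gating: project the focus vector to the data's feature size, turn it into weights with self.focus_fn, and pool the weighted data. A standalone sketch of the pattern outside the class, assuming tf.nn.softmax as the focus function (the shapes are illustrative):

import tensorflow as tf
from tensorflow.contrib import layers

data = tf.placeholder(tf.float32, [None, 10, 64])   # [batch, items, features]
focus = tf.placeholder(tf.float32, [None, 32])      # controller state

weights = tf.nn.softmax(layers.linear(focus, 64), name='focus')  # [batch, 64]
gated = tf.expand_dims(weights, 1) * data            # gate feature channels
read_out = layers.flatten(gated)                     # [batch, 640]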
Example 7: _get_key
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import linear [as alias]
def _get_key(self, focus):
    """ Get the key for the data """
    beta = layers.linear(focus, 1)
    key = layers.linear(focus, self.shape[1])

    return beta, tf.expand_dims(tf.nn.l2_normalize(key, -1), -1)
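The beta/key pair produced above matches content-based addressing in memory-augmented networks: the L2-normalized key is compared to normalized memory rows by cosine similarity, and beta sharpens the resulting softmax. A minimal sketch of that addressing step, which is an assumption and not part of the original class (memory is a hypothetical [batch, slots, width] tensor; key and beta come from _get_key):

import tensorflow as tf

memory_norm = tf.nn.l2_normalize(memory, -1)               # [batch, slots, width]
similarity = tf.squeeze(tf.matmul(memory_norm, key), -1)   # cosine sim, [batch, slots]
weights = tf.nn.softmax(beta * similarity)                 # sharpened address weights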
Example 8: regression_head
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import linear [as alias]
def regression_head(label_name=None,
                    weight_column_name=None,
                    label_dimension=1,
                    enable_centered_bias=False,
                    head_name=None):
    """Creates a `Head` for linear regression.

    Args:
      label_name: String, name of the key in label dict. Can be null if label
        is a tensor (single headed models).
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training.
        It will be multiplied by the loss of the example.
      label_dimension: Number of regression labels per example. This is the
        size of the last dimension of the labels `Tensor` (typically, this has
        shape `[batch_size, label_dimension]`).
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      head_name: name of the head. If provided, predictions, summary and
        metrics keys will be suffixed by `"/" + head_name` and the default
        variable scope will be `head_name`.

    Returns:
      An instance of `Head` for linear regression.
    """
    return _RegressionHead(
        label_name=label_name,
        weight_column_name=weight_column_name,
        label_dimension=label_dimension,
        enable_centered_bias=enable_centered_bias,
        head_name=head_name,
        loss_fn=_mean_squared_loss,
        link_fn=array_ops.identity)
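A head like this is typically handed to an estimator that accepts a custom head. A minimal sketch, assuming tf.contrib.learn's DNNEstimator and its head argument are available in this TF 1.x setting (the feature column, label key, and layer sizes are illustrative assumptions):

from tensorflow.contrib import layers
from tensorflow.contrib import learn

# hypothetical wiring: a regressor whose loss and metrics come from the head
head = regression_head(label_name='score', label_dimension=1)
estimator = learn.DNNEstimator(
    head=head,
    feature_columns=[layers.real_valued_column('x', dimension=8)],
    hidden_units=[32, 16])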
Example 9: _logits
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import linear [as alias]
def _logits(logits_input, logits, logits_dimension):
    """Validate logits args, and create `logits` if necessary.

    Exactly one of `logits_input` and `logits` must be provided.

    Args:
      logits_input: `Tensor` input to `logits`.
      logits: `Tensor` output.
      logits_dimension: Integer, last dimension of `logits`. This is used to
        create `logits` from `logits_input` if `logits` is `None`; otherwise,
        it's used to validate `logits`.

    Returns:
      `logits` `Tensor`.

    Raises:
      ValueError: if neither or both of `logits` and `logits_input` are
        supplied.
    """
    if (logits_dimension is None) or (logits_dimension < 1):
        raise ValueError("Invalid logits_dimension %s." % logits_dimension)

    # If not provided, create logits.
    if logits is None:
        if logits_input is None:
            raise ValueError("Neither logits nor logits_input supplied.")
        return layers_lib.linear(logits_input, logits_dimension, scope="logits")

    if logits_input is not None:
        raise ValueError("Both logits and logits_input supplied.")

    logits = ops.convert_to_tensor(logits, name="logits")
    logits_dims = logits.get_shape().dims
    if logits_dims is not None:
        logits_dims[-1].assert_is_compatible_with(logits_dimension)

    return logits
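The contract is: supply exactly one of logits_input (and get a fresh linear projection under the "logits" scope) or logits (and get it validated against logits_dimension). A sketch of both call paths, assuming _logits and its module imports are in scope:

import tensorflow as tf

features = tf.placeholder(tf.float32, [None, 16])

# Path 1: create logits from an input tensor via layers_lib.linear
created = _logits(logits_input=features, logits=None, logits_dimension=3)

# Path 2: validate precomputed logits against the expected dimension
precomputed = tf.placeholder(tf.float32, [None, 3])
validated = _logits(logits_input=None, logits=precomputed, logits_dimension=3)

# Supplying both (or neither) raises ValueError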
Example 10: discriminator_1layer
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import linear [as alias]
def discriminator_1layer(H, opt, dropout, prefix='', num_outputs=1, is_reuse=None):
    # last layer must be linear
    H = tf.squeeze(H)
    biasInit = tf.constant_initializer(0.001, dtype=tf.float32)
    H_dis = layers.fully_connected(tf.nn.dropout(H, keep_prob=dropout), num_outputs=opt.H_dis,
                                   biases_initializer=biasInit, activation_fn=tf.nn.relu, scope=prefix + 'dis_1',
                                   reuse=is_reuse)
    return H_dis
Example 11: discriminator_0layer
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import linear [as alias]
def discriminator_0layer(H, opt, dropout, prefix='', num_outputs=1, is_reuse=None):
    H = tf.squeeze(H)
    biasInit = tf.constant_initializer(0.001, dtype=tf.float32)
    logits = layers.linear(tf.nn.dropout(H, keep_prob=dropout), num_outputs=num_outputs,
                           biases_initializer=biasInit, scope=prefix + 'dis', reuse=is_reuse)
    return logits
Example 12: discriminator_2layer
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import linear [as alias]
def discriminator_2layer(H, opt, dropout, prefix='', num_outputs=1, is_reuse=None):
    # last layer must be linear
    # H = tf.squeeze(H, [1,2])
    # pdb.set_trace()
    biasInit = tf.constant_initializer(0.001, dtype=tf.float32)
    H_dis = layers.fully_connected(tf.nn.dropout(H, keep_prob=dropout), num_outputs=opt.H_dis,
                                   biases_initializer=biasInit, activation_fn=tf.nn.relu, scope=prefix + 'dis_1',
                                   reuse=is_reuse)
    logits = layers.linear(tf.nn.dropout(H_dis, keep_prob=dropout), num_outputs=num_outputs,
                           biases_initializer=biasInit, scope=prefix + 'dis_2', reuse=is_reuse)
    return logits
Example 13: discriminator_3layer
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import linear [as alias]
def discriminator_3layer(H, opt, dropout, prefix='', num_outputs=1, is_reuse=None):
    # last layer must be linear
    # H = tf.squeeze(H, [1,2])
    # pdb.set_trace()
    biasInit = tf.constant_initializer(0.001, dtype=tf.float32)
    H_dis = layers.fully_connected(tf.nn.dropout(H, keep_prob=dropout), num_outputs=opt.H_dis,
                                   biases_initializer=biasInit, activation_fn=tf.nn.relu, scope=prefix + 'dis_1',
                                   reuse=is_reuse)
    H_dis = layers.fully_connected(tf.nn.dropout(H_dis, keep_prob=dropout), num_outputs=opt.H_dis,
                                   biases_initializer=biasInit, activation_fn=tf.nn.relu, scope=prefix + 'dis_2',
                                   reuse=is_reuse)
    logits = layers.linear(tf.nn.dropout(H_dis, keep_prob=dropout), num_outputs=num_outputs,
                           biases_initializer=biasInit, scope=prefix + 'dis_3', reuse=is_reuse)
    return logits
Example 14: discriminator_2layer
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import linear [as alias]
def discriminator_2layer(H, opt, dropout, prefix='', num_outputs=1, is_reuse=None):
    # last layer must be linear
    print(num_outputs, "===num outputs===")
    biasInit = tf.constant_initializer(0.001, dtype=tf.float32)
    H_dis = layers.fully_connected(tf.nn.dropout(H, keep_prob=dropout), num_outputs=opt.H_dis,
                                   biases_initializer=biasInit, activation_fn=tf.nn.relu, scope=prefix + 'dis_1',
                                   reuse=is_reuse)
    logits = layers.linear(tf.nn.dropout(H_dis, keep_prob=dropout), num_outputs=num_outputs,
                           biases_initializer=biasInit, scope=prefix + 'dis_2', reuse=is_reuse)
    return logits
Example 15: discriminator_3layer
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import linear [as alias]
def discriminator_3layer(H, opt, dropout, prefix='', num_outputs=1, is_reuse=None):
    # last layer must be linear
    biasInit = tf.constant_initializer(0.001, dtype=tf.float32)
    H_dis = layers.fully_connected(tf.nn.dropout(H, keep_prob=dropout), num_outputs=opt.H_dis,
                                   biases_initializer=biasInit, activation_fn=tf.nn.relu, scope=prefix + 'dis_1',
                                   reuse=is_reuse)
    H_dis = layers.fully_connected(tf.nn.dropout(H_dis, keep_prob=dropout), num_outputs=opt.H_dis,
                                   biases_initializer=biasInit, activation_fn=tf.nn.relu, scope=prefix + 'dis_2',
                                   reuse=is_reuse)
    logits = layers.linear(tf.nn.dropout(H_dis, keep_prob=dropout), num_outputs=num_outputs,
                           biases_initializer=biasInit, scope=prefix + 'dis_3', reuse=is_reuse)
    return logits