This article collects typical usage examples of the Python method tensorflow.contrib.layers.fully_connected. If you have been wondering what layers.fully_connected does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore the containing module, tensorflow.contrib.layers, for related functionality.
The following presents 15 code examples of layers.fully_connected, sorted by popularity by default.
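Before the examples, a minimal sketch of a typical call may be useful (an illustrative sketch only: tf.contrib exists in TensorFlow 1.x and was removed in TensorFlow 2.x; the shapes and layer sizes below are arbitrary assumptions):

import tensorflow as tf
from tensorflow.contrib import layers

# A 2-D input of shape [batch_size, feature_dim]; the sizes are illustrative.
x = tf.placeholder(tf.float32, [None, 128])

# Hidden layer; activation_fn defaults to tf.nn.relu when not given.
h = layers.fully_connected(x, num_outputs=64, activation_fn=tf.nn.relu,
                           scope='fc1')

# Linear output layer: pass activation_fn=None to disable the nonlinearity.
logits = layers.fully_connected(h, num_outputs=10, activation_fn=None,
                                scope='fc2')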
Example 1: extract_parameters
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import fully_connected [as alias]
def extract_parameters(self, features):
    output_dim = (self.get_num_filter_parameters() +
                  self.get_num_mask_parameters())
    features = ly.fully_connected(
        features,
        self.cfg.fc1_size,
        scope='fc1',
        activation_fn=lrelu,
        weights_initializer=tf.contrib.layers.xavier_initializer())
    features = ly.fully_connected(
        features,
        output_dim,
        scope='fc2',
        activation_fn=None,
        weights_initializer=tf.contrib.layers.xavier_initializer())
    return features[:, :self.get_num_filter_parameters()], \
           features[:, self.get_num_filter_parameters():]

# Should be implemented in child classes
Example 2: var_dropout
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import fully_connected [as alias]
def var_dropout(x, n, net_size, n_particles, is_training):
    normalizer_params = {'is_training': is_training,
                         'updates_collections': None}
    bn = zs.BayesianNet()
    h = x
    for i, (n_in, n_out) in enumerate(zip(net_size[:-1], net_size[1:])):
        eps_mean = tf.ones([n, n_in])
        eps = bn.normal(
            'layer' + str(i) + '/eps', eps_mean, std=1.,
            n_samples=n_particles, group_ndims=1)
        h = layers.fully_connected(
            h * eps, n_out, normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        if i < len(net_size) - 2:
            h = tf.nn.relu(h)
    y = bn.categorical('y', h)
    bn.deterministic('y_logit', h)
    return bn
Example 3: _build_net
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import fully_connected [as alias]
def _build_net(self, input_BO, scope):
    """ The Actor network.

    Uses ReLUs for all hidden layers, but a tanh on the output to bound the
    action. This follows their 'low-dimensional networks', using 400 and 300
    units for the hidden layers. Set `reuse=False`. I don't use batch
    normalization or their precise weight initialization.
    """
    with tf.variable_scope(scope, reuse=False):
        hidden1 = layers.fully_connected(input_BO,
                num_outputs=400,
                weights_initializer=layers.xavier_initializer(),
                activation_fn=tf.nn.relu)
        hidden2 = layers.fully_connected(hidden1,
                num_outputs=300,
                weights_initializer=layers.xavier_initializer(),
                activation_fn=tf.nn.relu)
        actions_BA = layers.fully_connected(hidden2,
                num_outputs=self.ac_dim,
                weights_initializer=layers.xavier_initializer(),
                activation_fn=tf.nn.tanh)  # Note the tanh!
        # This should broadcast, but hasn't been tested with ac_dim > 1.
        actions_BA = tf.multiply(actions_BA, self.ac_high)
        return actions_BA
Example 4: _make_network
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import fully_connected [as alias]
def _make_network(self, data_in, out_dim):
    """ Build the network with the same architecture as OpenAI's paper.

    Returns the final *layer* of the network, which corresponds to our
    chosen action. There is no non-linearity on the last layer because
    different envs have different action ranges.
    """
    with tf.variable_scope("ESAgent", reuse=False):
        out = data_in
        out = layers.fully_connected(out, num_outputs=64,
                weights_initializer=layers.xavier_initializer(uniform=True),
                # weights_initializer=utils.normc_initializer(0.5),
                activation_fn=tf.nn.tanh)
        out = layers.fully_connected(out, num_outputs=64,
                weights_initializer=layers.xavier_initializer(uniform=True),
                # weights_initializer=utils.normc_initializer(0.5),
                activation_fn=tf.nn.tanh)
        out = layers.fully_connected(out, num_outputs=out_dim,
                weights_initializer=layers.xavier_initializer(uniform=True),
                # weights_initializer=utils.normc_initializer(0.5),
                activation_fn=None)
        return out
Example 5: __init__
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import fully_connected [as alias]
def __init__(self,
             params,
             device_assigner=None,
             optimizer_class=adagrad.AdagradOptimizer,
             **kwargs):
    super(HardDecisionsToDataThenNN, self).__init__(
        params,
        device_assigner=device_assigner,
        optimizer_class=optimizer_class,
        **kwargs)

    self.layers = [decisions_to_data.HardDecisionsToDataLayer(
                       params, 0, device_assigner),
                   fully_connected.FullyConnectedLayer(
                       params, 1, device_assigner=device_assigner)]
Example 6: discriminator_res
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import fully_connected [as alias]
def discriminator_res(H, opt, dropout, prefix='', num_outputs=1, is_reuse=None):
    # The last layer must be linear.
    # H = tf.squeeze(H, [1, 2])
    biasInit = tf.constant_initializer(0.001, dtype=tf.float32)
    H_dis_0 = layers.fully_connected(
        tf.nn.dropout(H, keep_prob=dropout), num_outputs=opt.embed_size,
        biases_initializer=biasInit, activation_fn=None, scope=prefix + 'dis_1',
        reuse=is_reuse)
    H_dis_0n = tf.nn.relu(H_dis_0)
    H_dis_1 = layers.fully_connected(
        tf.nn.dropout(H_dis_0n, keep_prob=dropout), num_outputs=opt.embed_size,
        biases_initializer=biasInit, activation_fn=None, scope=prefix + 'dis_2',
        reuse=is_reuse)
    H_dis_1n = tf.nn.relu(H_dis_1) + H_dis_0  # residual connection
    H_dis_2 = layers.fully_connected(
        tf.nn.dropout(H_dis_1n, keep_prob=dropout), num_outputs=opt.embed_size,
        biases_initializer=biasInit, activation_fn=None, scope=prefix + 'dis_3',
        reuse=is_reuse)
    H_dis_2n = tf.nn.relu(H_dis_2) + H_dis_1  # residual connection
    H_dis_3 = layers.fully_connected(
        tf.nn.dropout(H_dis_2n, keep_prob=dropout), num_outputs=opt.embed_size,
        biases_initializer=biasInit, activation_fn=None, scope=prefix + 'dis_4',
        reuse=is_reuse)
    logits = layers.linear(
        tf.nn.dropout(H_dis_3, keep_prob=dropout), num_outputs=num_outputs,
        biases_initializer=biasInit, scope=prefix + 'dis_10', reuse=is_reuse)
    return logits
Example 7: BuildModel
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import fully_connected [as alias]
def BuildModel(self, resnet_fn, block_fn):
    # We use this model as a test case because the slim.nets.resnet module is
    # used in some production code.
    #
    # The model looks as follows:
    #
    # Image --> unit_1/shortcut
    # Image --> unit_1/conv1 --> unit_1/conv2 --> unit_1/conv3
    #
    # unit_1/shortcut + unit_1/conv3 --> unit_1 (residual connection)
    #
    # unit_1 --> unit_2/conv1 --> unit_2/conv2 --> unit_2/conv3
    #
    # unit_1 + unit_2/conv3 --> unit_2 (residual connection)
    #
    # In between, there are strided convolutions and pooling ops, but these
    # should not affect the regularizer.
    blocks = [
        block_fn('block1', base_depth=7, num_units=2, stride=2),
    ]
    image = tf.constant(0.0, shape=[1, 2, 2, NUM_CHANNELS])
    net = resnet_fn(
        image, blocks, include_root_block=False, is_training=False)[0]
    net = tf.reduce_mean(net, axis=(1, 2))
    return slim.layers.fully_connected(net, 23, scope='FC')
Example 8: fully_connected
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import fully_connected [as alias]
def fully_connected(self, *args, **kwargs):
    """Masks NUM_OUTPUTS from the function pointed to by 'fully_connected'.

    The object's parameterization has precedence over the given NUM_OUTPUTS
    argument. The resolution of the op names uses
    tf.contrib.framework.get_name_scope() and kwargs['scope'].

    Args:
      *args: Arguments for the operation.
      **kwargs: Keyword arguments for the operation.

    Returns:
      The result of applying the function in function_map['fully_connected']
      to the given 'inputs', '*args' and '**kwargs', possibly overriding
      NUM_OUTPUTS according to the parameterization.

    Raises:
      ValueError: If kwargs does not contain a key named 'scope'.
    """
    inputs = _get_from_args_or_kwargs('inputs', 0, args, kwargs)
    if inputs.shape.ndims != 2:
        raise ValueError(
            'ConfigurableOps does not support fully_connected with rank != 2')
    fn, suffix = self._get_function_and_suffix('fully_connected')
    return self._mask(fn, suffix, *args, **kwargs)
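The precedence rule described in the docstring above can be made concrete with a simplified, hypothetical sketch (this is not the library's actual implementation; the scope-keyed dict is an assumption made purely for illustration):

# Hypothetical illustration only: a parameterization dict keyed by scope
# takes precedence over the caller-supplied num_outputs.
def masked_fully_connected(parameterization, inputs, num_outputs, scope, **kwargs):
    # Override num_outputs when this scope appears in the parameterization.
    num_outputs = parameterization.get(scope, num_outputs)
    return layers.fully_connected(inputs, num_outputs=num_outputs,
                                  scope=scope, **kwargs)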
Example 9: model
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import fully_connected [as alias]
def model(img_in, num_actions, scope, reuse=False, layer_norm=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        conv_out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            value_out = layers.fully_connected(conv_out, num_outputs=512, activation_fn=None)
            if layer_norm:
                value_out = layer_norm_fn(value_out, relu=True)
            else:
                value_out = tf.nn.relu(value_out)
            value_out = layers.fully_connected(value_out, num_outputs=num_actions, activation_fn=None)
        return value_out
Example 10: model
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import fully_connected [as alias]
def model(img_in, num_actions, scope, noisy=False, reuse=False,
          concat_softmax=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8,
                                       stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4,
                                       stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3,
                                       stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            if noisy:
                # Apply noisy networks to the fully connected layers
                # ref: https://arxiv.org/abs/1706.10295
                out = noisy_dense(out, name='noisy_fc1', size=512,
                                  activation_fn=tf.nn.relu)
                out = noisy_dense(out, name='noisy_fc2', size=num_actions)
            else:
                out = layers.fully_connected(out, num_outputs=512,
                                             activation_fn=tf.nn.relu)
                out = layers.fully_connected(out, num_outputs=num_actions,
                                             activation_fn=None)
        # V: Softmax - inspired by deep-rl-attack
        if concat_softmax:
            out = tf.nn.softmax(out)
        return out
Example 11: atari_model
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import fully_connected [as alias]
def atari_model(img_in, num_actions, scope, reuse=False):
    # as described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
        return out
Example 12: atari_model
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import fully_connected [as alias]
def atari_model(ram_in, num_actions, scope, reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = ram_in
        # out = tf.concat(1, (ram_in[:, 4:5], ram_in[:, 8:9], ram_in[:, 11:13],
        #                     ram_in[:, 21:22], ram_in[:, 50:51], ram_in[:, 60:61],
        #                     ram_in[:, 64:65]))
        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=256, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=128, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=64, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
        return out
Example 13: lander_model
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import fully_connected [as alias]
def lander_model(obs, num_actions, scope, reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = obs
        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=64, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=64, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
        return out
Example 14: _mlp
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import fully_connected [as alias]
def _mlp(hiddens, inpt, num_actions, scope, reuse=False, layer_norm=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        for hidden in hiddens:
            out = layers.fully_connected(out, num_outputs=hidden, activation_fn=None)
            if layer_norm:
                out = layers.layer_norm(out, center=True, scale=True)
            out = tf.nn.relu(out)
        q_out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
        return q_out
Example 15: _cnn_to_mlp
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import fully_connected [as alias]
def _cnn_to_mlp(convs, hiddens, dueling, inpt, num_actions, scope, reuse=False, layer_norm=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        with tf.variable_scope("convnet"):
            for num_outputs, kernel_size, stride in convs:
                out = layers.convolution2d(out,
                                           num_outputs=num_outputs,
                                           kernel_size=kernel_size,
                                           stride=stride,
                                           activation_fn=tf.nn.relu)
        conv_out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            action_out = conv_out
            for hidden in hiddens:
                action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=None)
                if layer_norm:
                    action_out = layers.layer_norm(action_out, center=True, scale=True)
                action_out = tf.nn.relu(action_out)
            action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)

        if dueling:
            with tf.variable_scope("state_value"):
                state_out = conv_out
                for hidden in hiddens:
                    state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=None)
                    if layer_norm:
                        state_out = layers.layer_norm(state_out, center=True, scale=True)
                    state_out = tf.nn.relu(state_out)
                state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
            q_out = state_score + action_scores_centered
        else:
            q_out = action_scores
        return q_out
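Finally, a usage sketch for _cnn_to_mlp (assumptions: TensorFlow 1.x graph mode, and a conv spec that mirrors the standard DQN Nature architecture rather than any particular caller's defaults; the shapes and action count are illustrative):

# Hedged usage sketch; shapes, num_actions, and the conv spec are assumptions.
obs_ph = tf.placeholder(tf.float32, [None, 84, 84, 4])  # stacked Atari frames
q_values = _cnn_to_mlp(
    convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],  # (num_outputs, kernel_size, stride)
    hiddens=[512],
    dueling=True,
    inpt=obs_ph,
    num_actions=6,
    scope="q_func")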