This article collects and summarizes typical usage examples of the Python function tensorflow.contrib.layers.fully_connected. If you have been wondering how exactly fully_connected is used, what it does, or what working examples look like, the curated code samples below should help.
The following shows 15 code examples of the fully_connected function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
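Before the examples, here is a minimal sketch of how fully_connected is typically called, assuming TensorFlow 1.x with the contrib module available; the placeholder shape and layer sizes below are illustrative choices, not taken from any of the examples.

import tensorflow as tf
from tensorflow.contrib import layers

# Illustrative input: a batch of 784-dimensional feature vectors.
x = tf.placeholder(tf.float32, [None, 784])
# Hidden layer; activation_fn defaults to tf.nn.relu if not given.
hidden = layers.fully_connected(x, num_outputs=256, activation_fn=tf.nn.relu)
# Linear output layer: pass activation_fn=None to get raw logits.
logits = layers.fully_connected(hidden, num_outputs=10, activation_fn=None)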
Example 1: __init__
def __init__(self, input_size=4, hidden_size=2, gamma=0.95,
             action_size=2, alpha=0.1):
    self.input_size = input_size
    self.hidden_size = hidden_size
    self.gamma = gamma
    self.action_size = action_size
    self.alpha = alpha
    # save the hyperparameters
    self.params = self.__dict__.copy()

    # placeholders
    self.input_pl = tf.placeholder(tf.float32, [None, input_size])
    self.action_pl = tf.placeholder(tf.int32, [None])
    self.reward_pl = tf.placeholder(tf.float32, [None])

    # a two-layer fully connected network
    hidden_layer = layers.fully_connected(self.input_pl,
                                          hidden_size,
                                          biases_initializer=None,
                                          activation_fn=tf.nn.relu)
    self.output = layers.fully_connected(hidden_layer,
                                         action_size,
                                         biases_initializer=None,
                                         activation_fn=tf.nn.softmax)

    # probability assigned to the action that was actually taken
    one_hot = tf.one_hot(self.action_pl, action_size)
    responsible_output = tf.reduce_sum(self.output * one_hot, axis=1)
    self.loss = -tf.reduce_mean(tf.log(responsible_output) * self.reward_pl)

    # gradients are computed explicitly and applied through placeholders
    variables = tf.trainable_variables()
    self.variable_pls = []
    for i, var in enumerate(variables):
        self.variable_pls.append(tf.placeholder(tf.float32))
    self.gradients = tf.gradients(self.loss, variables)
    solver = tf.train.AdamOptimizer(learning_rate=alpha)
    self.update = solver.apply_gradients(zip(self.variable_pls, variables))
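A plausible way to drive the graph above (a hypothetical training-loop sketch: the Agent class name, session handling, and the states/actions/discounted_rewards arrays are assumptions, not part of the original example) is to run self.gradients on a batch and then feed the resulting arrays back through self.variable_pls to apply self.update:

# Hypothetical usage sketch for the policy-gradient graph defined above.
agent = Agent()                                   # assumed class wrapping the __init__ shown above
sess = tf.Session()
sess.run(tf.global_variables_initializer())

feed = {agent.input_pl: states,                   # assumed arrays collected from an episode
        agent.action_pl: actions,
        agent.reward_pl: discounted_rewards}
grads = sess.run(agent.gradients, feed_dict=feed)

# Feed the numpy gradients back through the placeholders and apply the Adam update.
sess.run(agent.update, feed_dict=dict(zip(agent.variable_pls, grads)))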
Example 2: _build_q_network
def _build_q_network(registry, inputs, num_actions, config):
    dueling = config["dueling"]
    hiddens = config["hiddens"]
    frontend = ModelCatalog.get_model(registry, inputs, 1, config["model"])
    frontend_out = frontend.last_layer

    with tf.variable_scope("action_value"):
        action_out = frontend_out
        for hidden in hiddens:
            action_out = layers.fully_connected(
                action_out, num_outputs=hidden, activation_fn=tf.nn.relu)
        action_scores = layers.fully_connected(
            action_out, num_outputs=num_actions, activation_fn=None)

    if dueling:
        with tf.variable_scope("state_value"):
            state_out = frontend_out
            for hidden in hiddens:
                state_out = layers.fully_connected(
                    state_out, num_outputs=hidden, activation_fn=tf.nn.relu)
            state_score = layers.fully_connected(
                state_out, num_outputs=1, activation_fn=None)
        action_scores_mean = tf.reduce_mean(action_scores, 1)
        action_scores_centered = action_scores - tf.expand_dims(
            action_scores_mean, 1)
        return state_score + action_scores_centered
    else:
        return action_scores
Example 3: q_func_builder
def q_func_builder(input_placeholder, num_actions, scope, reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        latent = network(input_placeholder)
        if isinstance(latent, tuple):
            if latent[1] is not None:
                raise NotImplementedError("DQN is not compatible with recurrent policies yet")
            latent = latent[0]

        latent = layers.flatten(latent)

        with tf.variable_scope("action_value"):
            action_out = latent
            for hidden in hiddens:
                action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=None)
                if layer_norm:
                    action_out = layers.layer_norm(action_out, center=True, scale=True)
                action_out = tf.nn.relu(action_out)
            action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)

        if dueling:
            with tf.variable_scope("state_value"):
                state_out = latent
                for hidden in hiddens:
                    state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=None)
                    if layer_norm:
                        state_out = layers.layer_norm(state_out, center=True, scale=True)
                    state_out = tf.nn.relu(state_out)
                state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
            q_out = state_score + action_scores_centered
        else:
            q_out = action_scores

        return q_out
Example 4: bert_self_attention
def bert_self_attention(config, hidden_states, attention_mask):
    with tf.variable_scope("BertSelfAttention"):
        mixed_query_layer = layers.fully_connected(hidden_states,
            config.hidden_size, scope="FCquery", activation_fn=None)
        mixed_key_layer = layers.fully_connected(hidden_states,
            config.hidden_size, scope="FCkey", activation_fn=None)
        mixed_value_layer = layers.fully_connected(hidden_states,
            config.hidden_size, scope="FCvalue", activation_fn=None)

        query_layer = transpose_for_scores(config, mixed_query_layer)
        key_layer = transpose_for_scores(config, mixed_key_layer)
        value_layer = transpose_for_scores(config, mixed_value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = tf.matmul(query_layer, tf.transpose(key_layer, [0, 1, 3, 2]))
        # TODO(jonathan): the output of matmul is different than pyTorch's expected broadcasting
        # behavior... investigate
        attention_scores = attention_scores / np.sqrt(config.attention_head_size)

        # Apply the attention mask (precomputed for all layers in the BertModel forward() function).
        attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = tf.nn.softmax(attention_scores, axis=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but it is taken from the original Transformer paper.
        attention_probs = tf.nn.dropout(attention_probs, keep_prob=1.0 - config.attention_probs_dropout_prob)

        context_layer = tf.matmul(attention_probs, value_layer)
        context_layer = tf.transpose(context_layer, (0, 2, 1, 3))
        new_context_layer_shape = [tf.shape(context_layer)[i] for i in range(2)] + [config.all_head_size]
        context_layer = tf.reshape(context_layer, new_context_layer_shape)
        return context_layer
Example 5: model
def model(img_in, num_actions, scope, noisy=False, reuse=False,
          concat_softmax=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8,
                                       stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4,
                                       stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3,
                                       stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("action_value"):
            if noisy:
                # Apply noisy network on fully connected layers
                # ref: https://arxiv.org/abs/1706.10295
                out = noisy_dense(out, name='noisy_fc1', size=512,
                                  activation_fn=tf.nn.relu)
                out = noisy_dense(out, name='noisy_fc2', size=num_actions)
            else:
                out = layers.fully_connected(out, num_outputs=512,
                                             activation_fn=tf.nn.relu)
                out = layers.fully_connected(out, num_outputs=num_actions,
                                             activation_fn=None)
            # V: Softmax - inspired by deep-rl-attack #
            if concat_softmax:
                out = tf.nn.softmax(out)
        return out
Example 6: add_final_training_ops
def add_final_training_ops(self,
                           embeddings,
                           all_labels_count,
                           hidden_layer_size=BOTTLENECK_TENSOR_SIZE / 4,
                           dropout_keep_prob=None):
    """Adds a new softmax and fully-connected layer for training.

    The setup for the softmax and fully-connected layers is based on:
    https://tensorflow.org/versions/master/tutorials/mnist/beginners/index.html

    This function can be customized to add arbitrary layers for
    application-specific requirements.

    Args:
      embeddings: The embedding (bottleneck) tensor.
      all_labels_count: The number of all labels, including the default label.
      hidden_layer_size: The size of the hidden layer; roughly 1/4 of the
        bottleneck tensor size.
      dropout_keep_prob: The fraction of activation values that are retained.

    Returns:
      softmax: The softmax tensor holding the final scores.
      logits: The logits tensor.
    """
    with tf.name_scope('input'):
        with tf.name_scope('Wx_plus_b'):
            hidden = layers.fully_connected(embeddings, hidden_layer_size)
            # We need dropout when the size of the dataset is rather small.
            if dropout_keep_prob:
                hidden = tf.nn.dropout(hidden, dropout_keep_prob)
            logits = layers.fully_connected(
                hidden, all_labels_count, activation_fn=None)
            softmax = tf.nn.softmax(logits, name='softmax')
    return softmax, logits
Example 7: dueling_model
def dueling_model(img_in, num_actions, scope, reuse=False, layer_norm=False):
    """As described in https://arxiv.org/abs/1511.06581"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        conv_out = layers.flatten(out)

        with tf.variable_scope("state_value"):
            state_hidden = layers.fully_connected(conv_out, num_outputs=512, activation_fn=None)
            if layer_norm:
                state_hidden = layer_norm_fn(state_hidden, relu=True)
            else:
                state_hidden = tf.nn.relu(state_hidden)
            state_score = layers.fully_connected(state_hidden, num_outputs=1, activation_fn=None)

        with tf.variable_scope("action_value"):
            actions_hidden = layers.fully_connected(conv_out, num_outputs=512, activation_fn=None)
            if layer_norm:
                actions_hidden = layer_norm_fn(actions_hidden, relu=True)
            else:
                actions_hidden = tf.nn.relu(actions_hidden)
            action_scores = layers.fully_connected(actions_hidden, num_outputs=num_actions, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores = action_scores - tf.expand_dims(action_scores_mean, 1)
        return state_score + action_scores
Example 8: multilayer_perceptron
def multilayer_perceptron(x):
    # First layer built by hand from raw variables.
    W_fc1 = tf.Variable(tf.random_normal([784, 256], mean=0, stddev=1))
    b_fc1 = tf.Variable(tf.zeros([256]))  # float32 bias (an int list here would break xw_plus_b)
    fc1 = tf.nn.xw_plus_b(x, W_fc1, b_fc1)
    # Remaining layers built with contrib.layers.fully_connected.
    fc2 = layers.fully_connected(fc1, 256, activation_fn=tf.nn.relu, scope='fc2')
    out = layers.fully_connected(fc2, 10, activation_fn=None, scope='out')
    return out
Example 9: model
def model(inpt, num_actions, scope, reuse=False):
    """This model takes as input an observation and returns values of all actions."""
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        out = layers.fully_connected(out, num_outputs=64, activation_fn=tf.nn.tanh)
        out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
        return out
Example 10: _cnn_to_mlp
def _cnn_to_mlp(convs, hiddens, dueling, inpt, num_actions, scope, reuse=False, layer_norm=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        with tf.variable_scope("convnet"):
            for num_outputs, kernel_size, stride in convs:
                out = layers.convolution2d(out,
                                           num_outputs=num_outputs,
                                           kernel_size=kernel_size,
                                           stride=stride,
                                           activation_fn=tf.nn.relu)
        conv_out = layers.flatten(out)

        with tf.variable_scope("action_value"):
            action_out = conv_out
            for hidden in hiddens:
                action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=None)
                if layer_norm:
                    action_out = layers.layer_norm(action_out, center=True, scale=True)
                action_out = tf.nn.relu(action_out)
            action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)

        if dueling:
            with tf.variable_scope("state_value"):
                state_out = conv_out
                for hidden in hiddens:
                    state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=None)
                    if layer_norm:
                        state_out = layers.layer_norm(state_out, center=True, scale=True)
                    state_out = tf.nn.relu(state_out)
                state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
            q_out = state_score + action_scores_centered
        else:
            q_out = action_scores
        return q_out
Example 11: model_fn
def model_fn(x, target, mode, params):
    """Model function for Estimator."""
    y_ = tf.cast(target, tf.float32)
    x_image = tf.reshape(x, [-1, 28, 28, 1])

    # first convolutional layer
    h_conv1 = layers.convolution2d(x_image, 32, [5, 5])
    h_pool1 = layers.max_pool2d(h_conv1, [2, 2])

    # second convolutional layer
    h_conv2 = layers.convolution2d(h_pool1, 64, [5, 5])
    h_pool2 = layers.max_pool2d(h_conv2, [2, 2])

    # densely connected layer
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    h_fc1 = layers.fully_connected(h_pool2_flat, 1024)
    h_fc1_drop = layers.dropout(
        h_fc1, keep_prob=params["dropout"],
        is_training=(mode == ModeKeys.TRAIN))

    # readout layer
    y_conv = layers.fully_connected(h_fc1_drop, 10, activation_fn=None)

    # softmax_cross_entropy_with_logits requires named arguments in TF 1.x
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
    train_op = tf.contrib.layers.optimize_loss(
        loss=cross_entropy,
        global_step=tf.contrib.framework.get_global_step(),
        learning_rate=params["learning_rate"],
        optimizer="Adam")
    predictions = tf.argmax(y_conv, 1)
    return predictions, cross_entropy, train_op
Example 12: dnn_logits_fn
def dnn_logits_fn():
    """Builds the logits from the input layer."""
    previous_layer = input_layer
    for layer_id, num_hidden_units in enumerate(dnn_hidden_units):
        with variable_scope.variable_scope(
                "hiddenlayer_%d" % layer_id,
                values=(previous_layer,)) as hidden_layer_scope:
            net = layers.fully_connected(
                previous_layer,
                num_hidden_units,
                activation_fn=dnn_activation_fn,
                variables_collections=[dnn_parent_scope],
                scope=hidden_layer_scope)
            if dnn_dropout is not None and mode == model_fn.ModeKeys.TRAIN:
                net = layers.dropout(net, keep_prob=(1.0 - dnn_dropout))
        _add_hidden_layer_summary(net, hidden_layer_scope.name)
        previous_layer = net
    with variable_scope.variable_scope(
            "logits", values=(previous_layer,)) as logits_scope:
        dnn_logits = layers.fully_connected(
            previous_layer,
            head.logits_dimension,
            activation_fn=None,
            variables_collections=[dnn_parent_scope],
            scope=logits_scope)
    _add_hidden_layer_summary(dnn_logits, logits_scope.name)
    return dnn_logits
Example 13: __call__
def __call__(self, x, reuse=False):
    with tf.variable_scope(self.name) as scope:
        if reuse:
            scope.reuse_variables()
        shared = tcl.fully_connected(x, 128, activation_fn=tf.nn.relu,
                                     weights_initializer=tf.random_normal_initializer(0, 0.02))
        q = tcl.fully_connected(shared, 10, activation_fn=None,
                                weights_initializer=tf.random_normal_initializer(0, 0.02))  # 10 classes
        return q
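A typical use of this __call__ pattern (hypothetical sketch; the critic instance and the x_real/x_fake tensors are assumptions, not part of the original example) is to apply the same network to two inputs and pass reuse=True on the second call so both calls share one set of weights:

# Hypothetical usage: the second call reuses the variables created by the first.
q_real = critic(x_real)                # creates the variables under critic.name
q_fake = critic(x_fake, reuse=True)    # reuses those variables instead of creating new ones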
Example 14: create
def create(cls, embeddings, labels, **kwargs):
    model = cls()
    model.embeddings = embeddings
    model._record_state(**kwargs)
    model.lengths_key = kwargs.get('lengths_key')
    model.labels = labels
    nc = len(labels)
    # This only exists to make exporting easier
    model.pdrop_value = kwargs.get('dropout', 0.5)
    model.dropin_value = kwargs.get('dropin', {})
    model.sess = kwargs.get('sess', tf.Session())
    model.lengths = kwargs.get('lengths', tf.placeholder(tf.int32, [None], name="lengths"))
    model.y = kwargs.get('y', tf.placeholder(tf.int32, [None, None], name="y"))
    model.pdrop_in = kwargs.get('dropin', 0.0)
    model.labels = labels
    model.crf = bool(kwargs.get('crf', False))
    model.crf_mask = bool(kwargs.get('crf_mask', False))
    model.span_type = kwargs.get('span_type')
    model.proj = bool(kwargs.get('proj', False))
    model.feed_input = bool(kwargs.get('feed_input', False))
    model.activation_type = kwargs.get('activation', 'tanh')
    model.constraint = kwargs.get('constraint')
    # Wrap the constraint in a non-trainable variable so that it is saved
    # into the checkpoint. This means we won't need to recreate the actual
    # values of it when we reload the model.
    if model.constraint is not None:
        constraint = []
        for i, c in enumerate(model.constraint):
            constraint.append(tf.get_variable("constraint_{}".format(i), initializer=c, trainable=False))
        model.constraint = constraint
    embedseq = model.embed(**kwargs)
    seed = np.random.randint(10e8)
    enc_out = model.encode(embedseq, **kwargs)

    with tf.variable_scope("output") as model.out_scope:
        if model.feed_input is True:
            enc_out = tf.concat(axis=2, values=[enc_out, embedseq])

        # Converts seq to tensor, back to (B, T, W)
        T = tf.shape(enc_out)[1]
        H = enc_out.get_shape()[2]
        # Flatten from [B x T x H] -> [BT x H]
        enc_out_bt_x_h = tf.reshape(enc_out, [-1, H])
        init = xavier_initializer(True, seed)
        with tf.contrib.slim.arg_scope([fully_connected], weights_initializer=init):
            if model.proj is True:
                hidden = tf.layers.dropout(fully_connected(enc_out_bt_x_h, H,
                                           activation_fn=tf_activation(model.activation_type)),
                                           model.pdrop_value, training=TRAIN_FLAG())
                preds = fully_connected(hidden, nc, activation_fn=None, weights_initializer=init)
            else:
                preds = fully_connected(enc_out_bt_x_h, nc, activation_fn=None, weights_initializer=init)
        model.probs = tf.reshape(preds, [-1, T, nc], name="probs")
    return model
Example 15: dueling_model
def dueling_model(img_in, num_actions, scope, noisy=False, reuse=False,
                  concat_softmax=False):
    """As described in https://arxiv.org/abs/1511.06581"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8,
                                       stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4,
                                       stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3,
                                       stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("state_value"):
            if noisy:
                # Apply noisy network on fully connected layers
                # ref: https://arxiv.org/abs/1706.10295
                state_hidden = noisy_dense(out, name='noisy_fc1', size=512,
                                           activation_fn=tf.nn.relu)
                state_score = noisy_dense(state_hidden, name='noisy_fc2',
                                          size=1)
            else:
                state_hidden = layers.fully_connected(
                    out,
                    num_outputs=512,
                    activation_fn=tf.nn.relu
                )
                state_score = layers.fully_connected(state_hidden,
                                                     num_outputs=1,
                                                     activation_fn=None)

        with tf.variable_scope("action_value"):
            if noisy:
                # Apply noisy network on fully connected layers
                # ref: https://arxiv.org/abs/1706.10295
                actions_hidden = noisy_dense(out, name='noisy_fc1', size=512,
                                             activation_fn=tf.nn.relu)
                action_scores = noisy_dense(actions_hidden, name='noisy_fc2',
                                            size=num_actions)
            else:
                actions_hidden = layers.fully_connected(
                    out,
                    num_outputs=512,
                    activation_fn=tf.nn.relu
                )
                action_scores = layers.fully_connected(
                    actions_hidden,
                    num_outputs=num_actions,
                    activation_fn=None
                )
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores = action_scores - tf.expand_dims(
                action_scores_mean,
                1
            )
        return state_score + action_scores