This article collects typical usage examples of the Python method tensorflow.contrib.layers.xavier_initializer. If you are wondering what layers.xavier_initializer does and how to use it, the curated code examples below may help. You can also browse further usage examples from the containing module, tensorflow.contrib.layers.
Below are 15 code examples of layers.xavier_initializer, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
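Before the examples, here is a minimal sketch of the most common usage pattern (the variable names and shapes are illustrative, not taken from any of the examples below):

import tensorflow as tf
from tensorflow.contrib import layers

# Xavier/Glorot initialization scales the initial weights by the variable's
# fan-in and fan-out, keeping activation variance roughly constant across layers.
W = tf.get_variable('W', shape=[784, 256],
                    initializer=layers.xavier_initializer())

# It is also commonly passed to layer constructors:
x = tf.placeholder(tf.float32, [None, 784])
h = layers.fully_connected(x, num_outputs=256,
                           weights_initializer=layers.xavier_initializer(uniform=False))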
Example 1: build
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import xavier_initializer [as alias]
def build(self, input_shape):
"""
Input shape is (None, 10, 7, 7, 1024)
:param input_shape:
:return:
"""
assert len(input_shape) == 5
_, self.n_timesteps_in, self.side_dim1, self.side_dim2, self.n_channels = input_shape
initializer = contrib_layers.xavier_initializer()
weight_shape = [self.n_channels, self.n_timesteps_in, self.n_timesteps_out]
bias_shape = [self.n_channels, 1, self.n_timesteps_out]
with tf.variable_scope(self.name) as scope:
self.conv_weights = tf.get_variable('dense_weights', shape=weight_shape, initializer=initializer)
self.conv_biases = tf.get_variable('dense_biases', shape=bias_shape, initializer=tf.constant_initializer(0.1))
self.trainable_weights = [self.conv_weights, self.conv_biases]
super(DepthwiseDenseLayer, self).build(input_shape)
Example 2: _build_net
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import xavier_initializer [as alias]
def _build_net(self, input_BO, scope):
""" The Actor network.
Uses ReLUs for all hidden layers, but a tanh to the output to bound the
action. This follows their 'low-dimensional networks' using 400 and 300
units for the hidden layers. Set `reuse=False`. I don't use batch
normalization or their precise weight initialization.
"""
with tf.variable_scope(scope, reuse=False):
hidden1 = layers.fully_connected(input_BO,
num_outputs=400,
weights_initializer=layers.xavier_initializer(),
activation_fn=tf.nn.relu)
hidden2 = layers.fully_connected(hidden1,
num_outputs=300,
weights_initializer=layers.xavier_initializer(),
activation_fn=tf.nn.relu)
actions_BA = layers.fully_connected(hidden2,
num_outputs=self.ac_dim,
weights_initializer=layers.xavier_initializer(),
activation_fn=tf.nn.tanh) # Note the tanh!
# This should broadcast, but haven't tested with ac_dim > 1.
actions_BA = tf.multiply(actions_BA, self.ac_high)
return actions_BA
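Note the design choice here: the tanh bounds each action component to [-1, 1], and the elementwise multiply by self.ac_high rescales it into the environment's action range. This implicitly assumes the action space is symmetric around zero (low = -high), which typically holds for the standard continuous-control benchmarks DDPG is evaluated on.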
Example 3: _make_network
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import xavier_initializer [as alias]
def _make_network(self, data_in, out_dim):
""" Build the network with the same architecture following OpenAI's paper.
Returns the final *layer* of the network, which corresponds to our
chosen action. There is no non-linearity for the last layer because
different envs have different action ranges.
"""
with tf.variable_scope("ESAgent", reuse=False):
out = data_in
out = layers.fully_connected(out, num_outputs=64,
weights_initializer = layers.xavier_initializer(uniform=True),
#weights_initializer = utils.normc_initializer(0.5),
activation_fn = tf.nn.tanh)
out = layers.fully_connected(out, num_outputs=64,
weights_initializer = layers.xavier_initializer(uniform=True),
#weights_initializer = utils.normc_initializer(0.5),
activation_fn = tf.nn.tanh)
out = layers.fully_connected(out, num_outputs=out_dim,
weights_initializer = layers.xavier_initializer(uniform=True),
#weights_initializer = utils.normc_initializer(0.5),
activation_fn = None)
return out
Example 4: _create_embeddings
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import xavier_initializer [as alias]
def _create_embeddings(self, device='/cpu:0'):
""" Create all embedding matrices in this function.
    :param device: The storage device for all the embeddings. If
           you are using multiple GPUs, it is best to store
           the embeddings on the CPU to avoid costly GPU-to-GPU
           memory copying. The embeddings should be stored under
           the variable scope self.embedding_scope.
:return:
"""
with tf.device(device):
with tf.variable_scope(self.embedding_scope):
self.word_embedding = tf.get_variable('word_embedding',
[self.n_vocab + self.word_oov, self.word_embedding_size],
dtype=tf.float32,
initializer=layers.xavier_initializer())
Example 5: _create_embeddings
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import xavier_initializer [as alias]
def _create_embeddings(self, device='/cpu:0'):
""" Create all embedding matrices in this function.
    :param device: The storage device for all the embeddings. If
           you are using multiple GPUs, it is best to store
           the embeddings on the CPU to avoid costly GPU-to-GPU
           memory copying. The embeddings should be stored under
           the variable scope self.embedding_scope.
:return:
"""
with tf.device(device):
with tf.variable_scope(self.embedding_scope):
self.word_embedding = tf.get_variable('word_embedding',
[self.n_vocab + self.word_oov, self.word_embedding_size],
dtype=tf.float32,
initializer=layers.xavier_initializer(),
trainable=not self.fix_embedding)
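The only difference from Example 4 is the trainable flag: with trainable=not self.fix_embedding, pretrained word vectors can be frozen during training, a common choice when the downstream dataset is small.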
Example 6: dense_layer
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import xavier_initializer [as alias]
def dense_layer(inputs, output_size, activation, use_bias, name):
"""
A simple dense layer.
:param inputs: batch of inputs.
:param output_size: dimensionality of the output.
:param activation: activation function to use.
:param use_bias: whether to have bias weights or not.
:param name: name used to scope this operation.
:return: batch of outputs.
"""
return tf.layers.dense(
inputs=inputs,
units=output_size,
kernel_initializer=xavier_initializer(uniform=False),
use_bias=use_bias,
bias_initializer=tf.random_normal_initializer(stddev=1e-3),
activation=activation,
name=name,
reuse=tf.AUTO_REUSE)
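A hypothetical call to the helper above (the input tensor and layer names are illustrative):

h = dense_layer(inputs, output_size=128, activation=tf.nn.relu,
                use_bias=True, name='hidden1')
logits = dense_layer(h, output_size=10, activation=None,
                     use_bias=True, name='logits')

Because of reuse=tf.AUTO_REUSE, calling dense_layer a second time with the same name shares the existing weights rather than raising an error.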
Example 7: __init__
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import xavier_initializer [as alias]
def __init__(self,
config,
args,
word_vecs,
init = tf.random_uniform_initializer(minval=-0.01, maxval=0.01), # init = layers.xavier_initializer(),
name='HATN'):
self.cfg = config
self.args = args
self.word_vecs = word_vecs
self.init = init
self.name = name
self.memory_size = self.cfg.memory_size
self.sent_size = self.cfg.sent_size
self.embed_size = self.cfg.embed_size
self.hidden_size = self.cfg.hidden_size
self.l2_reg_lambda = self.cfg.l2_reg_lambda
self.max_grad_norm = self.cfg.max_grad_norm
self.hops = self.cfg.hops
self.build_vars()
self.build_eval_op()
Example 8: __init__
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import xavier_initializer [as alias]
def __init__(self,
config,
args,
word_vecs,
init = tf.random_uniform_initializer(minval=-0.01, maxval=0.01), # init = layers.xavier_initializer(),
name='PNet'):
self.cfg = config
self.args = args
self.word_vecs = word_vecs
self.init = init
self.name = name
self.memory_size = self.cfg.memory_size
self.sent_size = self.cfg.sent_size
self.embed_size = self.cfg.embed_size
self.hidden_size = self.cfg.hidden_size
self.l2_reg_lambda = self.cfg.l2_reg_lambda
self.max_grad_norm = self.cfg.max_grad_norm
self.hops = self.cfg.hops
self.build_vars()
self.build_eval_op()
Example 9: inference
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import xavier_initializer [as alias]
def inference(input_images):
with slim.arg_scope([slim.conv2d], kernel_size=3, padding='SAME'):
with slim.arg_scope([slim.max_pool2d], kernel_size=2):
x = slim.conv2d(input_images, num_outputs=32, weights_initializer=initializers.xavier_initializer(),
scope='conv1_1')
x = slim.conv2d(x, num_outputs=32, weights_initializer=initializers.xavier_initializer(), scope='conv1_2')
x = slim.max_pool2d(x, scope='pool1')
x = slim.conv2d(x, num_outputs=64, weights_initializer=initializers.xavier_initializer(), scope='conv2_1')
x = slim.conv2d(x, num_outputs=64, weights_initializer=initializers.xavier_initializer(), scope='conv2_2')
x = slim.max_pool2d(x, scope='pool2')
x = slim.conv2d(x, num_outputs=128, weights_initializer=initializers.xavier_initializer(), scope='conv3_1')
x = slim.conv2d(x, num_outputs=128, weights_initializer=initializers.xavier_initializer(), scope='conv3_2')
x = slim.max_pool2d(x, scope='pool3')
x = slim.flatten(x, scope='flatten')
feature = slim.fully_connected(x, num_outputs=2, activation_fn=None, scope='fc1')
x = tflearn.prelu(feature)
x = slim.fully_connected(x, num_outputs=10, activation_fn=None, scope='fc2')
return x, feature
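The 2-unit fc1 layer is returned separately as feature, which suggests this network is intended for directly visualizing the learned 2-D features (as in center-loss style MNIST demos), with fc2 producing the 10-class logits.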
Example 10: EncoderPCNN
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import xavier_initializer [as alias]
def EncoderPCNN(self, is_training, init_vec=None):
with tf.variable_scope("sentence-encoder", dtype=tf.float32, initializer=xavier(), reuse=tf.AUTO_REUSE):
input_dim = self.input_embedding.shape[2]
mask_embedding = tf.constant([[0,0,0],[1,0,0],[0,1,0],[0,0,1]], dtype=np.float32)
pcnn_mask = tf.nn.embedding_lookup(mask_embedding, self.mask)
input_sentence = tf.expand_dims(self.input_embedding, axis=1)
with tf.variable_scope("conv2d"):
conv_kernel = self._GetVar(init_vec=init_vec,key='convkernel',name='kernel',
shape=[1,3,input_dim,FLAGS.hidden_size],trainable=True)
conv_bias = self._GetVar(init_vec=init_vec,key='convbias',name='bias',shape=[FLAGS.hidden_size],trainable=True)
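            # Note: tf.layers.conv2d below creates and manages its own kernel/bias
            # variables, so conv_kernel and conv_bias above appear to be created
            # only so that values from init_vec are materialized in this scope.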
x = tf.layers.conv2d(inputs = input_sentence, filters=FLAGS.hidden_size,
kernel_size=[1,3], strides=[1, 1], padding='same', reuse=tf.AUTO_REUSE)
x = tf.reshape(x, [-1, FLAGS.max_length, FLAGS.hidden_size, 1])
x = tf.reduce_max(tf.reshape(pcnn_mask, [-1, 1, FLAGS.max_length, 3]) * tf.transpose(x,[0, 2, 1, 3]), axis = 2)
x = tf.nn.relu(tf.reshape(x, [-1, FLAGS.hidden_size * 3]))
return x
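The reshape/transpose/reduce_max sequence implements PCNN's piecewise max pooling: the three mask channels split each sentence into the segments before, between, and after the two entity mentions, max pooling is applied within each segment, and the results are concatenated into a hidden_size * 3 feature vector.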
Example 11: EncoderLSTM
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import xavier_initializer [as alias]
def EncoderLSTM(self, is_training, init_vec=None):
with tf.variable_scope("sentence-encoder", dtype=tf.float32, initializer=xavier(), reuse=tf.AUTO_REUSE):
        input_sentence = tf.layers.dropout(self.input_embedding, rate = 1.0 - self.keep_prob, training = is_training)  # tf.layers.dropout expects the drop probability, hence 1 - keep_prob
fw_cell = tf.contrib.rnn.BasicLSTMCell(FLAGS.hidden_size, state_is_tuple=True)
bw_cell = tf.contrib.rnn.BasicLSTMCell(FLAGS.hidden_size, state_is_tuple=True)
outputs, states = tf.nn.bidirectional_dynamic_rnn(
fw_cell, bw_cell, input_sentence,
sequence_length = self.len,
dtype = tf.float32,
scope = 'bi-dynamic-rnn')
fw_states, bw_states = states
if isinstance(fw_states, tuple):
fw_states = fw_states[0]
bw_states = bw_states[0]
        x = tf.concat([fw_states, bw_states], axis=1)  # concatenate the final forward and backward states
return x
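The returned encoding concatenates the final forward and backward states, giving a 2 * FLAGS.hidden_size vector. Note that indexing an LSTMStateTuple with [0] selects the cell state c; implementations that want the hidden state h would use index [1] instead.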
Example 12: conv2d_transpose
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import xavier_initializer [as alias]
def conv2d_transpose(
inputs,
out_shape,
kernel_size=(5, 5),
stride=(1, 1),
activation_fn=tf.nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=layers.xavier_initializer(),
scope=None,
reuse=None):
batchsize = tf.shape(inputs)[0]
in_channels = int(inputs.get_shape()[-1])
output_shape = tf.stack([batchsize, out_shape[0],
out_shape[1], out_shape[2]])
filter_shape = [kernel_size[0], kernel_size[1], out_shape[2], in_channels]
with tf.variable_scope(scope, 'Conv2d_transpose', [inputs], reuse=reuse):
w = tf.get_variable('weights', filter_shape,
initializer=weights_initializer)
outputs = tf.nn.conv2d_transpose(
inputs, w, output_shape=output_shape,
strides=[1, stride[0], stride[1], 1])
outputs.set_shape([None] + out_shape)
if not normalizer_fn:
biases = tf.get_variable('biases', [out_shape[2]],
initializer=tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return outputs
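A hypothetical call (shapes are illustrative): tf.nn.conv2d_transpose defaults to 'SAME' padding, so each spatial dimension is multiplied by the stride.

feat = tf.placeholder(tf.float32, [None, 14, 14, 64])
up = conv2d_transpose(feat, out_shape=[28, 28, 32],
                      kernel_size=(5, 5), stride=(2, 2),
                      activation_fn=tf.nn.relu)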
Example 13: policy_model
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import xavier_initializer [as alias]
def policy_model(data_in, action_dim):
""" Create a neural network representing the BC policy. It will be trained
using standard supervised learning techniques.
Parameters
----------
data_in: [Tensor]
The input (a placeholder) to the network, with leading dimension
representing the batch size.
action_dim: [int]
Number of actions, each of which (at least for MuJoCo) is
continuous-valued.
Returns
-------
    out: [Tensor]
The output tensor which represents the predicted (or desired, if
testing) action to take for the agent.
"""
with tf.variable_scope("BCNetwork", reuse=False):
out = data_in
out = layers.fully_connected(out, num_outputs=100,
weights_initializer=layers.xavier_initializer(uniform=True),
activation_fn=tf.nn.tanh)
out = layers.fully_connected(out, num_outputs=100,
weights_initializer=layers.xavier_initializer(uniform=True),
activation_fn=tf.nn.tanh)
out = layers.fully_connected(out, num_outputs=action_dim,
weights_initializer=layers.xavier_initializer(uniform=True),
activation_fn=None)
return out
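A minimal sketch of how this policy might be trained with supervised learning (the placeholders, loss, and obs_dim are assumptions for illustration, not part of the original code):

obs = tf.placeholder(tf.float32, [None, obs_dim])             # obs_dim: observation size (assumed)
act_targets = tf.placeholder(tf.float32, [None, action_dim])  # expert actions
pred = policy_model(obs, action_dim)
loss = tf.reduce_mean(tf.reduce_sum(tf.square(pred - act_targets), axis=1))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)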
Example 14: __init__
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import xavier_initializer [as alias]
def __init__(self, session, ob_dim=None, n_epochs=20, stepsize=1e-3):
""" The network gets constructed upon initialization so future calls to
self.fit will remember this.
    Right now we assume a preprocessing which results in ob_dim*2+1 dimensions,
    and we assume a fixed neural network architecture (input-50-50-1, fully
    connected with tanh nonlinearities), which we should probably change.
The number of outputs is one, so that ypreds_n is the predicted vector
of state values, to be compared against ytargs_n. Since ytargs_n is of
shape (n,), we need to apply a "squeeze" on the final predictions, which
would otherwise be of shape (n,1). Bleh.
"""
# Value function V(s_t) (or b(s_t)), parameterized as a neural network.
self.ob_no = tf.placeholder(shape=[None, ob_dim*2+1], name="nnvf_ob", dtype=tf.float32)
self.h1 = layers.fully_connected(self.ob_no,
num_outputs=50,
weights_initializer=layers.xavier_initializer(uniform=True),
activation_fn=tf.nn.tanh)
self.h2 = layers.fully_connected(self.h1,
num_outputs=50,
weights_initializer=layers.xavier_initializer(uniform=True),
activation_fn=tf.nn.tanh)
self.ypreds_n = layers.fully_connected(self.h2,
num_outputs=1,
weights_initializer=layers.xavier_initializer(uniform=True),
activation_fn=None)
self.ypreds_n = tf.reshape(self.ypreds_n, [-1]) # (?,1) --> (?,). =)
# Form the loss function, which is the simple (mean) L2 error.
self.n_epochs = n_epochs
self.lrate = stepsize
self.ytargs_n = tf.placeholder(shape=[None], name="nnvf_y", dtype=tf.float32)
self.l2_error = tf.reduce_mean(tf.square(self.ypreds_n - self.ytargs_n))
self.fit_op = tf.train.AdamOptimizer(self.lrate).minimize(self.l2_error)
self.sess = session
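A minimal sketch of the fit method the docstring refers to (hypothetical; the original class defines its own, possibly with minibatching):

def fit(self, X, y):
    # X: (n, ob_dim*2+1) preprocessed observations; y: (n,) value targets
    for _ in range(self.n_epochs):
        self.sess.run(self.fit_op,
                      feed_dict={self.ob_no: X, self.ytargs_n: y})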
Example 15: add_predictions
# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import xavier_initializer [as alias]
def add_predictions(net, end_points):
pose_xyz = tf.layers.dense(
net, 3, name='cls3_fc_pose_xyz', kernel_initializer=xavier_initializer())
end_points['cls3_fc_pose_xyz'] = pose_xyz
pose_wpqr = tf.layers.dense(
net,
4,
name='cls3_fc_pose_wpqr',
kernel_initializer=xavier_initializer())
end_points['cls3_fc_pose_wpqr'] = pose_wpqr
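These two heads mirror PoseNet-style camera pose regression: a 3-D translation (xyz) and a 4-D quaternion (wpqr), both predicted from the shared feature tensor net.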