This article collects typical usage examples of the tensorflow.uniform_unit_scaling_initializer method in Python. If you have been wondering what exactly tensorflow.uniform_unit_scaling_initializer does or how to use it, the curated code examples below may help. You can also explore further usage examples from the enclosing tensorflow module.
A total of 15 code examples of tensorflow.uniform_unit_scaling_initializer are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
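Before the examples, a minimal sketch of the initializer itself (TensorFlow 1.x API; it was deprecated in favor of tf.variance_scaling_initializer and removed in TensorFlow 2.x). It draws weights uniformly from an interval scaled by the input dimension so that activations keep roughly unit variance. The shape and factor below are illustrative:

import tensorflow as tf  # TensorFlow 1.x

# Illustrative shape: 128 inputs, 64 outputs.
w = tf.get_variable(
    "w", shape=[128, 64],
    initializer=tf.uniform_unit_scaling_initializer(factor=1.0))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(w).std())  # roughly factor / sqrt(128) for this shape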
Example 1: init_vq_bottleneck
# Required import: import tensorflow [as alias]
# Or: from tensorflow import uniform_unit_scaling_initializer [as alias]
def init_vq_bottleneck(bottleneck_size, hidden_size):
  """Get lookup table for VQ bottleneck."""
  means = tf.get_variable(
      name="means",
      shape=[bottleneck_size, hidden_size],
      initializer=tf.uniform_unit_scaling_initializer())
  ema_count = tf.get_variable(
      name="ema_count",
      shape=[bottleneck_size],
      initializer=tf.constant_initializer(0),
      trainable=False)
  with tf.colocate_with(means):
    ema_means = tf.get_variable(
        name="ema_means",
        initializer=means.initialized_value(),
        trainable=False)
  return means, ema_means, ema_count
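For context, a hedged sketch of how these three tensors are typically consumed in a VQ-VAE exponential-moving-average codebook update; decay, x (encoder outputs), and x_means_hot (one-hot code assignments) are illustrative names, not part of the source above:

decay = 0.999
counts = tf.reduce_sum(x_means_hot, axis=0)        # [bottleneck_size]
updated_count = tf.assign(
    ema_count, ema_count * decay + counts * (1 - decay))
dw = tf.matmul(x_means_hot, x, transpose_a=True)   # [bottleneck_size, hidden_size]
updated_means = tf.assign(
    ema_means, ema_means * decay + dw * (1 - decay))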
Example 2: get_vq_bottleneck
# Required import: import tensorflow [as alias]
# Or: from tensorflow import uniform_unit_scaling_initializer [as alias]
def get_vq_bottleneck(bottleneck_size, hidden_size):
  """Get lookup table for VQ bottleneck."""
  with tf.variable_scope("vq", reuse=tf.AUTO_REUSE):
    means = tf.get_variable(
        name="means",
        shape=[bottleneck_size, hidden_size],
        initializer=tf.uniform_unit_scaling_initializer())
    ema_count = tf.get_variable(
        name="ema_count",
        shape=[bottleneck_size],
        initializer=tf.constant_initializer(0),
        trainable=False)
    with tf.colocate_with(means):
      ema_means = tf.get_variable(
          name="ema_means",
          initializer=means.initialized_value(),
          trainable=False)
  return means, ema_means, ema_count
Example 3: fully_connected
# Required import: import tensorflow [as alias]
# Or: from tensorflow import uniform_unit_scaling_initializer [as alias]
def fully_connected(x, out_dim):
  """FullyConnected layer.

  Parameters:
    x (Tensor): Input tensor to the fully connected layer.
    out_dim (int): Output dimension of the fully connected layer.

  Returns:
    The Tensor corresponding to the fully connected layer output.
  """
  w = tf.get_variable(
      'DW', [x.get_shape()[1], out_dim],
      initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
  b = tf.get_variable('biases', [out_dim],
                      initializer=tf.constant_initializer())
  return tf.nn.xw_plus_b(x, w, b)
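A hedged usage sketch for fully_connected; the placeholder shape and scope name are illustrative:

x = tf.placeholder(tf.float32, [None, 256])
with tf.variable_scope('logits'):
    y = fully_connected(x, out_dim=10)  # -> [None, 10]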
Example 4: get_vq_codebook
# Required import: import tensorflow [as alias]
# Or: from tensorflow import uniform_unit_scaling_initializer [as alias]
def get_vq_codebook(codebook_size, hidden_size):
  """Get lookup table for VQ bottleneck."""
  with tf.variable_scope("vq", reuse=tf.AUTO_REUSE):
    means = tf.get_variable(
        name="means",
        shape=[codebook_size, hidden_size],
        initializer=tf.uniform_unit_scaling_initializer())
    ema_count = tf.get_variable(
        name="ema_count",
        shape=[codebook_size],
        initializer=tf.constant_initializer(0),
        trainable=False)
    with tf.colocate_with(means):
      ema_means = tf.get_variable(
          name="ema_means",
          initializer=means.initialized_value(),
          trainable=False)
  return means, ema_means, ema_count
Example 5: add_embeddings
# Required import: import tensorflow [as alias]
# Or: from tensorflow import uniform_unit_scaling_initializer [as alias]
def add_embeddings(self):
    """Embedding layer."""
    with tf.variable_scope('embedding'):
        if self.config.embeddings is not None:
            embeddings = tf.Variable(self.config.embeddings, name="embeddings", trainable=False)
        else:
            embeddings = tf.get_variable(
                'embeddings',
                shape=[self.config.vocab_size, self.config.embedding_size],
                initializer=tf.uniform_unit_scaling_initializer())
        q_embed = tf.nn.embedding_lookup(embeddings, self.q)
        aplus_embed = tf.nn.embedding_lookup(embeddings, self.aplus)
        aminus_embed = tf.nn.embedding_lookup(embeddings, self.aminus)
        q_embed = tf.nn.dropout(q_embed, keep_prob=self.keep_prob)
        aplus_embed = tf.nn.dropout(aplus_embed, keep_prob=self.keep_prob)
        aminus_embed = tf.nn.dropout(aminus_embed, keep_prob=self.keep_prob)
        return q_embed, aplus_embed, aminus_embed
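The method assumes self.q, self.aplus, self.aminus, and self.keep_prob are defined elsewhere in the class. A hypothetical setup might look like this (max_q_len and max_a_len are assumed config fields, not shown in the source):

# Hypothetical placeholders assumed by add_embeddings:
self.q = tf.placeholder(tf.int32, [None, self.config.max_q_len], name="q")
self.aplus = tf.placeholder(tf.int32, [None, self.config.max_a_len], name="aplus")
self.aminus = tf.placeholder(tf.int32, [None, self.config.max_a_len], name="aminus")
self.keep_prob = tf.placeholder(tf.float32, name="keep_prob")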
Example 6: _HL_layer
# Required import: import tensorflow [as alias]
# Or: from tensorflow import uniform_unit_scaling_initializer [as alias]
def _HL_layer(self, bottom, n_weight, name):
    """Fully connected (hidden) layer applied to each time step."""
    assert len(bottom.get_shape()) == 3
    n_prev_weight = bottom.get_shape()[-1]
    max_len = bottom.get_shape()[1]
    # Note: `initer` is created but never used below; W is initialized with
    # uniform_unit_scaling_initializer instead.
    initer = tf.truncated_normal_initializer(stddev=0.01)
    W = tf.get_variable(name + 'W', dtype=tf.float32, shape=[n_prev_weight, n_weight],
                        initializer=tf.uniform_unit_scaling_initializer())
    b = tf.get_variable(name + 'b', dtype=tf.float32,
                        initializer=tf.constant(0.1, shape=[n_weight], dtype=tf.float32))
    bottom_2 = tf.reshape(bottom, [-1, n_prev_weight])
    hl = tf.nn.bias_add(tf.matmul(bottom_2, W), b)
    hl_tanh = tf.nn.tanh(hl)
    HL = tf.reshape(hl_tanh, [-1, max_len, n_weight])
    return HL
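A hedged shape sketch, assuming model is an instance of the class above; the reshape collapses [batch, max_len, emb] to [batch * max_len, emb] for the matmul, then expands back:

x = tf.random_normal([32, 40, 300])                # [batch, max_len, emb]
hl = model._HL_layer(x, n_weight=128, name='hl1')  # -> [32, 40, 128]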
Example 7: _build
# Required import: import tensorflow [as alias]
# Or: from tensorflow import uniform_unit_scaling_initializer [as alias]
def _build(self, embeddings):
    if embeddings is not None:
        self.Embedding = tf.Variable(tf.to_float(embeddings), trainable=False, name='Embedding')
    else:
        self.Embedding = tf.get_variable('Embedding', shape=[self.vocab_size, self.embedding_size],
                                         initializer=tf.uniform_unit_scaling_initializer())
    self.q_embed = tf.nn.dropout(tf.nn.embedding_lookup(self.Embedding, self._ques),
                                 keep_prob=self.dropout_keep_prob)
    self.a_embed = tf.nn.dropout(tf.nn.embedding_lookup(self.Embedding, self._ans),
                                 keep_prob=self.dropout_keep_prob)
    with tf.variable_scope('siamese') as scope:
        # Compute the hidden and convolutional layers; reuse_variables() makes
        # the question and answer branches share weights.
        hl_q = self._HL_layer(self.q_embed, self.hidden_size, 'HL_layer')
        conv1_q = self._cnn_layer(hl_q)
        scope.reuse_variables()
        hl_a = self._HL_layer(self.a_embed, self.hidden_size, 'HL_layer')
        conv1_a = self._cnn_layer(hl_a)
    with tf.variable_scope('fc') as scope:
        con = tf.concat([conv1_q, conv1_a], axis=-1)
        logits = self.fc_layer(con, 1, 'fc_layer')
    res = tf.nn.sigmoid(logits)
    return logits, res
Example 8: build_graph
# Required import: import tensorflow [as alias]
# Or: from tensorflow import uniform_unit_scaling_initializer [as alias]
def build_graph(self):
    parameters = self.parameters
    # Passing the initializer to the variable scope makes it the default for
    # every variable created inside the scope.
    with tf.variable_scope(name_or_scope=self.scope,
                           initializer=tf.uniform_unit_scaling_initializer()):
        seq_ids_pl, seq_other_ids_pls, inputs = self.build_input_graph(
            vocab_size=parameters['vocab_size'],
            emb_size=parameters['emb_size'],
            word_window_size=parameters['word_window_size'],
            word_vocab_size=parameters['word_vocab_size'],
            word_emb_size=parameters['word_emb_size'])
        stag_ids_pl, seq_lengths_pl, is_train_pl, cost_op, train_cost_op, scores_op, summary_op = \
            self.build_tagging_graph(
                inputs=inputs,
                num_tags=parameters['num_tags'],
                use_crf=parameters['use_crf'],
                lamd=parameters['lamd'],
                dropout_emb=parameters['dropout_emb'],
                dropout_hidden=parameters['dropout_hidden'],
                hidden_layers=parameters['hidden_layers'],
                channels=parameters['channels'],
                kernel_size=parameters['kernel_size'],
                use_bn=parameters['use_bn'],
                use_wn=parameters['use_wn'],
                active_type=parameters['active_type'])
    self.seq_ids_pl = seq_ids_pl
    self.seq_other_ids_pls = seq_other_ids_pls
    self.stag_ids_pl = stag_ids_pl
    self.seq_lengths_pl = seq_lengths_pl
    self.is_train_pl = is_train_pl
    self.cost_op = cost_op
    self.train_cost_op = train_cost_op
    self.scores_op = scores_op
    self.summary_op = summary_op
Example 9: _fully_connected
# Required import: import tensorflow [as alias]
# Or: from tensorflow import uniform_unit_scaling_initializer [as alias]
def _fully_connected(self, x, out_dim):
  """FullyConnected layer for final output."""
  x = tf.reshape(x, [self.hps.batch_size, -1])
  w = tf.get_variable(
      'DW', [x.get_shape()[1], out_dim],
      initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
  b = tf.get_variable('biases', [out_dim],
                      initializer=tf.constant_initializer())
  return tf.nn.xw_plus_b(x, w, b)
Example 10: score
# Required import: import tensorflow [as alias]
# Or: from tensorflow import uniform_unit_scaling_initializer [as alias]
def score(feature_vec):
    W = tf.get_variable("W", shape=[feature_vec.get_shape()[1], 1],
                        initializer=tf.uniform_unit_scaling_initializer())
    return tf.matmul(feature_vec, W)
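A hedged usage sketch; the feature size and scope name are illustrative:

feature_vec = tf.placeholder(tf.float32, [None, 256])
with tf.variable_scope("scorer"):
    logits = score(feature_vec)  # -> [None, 1]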
Example 11: _fully_connected
# Required import: import tensorflow [as alias]
# Or: from tensorflow import uniform_unit_scaling_initializer [as alias]
def _fully_connected(self, x, out_dim):
  """FullyConnected layer for final output."""
  num_non_batch_dimensions = len(x.get_shape())
  prod_non_batch_dimensions = 1
  for ii in range(num_non_batch_dimensions - 1):
    prod_non_batch_dimensions *= int(x.get_shape()[ii + 1])
  x = tf.reshape(x, [tf.shape(x)[0], -1])
  w = tf.get_variable(
      'DW', [prod_non_batch_dimensions, out_dim],
      initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
  b = tf.get_variable('biases', [out_dim],
                      initializer=tf.constant_initializer())
  return tf.nn.xw_plus_b(x, w, b)
Example 12: _fully_connected
# Required import: import tensorflow [as alias]
# Or: from tensorflow import uniform_unit_scaling_initializer [as alias]
def _fully_connected(self, x, out_dim):
  """FullyConnected layer for final output."""
  num_non_batch_dimensions = len(x.shape)
  prod_non_batch_dimensions = 1
  for ii in range(num_non_batch_dimensions - 1):
    prod_non_batch_dimensions *= int(x.shape[ii + 1])
  x = tf.reshape(x, [tf.shape(x)[0], -1])
  w = tf.get_variable(
      'DW', [prod_non_batch_dimensions, out_dim],
      initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
  b = tf.get_variable('biases', [out_dim],
                      initializer=tf.constant_initializer())
  return tf.nn.xw_plus_b(x, w, b)
Example 13: sharded_variable
# Required import: import tensorflow [as alias]
# Or: from tensorflow import uniform_unit_scaling_initializer [as alias]
def sharded_variable(name, shape, num_shards, dtype=tf.float32, transposed=False):
  # The final size of the sharded variable may be larger than requested.
  # This should be fine for embeddings.
  shard_size = int((shape[0] + num_shards - 1) / num_shards)
  if transposed:
    initializer = tf.uniform_unit_scaling_initializer(dtype=dtype, full_shape=[shape[1], shape[0]])
  else:
    initializer = tf.uniform_unit_scaling_initializer(dtype=dtype, full_shape=shape)
  return [tf.get_variable(name + "_%d" % i, [shard_size, shape[1]], initializer=initializer, dtype=dtype)
          for i in range(num_shards)]

# XXX(rafal): Code below copied from rnn_cell.py
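A hedged usage sketch for sharded_variable; note that the full_shape argument only existed in early TensorFlow releases and was later removed from uniform_unit_scaling_initializer, so this code targets that old API. vocab_size and hidden_size are illustrative:

shards = sharded_variable("softmax_w", shape=[vocab_size, hidden_size], num_shards=8)
w = tf.concat(shards, 0)  # may end up slightly taller than vocab_size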
Example 14: linear
# Required import: import tensorflow [as alias]
# Or: from tensorflow import uniform_unit_scaling_initializer [as alias]
def linear(input_, output_dim, scope=None, stddev=.7):
    unif = tf.uniform_unit_scaling_initializer()
    norm = tf.random_normal_initializer(stddev=stddev)
    const = tf.constant_initializer(0.0)
    with tf.variable_scope(scope or 'linear'):
        # The unit-scaling initializer is left commented out here in favor of
        # the normal initializer:
        # w = tf.get_variable('w', [input_.get_shape()[1], output_dim], initializer=unif)
        w = tf.get_variable('w', [input_.get_shape()[1], output_dim], initializer=norm)
        b = tf.get_variable('b', [output_dim], initializer=const)
        return tf.matmul(input_, w) + b
Example 15: _fully_connected
# Required import: import tensorflow [as alias]
# Or: from tensorflow import uniform_unit_scaling_initializer [as alias]
def _fully_connected(self, x, out_dim):
  """FullyConnected layer for final output."""
  x = tf.contrib.layers.flatten(x)
  w = tf.get_variable(
      'DW', [x.get_shape()[1], out_dim],
      initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
  b = tf.get_variable('biases', [out_dim],
                      initializer=tf.constant_initializer())
  return tf.nn.xw_plus_b(x, w, b)