This article collects typical usage examples of the Python method tensorflow.glorot_normal_initializer. If you are unsure what tensorflow.glorot_normal_initializer does or how to use it, the curated code samples below may help. You can also explore further usage examples from the enclosing module, tensorflow.
The following presents 15 code examples of tensorflow.glorot_normal_initializer, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
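Before the examples, here is a minimal, self-contained sketch of the method itself (assuming TensorFlow 1.x; in TensorFlow 2.x the equivalent is tf.keras.initializers.GlorotNormal). The variable name demo_weights is illustrative only:

import tensorflow as tf

# Glorot (Xavier) normal: draws from a truncated normal with
# stddev = sqrt(2 / (fan_in + fan_out)).
init = tf.glorot_normal_initializer(seed=42)
demo_weights = tf.get_variable("demo_weights", shape=[128, 64], initializer=init)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # The sample stddev lands a bit below sqrt(2 / (128 + 64)) ~= 0.102
    # because the distribution is truncated.
    print(sess.run(demo_weights).std())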
Example 1: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import glorot_normal_initializer [as alias]
def __init__(self, name, layer_conf):
    self._name = layer_conf.pop('name', None) or name
    activation_name = layer_conf.get('activation', None)
    if activation_name:
        layer_conf['activation'] = Layer.activation_dict[activation_name]
    self._kernel_initializer = layer_conf.pop('kernel_initializer', None)
    if isinstance(self._kernel_initializer, str):
        assert self._kernel_initializer in ('random_normal_initializer',
                                            'random_uniform_initializer',
                                            'glorot_normal_initializer',
                                            'glorot_uniform_initializer'), \
            "Invalid value of kernel_initializer; must be one of " \
            "['random_normal_initializer', 'random_uniform_initializer', " \
            "'glorot_normal_initializer', 'glorot_uniform_initializer']"
        self._kernel_initializer = Layer.initializer_dict[
            self._kernel_initializer]
    elif isinstance(self._kernel_initializer, (int, float)):
        self._kernel_initializer = tf.constant_initializer(
            value=self._kernel_initializer)
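This example assumes a Layer class exposing activation_dict and initializer_dict lookup tables. A minimal sketch of what they might contain (the exact contents are an assumption, not taken from the source project):

import tensorflow as tf

class Layer(object):
    # Hypothetical tables mapping config strings to TF callables.
    activation_dict = {'relu': tf.nn.relu, 'tanh': tf.tanh, 'sigmoid': tf.sigmoid}
    initializer_dict = {
        'random_normal_initializer': tf.random_normal_initializer(),
        'random_uniform_initializer': tf.random_uniform_initializer(),
        'glorot_normal_initializer': tf.glorot_normal_initializer(),
        'glorot_uniform_initializer': tf.glorot_uniform_initializer(),
    }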
Example 2: _residual_conv
# Required import: import tensorflow [as alias]
# Or: from tensorflow import glorot_normal_initializer [as alias]
def _residual_conv(self, input_signals: tf.Tensor, name: str):
    with tf.variable_scope(name):
        # Initialized as described in the paper.
        # Note: this may be equivalent to tf.glorot_normal_initializer
        init_deviat = np.sqrt(4 / self.conv_features)
        convolution_filters = get_variable(
            "convolution_filters",
            [self.kernel_width, self.conv_features,
             2 * self.conv_features],
            initializer=tf.random_normal_initializer(stddev=init_deviat))
        bias = get_variable(
            name="conv_bias",
            shape=[2 * self.conv_features],
            initializer=tf.zeros_initializer())
        conv = (tf.nn.conv1d(input_signals, convolution_filters, 1, "SAME")
                + bias)
        return glu(conv) + input_signals
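Whether the "may be equivalent" note holds is easy to check: for a 1-D conv kernel of shape [kernel_width, C, 2C], TF's Glorot normal draws from a (truncated) normal with stddev sqrt(2 / (fan_in + fan_out)), where fan_in = kernel_width * C and fan_out = kernel_width * 2C, while the code above uses sqrt(4 / C). A quick sketch with illustrative values (kernel_width and conv_features here are assumptions, not from the source):

import numpy as np

kernel_width, conv_features = 3, 256
paper_std = np.sqrt(4 / conv_features)          # 0.125
fan_in = kernel_width * conv_features           # 768
fan_out = kernel_width * 2 * conv_features      # 1536
glorot_std = np.sqrt(2 / (fan_in + fan_out))    # ~0.0295
print(paper_std, glorot_std)

The two scales differ for any kernel_width >= 1, so the initializers are not interchangeable in general; they only share the idea of scaling by fan size.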
Example 3: encoder_layer
# Required import: import tensorflow [as alias]
# Or: from tensorflow import glorot_normal_initializer [as alias]
def encoder_layer(input_sequence, dropout_keep_prob_tensor):
    self_attention_layer = multi_head_attention(input_sequence, dropout_keep_prob_tensor)
    if hp.self_attention_sublayer_residual_and_norm:
        self_attention_layer = tf.add(self_attention_layer, input_sequence)
        self_attention_layer = tf.contrib.layers.layer_norm(self_attention_layer)
    # Add the two-layer feed-forward sublayer with residual connection and
    # layer normalization, as in the Transformer.
    if hp.ffnn_sublayer:
        ffnn_sublayer_output = tf.layers.dense(
            self_attention_layer, hp.model_dim, activation=tf.nn.relu, use_bias=True,
            kernel_initializer=tf.glorot_normal_initializer(), bias_initializer=tf.zeros_initializer())
        ffnn_sublayer_output = tf.layers.dense(
            ffnn_sublayer_output, hp.model_dim, activation=tf.nn.relu, use_bias=True,
            kernel_initializer=tf.glorot_normal_initializer(), bias_initializer=tf.zeros_initializer())
        if hp.ffnn_sublayer_dropout:
            # Drop some activations to regularize the model.
            ffnn_sublayer_output = tf.nn.dropout(ffnn_sublayer_output, keep_prob=dropout_keep_prob_tensor)
        ffnn_sublayer_output = tf.add(ffnn_sublayer_output, self_attention_layer)
        encoder_layer_output = tf.contrib.layers.layer_norm(ffnn_sublayer_output)
    else:
        encoder_layer_output = self_attention_layer
    return encoder_layer_output
Example 4: create_gconv_variables
# Required import: import tensorflow [as alias]
# Or: from tensorflow import glorot_normal_initializer [as alias]
def create_gconv_variables(self, name_block, i, in_feat, fnet_feat, out_feat, rank_theta, stride_th1, stride_th2):
    name = name_block + "_nl_" + str(i) + "_flayer0"
    self.W[name] = tf.get_variable(name, [in_feat, fnet_feat], dtype=tf.float32, initializer=tf.glorot_normal_initializer())
    self.b[name] = tf.get_variable("b_" + name, [1, fnet_feat], dtype=tf.float32, initializer=tf.zeros_initializer())
    self.dn_vars = self.dn_vars + [self.W[name], self.b[name]]
    name = name_block + "_nl_" + str(i) + "_flayer1"
    self.W[name + "_th1"] = tf.get_variable(name + "_th1", [fnet_feat, stride_th1 * rank_theta], dtype=tf.float32,
                                            initializer=tf.random_normal_initializer(0, 1.0 / (np.sqrt(fnet_feat + 0.0) * np.sqrt(in_feat + 0.0))))
    self.b[name + "_th1"] = tf.get_variable(name + "_b_th1", [1, rank_theta, in_feat], dtype=tf.float32, initializer=tf.zeros_initializer())
    self.W[name + "_th2"] = tf.get_variable(name + "_th2", [fnet_feat, stride_th2 * rank_theta], dtype=tf.float32,
                                            initializer=tf.random_normal_initializer(0, 1.0 / (np.sqrt(fnet_feat + 0.0) * np.sqrt(in_feat + 0.0))))
    self.b[name + "_th2"] = tf.get_variable(name + "_b_th2", [1, rank_theta, out_feat], dtype=tf.float32, initializer=tf.zeros_initializer())
    self.W[name + "_thl"] = tf.get_variable(name + "_thl", [fnet_feat, rank_theta], dtype=tf.float32,
                                            initializer=tf.random_normal_initializer(0, 1.0 / np.sqrt(rank_theta + 0.0)))
    self.b[name + "_thl"] = tf.get_variable(name + "_b_thl", [1, rank_theta], dtype=tf.float32, initializer=tf.zeros_initializer())
    self.dn_vars = self.dn_vars + [self.W[name + "_th1"], self.b[name + "_th1"], self.W[name + "_th2"],
                                   self.b[name + "_th2"], self.W[name + "_thl"], self.b[name + "_thl"]]
    name = name_block + "_l_" + str(i)
    self.W[name] = tf.get_variable(name, [3, 3, in_feat, out_feat], dtype=tf.float32, initializer=tf.glorot_normal_initializer())
    self.dn_vars = self.dn_vars + [self.W[name]]
    name = name_block + "_" + str(i)
    self.b[name] = tf.get_variable(name, [1, out_feat], dtype=tf.float32, initializer=tf.zeros_initializer())
    self.dn_vars = self.dn_vars + [self.b[name]]
Example 5: cnn_with_2dfeature
# Required import: import tensorflow [as alias]
# Or: from tensorflow import glorot_normal_initializer [as alias]
def cnn_with_2dfeature(self, x2d, reuse=False):
    with tf.variable_scope('discriminator', reuse=reuse) as scope:
        block_num = 8
        filters = 16
        kernel_size = [4, 4]
        act = tf.nn.relu
        # kernel_initializer = tf.truncated_normal_initializer(stddev=0.01)
        kernel_initializer = tf.glorot_normal_initializer()
        # kernel_initializer = None
        bias_initializer = tf.zeros_initializer()
        # kernel_regularizer = tf.contrib.layers.l2_regularizer(scale=0.001)
        kernel_regularizer = None
        bias_regularizer = None
        for i in np.arange(block_num):
            inputs = x2d if i == 0 else conv_
            conv_ = tf.layers.conv2d(inputs=inputs, filters=filters,
                                     kernel_size=kernel_size, strides=(1, 1), padding='same', activation=act,
                                     kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
                                     kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer)
        logits = tf.layers.conv2d(inputs=conv_, filters=1,
                                  kernel_size=kernel_size, strides=(1, 1), padding='same',
                                  kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
                                  kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer)
        logits = tf.reshape(logits, (-1, tf.shape(logits)[1], tf.shape(logits)[2]))
        return tf.sigmoid(logits), logits
Example 6: resn_with_2dfeature
# Required import: import tensorflow [as alias]
# Or: from tensorflow import glorot_normal_initializer [as alias]
def resn_with_2dfeature(self, x2d, reuse=False):
    with tf.variable_scope('discriminator', reuse=reuse) as scope:
        block_num = 8
        filters = 32
        kernel_size = [4, 4]
        act = tf.nn.relu
        # kernel_initializer = tf.truncated_normal_initializer(stddev=0.01)
        kernel_initializer = tf.glorot_normal_initializer()
        # kernel_initializer = None
        bias_initializer = tf.zeros_initializer()
        # kernel_regularizer = tf.contrib.layers.l2_regularizer(scale=0.001)
        kernel_regularizer = None
        bias_regularizer = None
        prev = tf.layers.conv2d(inputs=x2d, filters=filters,
                                kernel_size=kernel_size, strides=(1, 1), padding='same',
                                kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
                                kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer)
        for i in np.arange(block_num):
            conv_ = act(prev)
            conv_ = tf.layers.conv2d(inputs=conv_, filters=filters,
                                     kernel_size=kernel_size, strides=(1, 1), padding='same',
                                     kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
                                     kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer)
            conv_ = act(conv_)
            conv_ = tf.layers.conv2d(inputs=conv_, filters=filters,
                                     kernel_size=kernel_size, strides=(1, 1), padding='same',
                                     kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
                                     kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer)
            prev = tf.add(conv_, prev)
        logits = tf.layers.conv2d(inputs=prev, filters=1,
                                  kernel_size=kernel_size, strides=(1, 1), padding='same',
                                  kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
                                  kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer)
        logits = tf.reshape(logits, (-1, tf.shape(logits)[1], tf.shape(logits)[2]))
        return tf.sigmoid(logits), logits
Example 7: classifier
# Required import: import tensorflow [as alias]
# Or: from tensorflow import glorot_normal_initializer [as alias]
def classifier(self, x, scales, filters, repeat, training, getter=None, **kwargs):
    del kwargs
    leaky_relu = functools.partial(tf.nn.leaky_relu, alpha=0.1)
    bn_args = dict(training=training, momentum=0.999)

    def conv_args(k, f):
        return dict(padding='same',
                    kernel_initializer=tf.random_normal_initializer(stddev=tf.rsqrt(0.5 * k * k * f)))

    def residual(x0, filters, stride=1, activate_before_residual=False):
        x = leaky_relu(tf.layers.batch_normalization(x0, **bn_args))
        if activate_before_residual:
            x0 = x
        x = tf.layers.conv2d(x, filters, 3, strides=stride, **conv_args(3, filters))
        x = leaky_relu(tf.layers.batch_normalization(x, **bn_args))
        x = tf.layers.conv2d(x, filters, 3, **conv_args(3, filters))
        if x0.get_shape()[3] != filters:
            x0 = tf.layers.conv2d(x0, filters, 1, strides=stride, **conv_args(1, filters))
        return x0 + x

    with tf.variable_scope('classify', reuse=tf.AUTO_REUSE, custom_getter=getter):
        y = tf.layers.conv2d((x - self.dataset.mean) / self.dataset.std, 16, 3, **conv_args(3, 16))
        for scale in range(scales):
            y = residual(y, filters << scale, stride=2 if scale else 1, activate_before_residual=scale == 0)
            for i in range(repeat - 1):
                y = residual(y, filters << scale)
        y = leaky_relu(tf.layers.batch_normalization(y, **bn_args))
        y = tf.reduce_mean(y, [1, 2])
        logits = tf.layers.dense(y, self.nclass, kernel_initializer=tf.glorot_normal_initializer())
        return logits
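Note the mixed initialization here: conv_args uses tf.random_normal_initializer with stddev = rsqrt(0.5 * k * k * f), which equals sqrt(2 / (k * k * f)), i.e. He-style scaling computed from the kernel size and filter count, while only the final dense layer uses tf.glorot_normal_initializer().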
Example 8: project_qkv
# Required import: import tensorflow [as alias]
# Or: from tensorflow import glorot_normal_initializer [as alias]
def project_qkv(input_sequence, output_dim, use_bias_and_activation=True):
    if use_bias_and_activation:
        return tf.layers.dense(input_sequence, output_dim, activation=tf.nn.relu, use_bias=True,
                               kernel_initializer=tf.glorot_normal_initializer(),
                               bias_initializer=tf.zeros_initializer())
    else:
        return tf.layers.dense(input_sequence, output_dim, activation=tf.nn.relu, use_bias=False,
                               kernel_initializer=tf.glorot_normal_initializer())
Example 9: multi_head_attention
# Required import: import tensorflow [as alias]
# Or: from tensorflow import glorot_normal_initializer [as alias]
def multi_head_attention(input_sequence, dropout_keep_prob_tensor):
    '''
    Returns a self-attention layer, configured according to the parameters in
    the global hparams dictionary.
    '''
    # Make sure the input word embedding dimension is divisible by the desired
    # number of heads.
    assert hp.model_dim % hp.self_attention_heads == 0
    qkv_dim = hp.model_dim / hp.self_attention_heads
    # Construct the Q, K, V matrices.
    q = project_qkv(input_sequence, hp.model_dim, hp.qkv_projections_bias_and_activation)
    k = project_qkv(input_sequence, hp.model_dim, hp.qkv_projections_bias_and_activation)
    v = project_qkv(input_sequence, hp.model_dim, hp.qkv_projections_bias_and_activation)
    qs, ks, vs = split_heads(q, k, v)
    if hp.use_relative_positions:
        outputs = dot_product_attention_relative(qs, ks, vs)
    else:
        outputs = scaled_dot_product(qs, ks, vs)
    san_output = concatenate_heads(outputs)
    if hp.self_attention_sublayer_bias_and_activation:
        san_output = tf.layers.dense(san_output, hp.model_dim, activation=tf.nn.relu, use_bias=True,
                                     kernel_initializer=tf.glorot_normal_initializer(),
                                     bias_initializer=tf.zeros_initializer())
    else:
        san_output = tf.layers.dense(san_output, hp.model_dim, activation=tf.nn.relu, use_bias=False,
                                     kernel_initializer=tf.glorot_normal_initializer())
    if hp.self_attention_sublayer_dropout:
        # Drop extra activations (keep_prob lowered by 0.2) to regularize the model.
        san_output = tf.nn.dropout(san_output, keep_prob=(dropout_keep_prob_tensor - 0.2))
        print("multi-head attention dropout more:", 0.2)
    return san_output
Example 10: transformerClassifier
# Required import: import tensorflow [as alias]
# Or: from tensorflow import glorot_normal_initializer [as alias]
def transformerClassifier(x_tensor, output_dim, wordIndxToVec_tensor, dropoutKeep_tensor, max_sentence_length):
    with tf.variable_scope("Embedding_Layer"):
        emb = tf.nn.embedding_lookup(wordIndxToVec_tensor, x_tensor)
    # Add positional encodings to the embeddings fed to the encoder.
    if hp.include_positional_encoding:
        with tf.variable_scope("Add_Position_Encoding"):
            posEnc = positional_encoding(hp.model_dim, max_sentence_length)
            emb = tf.add(emb, posEnc, name="Add_Positional_Encoding")
    if hp.input_emb_apply_dropout:
        with tf.variable_scope("Input_Embeddings_Dropout"):
            # Drop some input information to regularize the model.
            emb = tf.nn.dropout(emb, keep_prob=dropoutKeep_tensor)
    for i in range(1, hp.num_layers + 1):
        with tf.variable_scope("Stack-Layer-{0}".format(i)):
            encoder_output = encoder_layer(emb, dropout_keep_prob_tensor=dropoutKeep_tensor)
            emb = encoder_output
    # Average the final sequence-position representations to obtain a
    # fixed-size "sentence representation".
    sentence_representation = tf.reduce_mean(encoder_output, axis=1)  # [batch_size, model_dim]
    with tf.variable_scope("Sentence_Representation_And_Output"):
        sentence_representation = tf.layers.dense(sentence_representation, hp.model_dim, activation=tf.nn.relu,
                                                  use_bias=True, kernel_initializer=tf.glorot_normal_initializer(),
                                                  bias_initializer=tf.zeros_initializer())
        if hp.sentence_representation_dropout:
            sentence_representation = tf.nn.dropout(sentence_representation, keep_prob=dropoutKeep_tensor)
        prediction_logits = tf.layers.dense(sentence_representation, output_dim, activation=None, use_bias=False,
                                            kernel_initializer=tf.glorot_normal_initializer())
    return prediction_logits
Example 11: create_variable_initializer
# Required import: import tensorflow [as alias]
# Or: from tensorflow import glorot_normal_initializer [as alias]
def create_variable_initializer(initializer_type,
                                random_seed=None,
                                data_type=tf.float32):
    """create variable initializer"""
    if initializer_type == "zero":
        initializer = tf.zeros_initializer
    elif initializer_type == "one":
        initializer = tf.ones_initializer
    elif initializer_type == "orthogonal":
        initializer = tf.orthogonal_initializer(seed=random_seed, dtype=data_type)
    elif initializer_type == "random_uniform":
        initializer = tf.random_uniform_initializer(seed=random_seed, dtype=data_type)
    elif initializer_type == "glorot_uniform":
        initializer = tf.glorot_uniform_initializer(seed=random_seed, dtype=data_type)
    elif initializer_type == "xavier_uniform":
        initializer = tf.contrib.layers.xavier_initializer(uniform=True, seed=random_seed, dtype=tf.float32)
    elif initializer_type == "random_normal":
        initializer = tf.random_normal_initializer(seed=random_seed, dtype=data_type)
    elif initializer_type == "truncated_normal":
        initializer = tf.truncated_normal_initializer(seed=random_seed, dtype=data_type)
    elif initializer_type == "glorot_normal":
        initializer = tf.glorot_normal_initializer(seed=random_seed, dtype=data_type)
    elif initializer_type == "xavier_normal":
        initializer = tf.contrib.layers.xavier_initializer(uniform=False, seed=random_seed, dtype=tf.float32)
    elif initializer_type == "variance_scaling":
        initializer = tf.contrib.layers.variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False,
                                                                     seed=random_seed, dtype=tf.float32)
    else:
        initializer = None
    return initializer
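A small usage sketch (TF 1.x), assuming the create_variable_initializer function above is in scope; the variable name and shape are illustrative:

import tensorflow as tf

initializer = create_variable_initializer("glorot_normal", random_seed=7)
embedding = tf.get_variable("embedding", shape=[10000, 300],
                            dtype=tf.float32, initializer=initializer)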
Example 12: sparse_linear
# Required import: import tensorflow [as alias]
# Or: from tensorflow import glorot_normal_initializer [as alias]
def sparse_linear(xs, shape, name: str, actfunc=identity):
    assert len(shape) == 2
    w = tf.get_variable(name, initializer=tf.glorot_normal_initializer(),
                        shape=shape)
    bias = tf.Variable(tf.zeros(shape[1]))
    return actfunc(tf.sparse_tensor_dense_matmul(xs, w) + bias), w
Example 13: linear
# Required import: import tensorflow [as alias]
# Or: from tensorflow import glorot_normal_initializer [as alias]
def linear(xs, shape, name: str, actfunc=identity):
    assert len(shape) == 2
    w = tf.get_variable(name, initializer=tf.glorot_normal_initializer(),
                        shape=shape)
    bias = tf.Variable(tf.zeros(shape[1]))
    output = tf.matmul(xs, w) + bias
    return actfunc(output)
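A usage sketch for the linear helper above (TF 1.x). The identity default is assumed to be a plain pass-through, and the placeholder shapes are illustrative:

import tensorflow as tf

identity = lambda x: x  # assumed default activation

xs = tf.placeholder(tf.float32, [None, 784])
hidden = linear(xs, [784, 256], "hidden", actfunc=tf.nn.relu)
logits = linear(hidden, [256, 10], "logits")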
Example 14: separable_conv2d
# Required import: import tensorflow [as alias]
# Or: from tensorflow import glorot_normal_initializer [as alias]
def separable_conv2d(input, output, name, is_training, kernel_size, depth_multiplier=1,
                     reuse=None, with_bn=True, activation=tf.nn.elu):
    conv2d = tf.layers.separable_conv2d(input, output, kernel_size=kernel_size, strides=(1, 1), padding='VALID',
                                        activation=activation,
                                        depth_multiplier=depth_multiplier,
                                        depthwise_initializer=tf.glorot_normal_initializer(),
                                        pointwise_initializer=tf.glorot_normal_initializer(),
                                        depthwise_regularizer=tf.contrib.layers.l2_regularizer(scale=1.0),
                                        pointwise_regularizer=tf.contrib.layers.l2_regularizer(scale=1.0),
                                        reuse=reuse, name=name, use_bias=not with_bn)
    return batch_normalization(conv2d, is_training, name + '_bn', reuse) if with_bn else conv2d
Example 15: depthwise_conv2d
# Required import: import tensorflow [as alias]
# Or: from tensorflow import glorot_normal_initializer [as alias]
def depthwise_conv2d(input, depth_multiplier, name, is_training, kernel_size,
                     reuse=None, with_bn=True, activation=tf.nn.elu):
    conv2d = tf.contrib.layers.separable_conv2d(input, num_outputs=None, kernel_size=kernel_size, padding='VALID',
                                                activation_fn=activation,
                                                depth_multiplier=depth_multiplier,
                                                weights_initializer=tf.glorot_normal_initializer(),
                                                weights_regularizer=tf.contrib.layers.l2_regularizer(scale=1.0),
                                                biases_initializer=None if with_bn else tf.zeros_initializer(),
                                                biases_regularizer=None if with_bn else tf.contrib.layers.l2_regularizer(scale=1.0),
                                                reuse=reuse, scope=name)
    return batch_normalization(conv2d, is_training, name + '_bn', reuse) if with_bn else conv2d