This article collects typical usage examples of the Python method tensorflow.orthogonal_initializer. If you are wondering what exactly tensorflow.orthogonal_initializer does, how to use it, and what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the tensorflow module, to which this method belongs.

Fifteen code examples of tensorflow.orthogonal_initializer are shown below, sorted by popularity by default.
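Before the examples, here is a minimal self-contained sketch (TensorFlow 1.x graph-mode API; the variable name and shape are illustrative, not taken from any example below) of what tf.orthogonal_initializer produces: a random (semi-)orthogonal matrix, optionally scaled by a gain factor.

import numpy as np
import tensorflow as tf

# A square weight matrix initialized to a random orthogonal matrix.
w = tf.get_variable('w', shape=[64, 64],
                    initializer=tf.orthogonal_initializer(gain=1.0))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    w_val = sess.run(w)
    # For a square matrix, W^T W is numerically close to the identity.
    print(np.allclose(w_val.T.dot(w_val), np.eye(64), atol=1e-5))  # True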
Example 1: get_variable_initializer
# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
def get_variable_initializer(hparams):
    """Get variable initializer from hparams."""
    if not hparams.initializer:
        return None

    if not tf.contrib.eager.in_eager_mode():
        tf.logging.info("Using variable initializer: %s", hparams.initializer)
    if hparams.initializer == "orthogonal":
        return tf.orthogonal_initializer(gain=hparams.initializer_gain)
    elif hparams.initializer == "uniform":
        max_val = 0.1 * hparams.initializer_gain
        return tf.random_uniform_initializer(-max_val, max_val)
    elif hparams.initializer == "normal_unit_scaling":
        return tf.variance_scaling_initializer(
            hparams.initializer_gain, mode="fan_avg", distribution="normal")
    elif hparams.initializer == "uniform_unit_scaling":
        return tf.variance_scaling_initializer(
            hparams.initializer_gain, mode="fan_avg", distribution="uniform")
    elif hparams.initializer == "xavier":
        return tf.contrib.layers.xavier_initializer()
    else:
        raise ValueError("Unrecognized initializer: %s" % hparams.initializer)
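A hedged usage sketch for the function above; the hparams object here is a hand-rolled stand-in carrying only the two fields the function reads (in Tensor2Tensor, where this function comes from, it would be a full HParams instance):

class _FakeHParams(object):
    # Illustrative stand-in, not the real Tensor2Tensor HParams.
    initializer = "orthogonal"
    initializer_gain = 1.0

init = get_variable_initializer(_FakeHParams())
weights = tf.get_variable("weights", shape=[256, 256], initializer=init)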
Example 2: init_param
# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
# (The snippet also relies on a dot helper from its source module.)
def init_param(self):
    idm = self.input_dim
    hs = self.hidden_size
    ws = len(self.window)
    nf = idm * ws
    # The author's special initialization strategy.
    self.Wemb = tf.get_variable(name=self.name + '_Wemb', shape=[self.vocab_size, idm], dtype=tf.float32, initializer=tf.random_uniform_initializer())
    self.bhid = tf.get_variable(name=self.name + '_bhid', shape=[self.vocab_size], dtype=tf.float32, initializer=tf.zeros_initializer())
    self.Vhid = tf.get_variable(name=self.name + '_Vhid', shape=[hs, idm], dtype=tf.float32, initializer=tf.random_uniform_initializer())
    self.Vhid = dot(self.Vhid, self.Wemb)  # [hidden_size, vocab_size]
    self.i2h_W = tf.get_variable(name=self.name + '_i2h_W', shape=[idm, hs * 4], dtype=tf.float32, initializer=tf.random_uniform_initializer())
    # Orthogonal initialization for the recurrent (hidden-to-hidden) weights.
    self.h2h_W = tf.get_variable(name=self.name + '_h2h_W', shape=[hs, hs * 4], dtype=tf.float32, initializer=tf.orthogonal_initializer())
    self.z2h_W = tf.get_variable(name=self.name + '_z2h_W', shape=[nf, hs * 4], dtype=tf.float32, initializer=tf.random_uniform_initializer())
    # Gate biases: the second quarter is initialized to 3, a large positive
    # value commonly used for the forget gate so the LSTM remembers by default.
    b_init_1 = tf.zeros((hs,))
    b_init_2 = tf.ones((hs,)) * 3
    b_init_3 = tf.zeros((hs,))
    b_init_4 = tf.zeros((hs,))
    b_init = tf.concat([b_init_1, b_init_2, b_init_3, b_init_4], axis=0)
    # When the initializer is a constant tensor, the shape must not be passed
    # (otherwise: ValueError: If initializer is a constant, do not specify shape).
    self.b = tf.get_variable(name=self.name + '_b', dtype=tf.float32, initializer=b_init)
    self.C0 = tf.get_variable(name=self.name + '_C0', shape=[nf, hs], dtype=tf.float32, initializer=tf.random_uniform_initializer())
    self.b0 = tf.get_variable(name=self.name + '_b0', shape=[hs], dtype=tf.float32, initializer=tf.zeros_initializer())
Example 3: _transform_compare
# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
def _transform_compare(self, sequence, sequence_length, reuse=False):
    with tf.variable_scope('transform_compare', reuse=reuse):
        sequence = tf.nn.dropout(sequence, keep_prob=self.dropout_keep_prob)
        projection = tf.contrib.layers.fully_connected(
            inputs=sequence,
            num_outputs=self.representation_size,
            weights_initializer=tf.random_normal_initializer(0.0, 0.01),
            biases_initializer=tf.zeros_initializer(),
            activation_fn=tf.nn.relu)
        # Orthogonal initialization for the LSTM weights in both directions.
        cell_fw = tf.contrib.rnn.LSTMCell(self.representation_size, state_is_tuple=True, reuse=reuse,
                                          initializer=tf.orthogonal_initializer())
        cell_bw = tf.contrib.rnn.LSTMCell(self.representation_size, state_is_tuple=True, reuse=reuse,
                                          initializer=tf.orthogonal_initializer())
        outputs, output_states = tf.nn.bidirectional_dynamic_rnn(
            cell_fw=cell_fw, cell_bw=cell_bw,
            inputs=projection,
            sequence_length=sequence_length,
            dtype=tf.float32)
        return tf.concat(outputs, axis=2)
Example 4: get_initializer
# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
def get_initializer(params):
    if params.initializer == "uniform":
        max_val = 0.1 * params.initializer_gain
        return tf.random_uniform_initializer(-max_val, max_val)
    elif params.initializer == "normal":
        return tf.random_normal_initializer(0.0, params.initializer_gain)
    elif params.initializer == "orthogonal":
        return tf.orthogonal_initializer(params.initializer_gain)
    elif params.initializer == "normal_unit_scaling":
        return tf.variance_scaling_initializer(params.initializer_gain,
                                               mode="fan_avg",
                                               distribution="normal")
    elif params.initializer == "uniform_unit_scaling":
        return tf.variance_scaling_initializer(params.initializer_gain,
                                               mode="fan_avg",
                                               distribution="uniform")
    else:
        raise ValueError("Unrecognized initializer: %s" % params.initializer)
Example 5: hidden
# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
# (The snippet also relies on nn and nonlin helper modules from its source repository.)
def hidden(layer, hidden_size, hidden_func=nonlin.relu, hidden_keep_prob=1.):
    """Applies a hidden layer (affine transform + nonlinearity) with optional dropout."""
    layer_shape = nn.get_sizes(layer)
    input_size = layer_shape.pop()
    weights = tf.get_variable('Weights', shape=[input_size, hidden_size])  # , initializer=tf.orthogonal_initializer
    biases = tf.get_variable('Biases', shape=[hidden_size], initializer=tf.zeros_initializer)
    if hidden_keep_prob < 1.:
        if len(layer_shape) > 1:
            noise_shape = tf.stack(layer_shape[:-1] + [1, input_size])
        else:
            noise_shape = None
        layer = nn.dropout(layer, hidden_keep_prob, noise_shape=noise_shape)
    layer = nn.reshape(layer, [-1, input_size])
    layer = tf.matmul(layer, weights) + biases
    layer = hidden_func(layer)
    layer = nn.reshape(layer, layer_shape + [hidden_size])
    return layer
Example 6: nature_cnn
# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
# (The snippet also relies on import math and a product helper from its source module.)
def nature_cnn(obs_batch, dense=tf.layers.dense):
    """
    Apply the CNN architecture from the Nature DQN paper.

    The result is a batch of feature vectors.
    """
    conv_kwargs = {
        'activation': tf.nn.relu,
        # gain=sqrt(2) is the standard orthogonal-init scaling for ReLU layers.
        'kernel_initializer': tf.orthogonal_initializer(gain=math.sqrt(2))
    }
    with tf.variable_scope('layer_1'):
        cnn_1 = tf.layers.conv2d(obs_batch, 32, 8, 4, **conv_kwargs)
    with tf.variable_scope('layer_2'):
        cnn_2 = tf.layers.conv2d(cnn_1, 64, 4, 2, **conv_kwargs)
    with tf.variable_scope('layer_3'):
        cnn_3 = tf.layers.conv2d(cnn_2, 64, 3, 1, **conv_kwargs)
    flat_size = product([x.value for x in cnn_3.get_shape()[1:]])
    flat_in = tf.reshape(cnn_3, (tf.shape(cnn_3)[0], int(flat_size)))
    return dense(flat_in, 512, **conv_kwargs)
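An illustrative call, assuming the 84x84x4 stacked-frame input shape used by the Nature DQN paper (the placeholder itself is made up for this sketch):

observations = tf.placeholder(tf.float32, shape=[None, 84, 84, 4])
features = nature_cnn(observations)  # -> a [batch, 512] tensor of feature vectors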
Example 7: make_cnn
# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
# (The snippet also relies on import numpy as np and a layers module, e.g. tf.contrib.layers.)
def make_cnn(convs, padding, inpt, initializer=None):
    if initializer is None:
        # Default to orthogonal initialization with gain sqrt(2) (ReLU scaling).
        initializer = tf.orthogonal_initializer(np.sqrt(2.0))
    out = inpt
    with tf.variable_scope('convnet'):
        for num_outputs, kernel_size, stride in convs:
            out = layers.convolution2d(
                out,
                num_outputs=num_outputs,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                activation_fn=tf.nn.relu,
                weights_initializer=initializer
            )
    return out
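A hedged usage sketch: the (num_outputs, kernel_size, stride) triples below reproduce the classic DQN convolution stack, and with initializer left as None every layer gets the orthogonal default:

frames = tf.placeholder(tf.float32, shape=[None, 84, 84, 4])
conv_out = make_cnn(convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
                    padding='VALID',
                    inpt=frames)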
Example 8: head
# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
# (The snippet also relies on slim, i.e. tf.contrib.slim.)
def head(endpoints, embedding_dim, is_training):
    endpoints['head_output'] = slim.fully_connected(
        endpoints['model_output'], 1024, normalizer_fn=slim.batch_norm,
        normalizer_params={
            'decay': 0.9,
            'epsilon': 1e-5,
            'scale': True,
            'is_training': is_training,
            'updates_collections': tf.GraphKeys.UPDATE_OPS,
        })
    endpoints['emb_raw'] = slim.fully_connected(
        endpoints['head_output'], embedding_dim, activation_fn=None,
        weights_initializer=tf.orthogonal_initializer(), scope='emb')
    endpoints['emb'] = tf.identity(endpoints['emb_raw'], name="out_emb")
    return endpoints
Example 9: head
# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
# (The snippet also relies on slim, i.e. tf.contrib.slim.)
def head(endpoints, embedding_dim, is_training):
    endpoints['head_output'] = slim.fully_connected(
        endpoints['model_output'], 1024, normalizer_fn=slim.batch_norm,
        normalizer_params={
            'decay': 0.9,
            'epsilon': 1e-5,
            'scale': True,
            'is_training': is_training,
            'updates_collections': tf.GraphKeys.UPDATE_OPS,
        })
    endpoints['emb_raw'] = slim.fully_connected(
        endpoints['head_output'], embedding_dim, activation_fn=None,
        weights_initializer=tf.orthogonal_initializer(), scope='emb')
    endpoints['emb'] = tf.nn.l2_normalize(endpoints['emb_raw'], -1, name="out_emb")
    return endpoints
Example 10: head
# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
# (The snippet also relies on slim, i.e. tf.contrib.slim.)
def head(endpoints, embedding_dim, is_training):
    endpoints['head_output'] = slim.fully_connected(
        endpoints['model_output'], 1024, normalizer_fn=slim.batch_norm,
        normalizer_params={
            'decay': 0.9,
            'epsilon': 1e-5,
            'scale': True,
            'is_training': is_training,
            'updates_collections': tf.GraphKeys.UPDATE_OPS,
        })
    endpoints['emb'] = endpoints['emb_raw'] = slim.fully_connected(
        endpoints['head_output'], embedding_dim, activation_fn=None,
        weights_initializer=tf.orthogonal_initializer(), scope='emb')
    return endpoints
Example 11: head
# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
# (The snippet also relies on slim, i.e. tf.contrib.slim.)
def head(endpoints, embedding_dim, is_training):
    endpoints['head_output'] = slim.fully_connected(
        endpoints['model_output'], 1024, normalizer_fn=slim.batch_norm,
        normalizer_params={
            'decay': 0.9,
            'epsilon': 1e-5,
            'scale': True,
            'is_training': is_training,
            'updates_collections': tf.GraphKeys.UPDATE_OPS,
        })
    endpoints['emb_raw'] = slim.fully_connected(
        endpoints['head_output'], embedding_dim, activation_fn=None,
        weights_initializer=tf.orthogonal_initializer(), scope='emb')
    endpoints['emb'] = tf.nn.l2_normalize(endpoints['emb_raw'], -1)
    return endpoints
Example 12: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
def __init__(self, x, y, params, mode, scope='Encoder', reuse=tf.AUTO_REUSE):
    self.x = x
    self.y = y
    self.params = params
    self.batch_size = tf.shape(x)[0]
    self.vocab_size = params['encoder_vocab_size']
    self.emb_size = params['encoder_emb_size']
    self.hidden_size = params['encoder_hidden_size']
    self.encoder_length = params['encoder_length']
    self.weight_decay = params['weight_decay']
    self.mode = mode
    self.time_major = params['time_major']
    self.is_training = self.mode == tf.estimator.ModeKeys.TRAIN
    if not self.is_training:
        self.params['encoder_dropout'] = 0.0
        self.params['mlp_dropout'] = 0.0
    # initializer = tf.orthogonal_initializer()
    initializer = tf.random_uniform_initializer(-0.1, 0.1)
    tf.get_variable_scope().set_initializer(initializer)
    self.build_graph(scope=scope, reuse=reuse)
Example 13: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
def __init__(self, x, y, params, mode, scope='Encoder', reuse=False):
    self.x = x
    self.y = y
    self.params = params
    self.batch_size = tf.shape(x)[0]
    self.vocab_size = params['encoder_vocab_size']
    self.emb_size = params['encoder_emb_size']
    self.hidden_size = params['encoder_hidden_size']
    self.encoder_length = params['encoder_length']
    self.weight_decay = params['weight_decay']
    self.mode = mode
    self.time_major = params['time_major']
    self.weighted_loss = params['weighted_loss']
    self.is_training = self.mode == tf.estimator.ModeKeys.TRAIN
    if not self.is_training:
        self.params['encoder_dropout'] = 0.0
        self.params['mlp_dropout'] = 0.0
    # initializer = tf.orthogonal_initializer()
    self.build_graph(scope, reuse)
Example 14: matmul_2d
# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
# (The snippet also relies on a module-level log logger.)
def matmul_2d(x, out_dimension, drop_prob=None):
    '''Multiplies a 2-d tensor by an orthogonally initialized weight matrix.

    Args:
        x: a tensor with shape [batch, dimension]
        out_dimension: a number
        drop_prob: if given, the keep probability used for dropout on the weights

    Returns:
        a tensor with shape [batch, out_dimension]
    '''
    W = tf.get_variable(
        name='weights',
        shape=[x.shape[1], out_dimension],
        dtype=tf.float32,
        initializer=tf.orthogonal_initializer())
    if drop_prob is not None:
        # Note: in TF 1.x the second argument of tf.nn.dropout is the *keep*
        # probability, so drop_prob acts as a keep probability here.
        W = tf.nn.dropout(W, drop_prob)
        log.info('applied dropout to W')
    return tf.matmul(x, W)
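One caveat worth showing: because matmul_2d creates its variable under the fixed name 'weights', each call must live in its own variable scope, otherwise tf.get_variable raises a duplicate-variable error. A minimal sketch (tensor names and shapes are illustrative):

x = tf.placeholder(tf.float32, shape=[None, 300])
with tf.variable_scope('layer_1'):
    h = matmul_2d(x, 256)
with tf.variable_scope('layer_2'):
    logits = matmul_2d(h, 10)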
Example 15: lookup_initializer
# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
def lookup_initializer(self, name, config):
    if name == 'orthogonal':
        orthogonal_gain = self.config_option("orthogonal_gain", 1.0, config)
        return self.orthogonal_initializer(orthogonal_gain)
    elif name == 'he_normal':
        return self.he_normal_initializer()
    elif name == 'xavier':
        return self.xavier_initializer()
    elif name == 'stylegan':
        return self.stylegan_initializer(config or self.config)
    elif name == 'random_normal':
        return self.random_initializer(self.config_option("random_stddev", 0.02, config))
    else:
        raise Exception("initializer not found", name)