

Python tensorflow.orthogonal_initializer Method Code Examples

This article collects typical usage examples of the Python method tensorflow.orthogonal_initializer. If you are wondering how tensorflow.orthogonal_initializer is used in practice, the curated code examples below may help. You can also browse further usage examples from the tensorflow module where this method is defined.


The following presents 15 code examples of tensorflow.orthogonal_initializer, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
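Before the project examples, here is a minimal, self-contained sketch of the typical call pattern (TF 1.x graph mode, as in the examples below); the variable name, shape, and gain value are illustrative assumptions rather than code from any of the listed projects:

import tensorflow as tf

# Build an orthogonal initializer; gain scales the generated orthogonal matrix.
init = tf.orthogonal_initializer(gain=1.0)

# Use it when creating a weight variable (TF 1.x variable API).
w = tf.get_variable("recurrent_kernel", shape=[256, 256],
                    dtype=tf.float32, initializer=init)

Orthogonal initialization is most often applied to square or tall weight matrices such as RNN recurrent kernels, which is why several of the examples below pass it to LSTM cells and hidden-to-hidden weights.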

Example 1: get_variable_initializer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
def get_variable_initializer(hparams):
  """Get variable initializer from hparams."""
  if not hparams.initializer:
    return None

  if not tf.contrib.eager.in_eager_mode():
    tf.logging.info("Using variable initializer: %s", hparams.initializer)
  if hparams.initializer == "orthogonal":
    return tf.orthogonal_initializer(gain=hparams.initializer_gain)
  elif hparams.initializer == "uniform":
    max_val = 0.1 * hparams.initializer_gain
    return tf.random_uniform_initializer(-max_val, max_val)
  elif hparams.initializer == "normal_unit_scaling":
    return tf.variance_scaling_initializer(
        hparams.initializer_gain, mode="fan_avg", distribution="normal")
  elif hparams.initializer == "uniform_unit_scaling":
    return tf.variance_scaling_initializer(
        hparams.initializer_gain, mode="fan_avg", distribution="uniform")
  elif hparams.initializer == "xavier":
    return tf.contrib.layers.xavier_initializer()
  else:
    raise ValueError("Unrecognized initializer: %s" % hparams.initializer) 
Developer: akzaidi, Project: fine-lm, Lines: 24, Source: optimize.py

Example 2: init_param

# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
def init_param(self):
		idm = self.input_dim
		hs = self.hidden_size
		ws = len(self.window)
		nf = idm * ws
		# author's special initialization strategy.
		self.Wemb = tf.get_variable(name=self.name + '_Wemb', shape=[self.vocab_size, idm], dtype=tf.float32, initializer=tf.random_uniform_initializer())
		self.bhid = tf.get_variable(name=self.name + '_bhid', shape=[self.vocab_size], dtype=tf.float32, initializer=tf.zeros_initializer())
		self.Vhid = tf.get_variable(name=self.name + '_Vhid', shape=[hs, idm], dtype=tf.float32, initializer=tf.random_uniform_initializer())
		self.Vhid = dot(self.Vhid, self.Wemb) # [hidden_size, vocab_size]
		self.i2h_W = tf.get_variable(name=self.name + '_i2h_W', shape=[idm, hs * 4], dtype=tf.float32, initializer=tf.random_uniform_initializer())
		self.h2h_W = tf.get_variable(name=self.name + '_h2h_W', shape=[hs, hs * 4], dtype=tf.float32, initializer=tf.orthogonal_initializer())
		self.z2h_W = tf.get_variable(name=self.name + '_z2h_W', shape=[nf, hs * 4], dtype=tf.float32, initializer=tf.random_uniform_initializer())
		b_init_1 = tf.zeros((hs,))
		b_init_2 = tf.ones((hs,)) * 3
		b_init_3 = tf.zeros((hs,))
		b_init_4 = tf.zeros((hs,))
		b_init = tf.concat([b_init_1, b_init_2, b_init_3, b_init_4], axis=0)
		# b_init = tf.constant(b_init)
		# self.b = tf.get_variable(name=self.name + '_b', shape=[hs * 4], dtype=tf.float32, initializer=b_init)
		self.b = tf.get_variable(name=self.name + '_b', dtype=tf.float32, initializer=b_init) # ValueError: If initializer is a constant, do not specify shape.
		self.C0 = tf.get_variable(name=self.name + '_C0', shape=[nf, hs], dtype=tf.float32, initializer=tf.random_uniform_initializer())
		self.b0 = tf.get_variable(name=self.name + '_b0', shape=[hs], dtype=tf.float32, initializer=tf.zeros_initializer()) 
Developer: Jeff-HOU, Project: UROP-Adversarial-Feature-Matching-for-Text-Generation, Lines: 25, Source: generator.py

Example 3: _transform_compare

# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
def _transform_compare(self, sequence, sequence_length, reuse=False):
        with tf.variable_scope('transform_compare', reuse=reuse) as _:
            sequence = tf.nn.dropout(sequence, keep_prob=self.dropout_keep_prob)
            projection = tf.contrib.layers.fully_connected(inputs=sequence,
                                                           num_outputs=self.representation_size,
                                                           weights_initializer=tf.random_normal_initializer(0.0, 0.01),
                                                           biases_initializer=tf.zeros_initializer(),
                                                           activation_fn=tf.nn.relu)
            cell_fw = tf.contrib.rnn.LSTMCell(self.representation_size, state_is_tuple=True, reuse=reuse,
                                              initializer=tf.orthogonal_initializer())
            cell_bw = tf.contrib.rnn.LSTMCell(self.representation_size, state_is_tuple=True, reuse=reuse,
                                              initializer=tf.orthogonal_initializer())
            outputs, output_states = tf.nn.bidirectional_dynamic_rnn(
                cell_fw=cell_fw, cell_bw=cell_bw,
                inputs=projection,
                sequence_length=sequence_length,
                dtype=tf.float32)
        return tf.concat(outputs, axis=2) 
Developer: uclnlp, Project: inferbeddings, Lines: 20, Source: esim.py

Example 4: get_initializer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
def get_initializer(params):
    if params.initializer == "uniform":
        max_val = 0.1 * params.initializer_gain
        return tf.random_uniform_initializer(-max_val, max_val)
    elif params.initializer == "normal":
        return tf.random_normal_initializer(0.0, params.initializer_gain)
    elif params.initializer == "orthogonal":
        return tf.orthogonal_initializer(params.initializer_gain)
    elif params.initializer == "normal_unit_scaling":
        return tf.variance_scaling_initializer(params.initializer_gain,
                                               mode="fan_avg",
                                               distribution="normal")
    elif params.initializer == "uniform_unit_scaling":
        return tf.variance_scaling_initializer(params.initializer_gain,
                                               mode="fan_avg",
                                               distribution="uniform")
    else:
        raise ValueError("Unrecognized initializer: %s" % params.initializer) 
Developer: Imagist-Shuo, Project: UNMT-SPR, Lines: 20, Source: train.py

Example 5: hidden

# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
def hidden(layer, hidden_size, hidden_func=nonlin.relu, hidden_keep_prob=1.):
  """"""

  layer_shape = nn.get_sizes(layer)
  input_size = layer_shape.pop()
  weights = tf.get_variable('Weights', shape=[input_size, hidden_size])#, initializer=tf.orthogonal_initializer)
  biases = tf.get_variable('Biases', shape=[hidden_size], initializer=tf.zeros_initializer)
  if hidden_keep_prob < 1.:
    if len(layer_shape) > 1:
      noise_shape = tf.stack(layer_shape[:-1] + [1, input_size])
    else:
      noise_shape = None
    layer = nn.dropout(layer, hidden_keep_prob, noise_shape=noise_shape)
  
  layer = nn.reshape(layer, [-1, input_size])
  layer = tf.matmul(layer, weights) + biases
  layer = hidden_func(layer)
  layer = nn.reshape(layer, layer_shape + [hidden_size])
  return layer

#=============================================================== 
Developer: tdozat, Project: Parser-v3, Lines: 23, Source: classifiers.py

Example 6: nature_cnn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
def nature_cnn(obs_batch, dense=tf.layers.dense):
    """
    Apply the CNN architecture from the Nature DQN paper.

    The result is a batch of feature vectors.
    """
    conv_kwargs = {
        'activation': tf.nn.relu,
        'kernel_initializer': tf.orthogonal_initializer(gain=math.sqrt(2))
    }
    with tf.variable_scope('layer_1'):
        cnn_1 = tf.layers.conv2d(obs_batch, 32, 8, 4, **conv_kwargs)
    with tf.variable_scope('layer_2'):
        cnn_2 = tf.layers.conv2d(cnn_1, 64, 4, 2, **conv_kwargs)
    with tf.variable_scope('layer_3'):
        cnn_3 = tf.layers.conv2d(cnn_2, 64, 3, 1, **conv_kwargs)
    flat_size = product([x.value for x in cnn_3.get_shape()[1:]])
    flat_in = tf.reshape(cnn_3, (tf.shape(cnn_3)[0], int(flat_size)))
    return dense(flat_in, 512, **conv_kwargs) 
Developer: flyyufelix, Project: sonic_contest, Lines: 21, Source: util.py

Example 7: make_cnn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
def make_cnn(convs, padding, inpt, initializer=None):
    if initializer is None:
        initializer = tf.orthogonal_initializer(np.sqrt(2.0))
    out = inpt
    with tf.variable_scope('convnet'):
        for num_outputs, kernel_size, stride in convs:
            out = layers.convolution2d(
                out,
                num_outputs=num_outputs,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                activation_fn=tf.nn.relu,
                weights_initializer=initializer
            )
    return out 
Developer: takuseno, Project: ppo, Lines: 18, Source: network.py

Example 8: head

# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
def head(endpoints, embedding_dim, is_training):
    endpoints['head_output'] = slim.fully_connected(
        endpoints['model_output'], 1024, normalizer_fn=slim.batch_norm,
        normalizer_params={
            'decay': 0.9,
            'epsilon': 1e-5,
            'scale': True,
            'is_training': is_training,
            'updates_collections': tf.GraphKeys.UPDATE_OPS,
        })

    endpoints['emb_raw'] = slim.fully_connected(
        endpoints['head_output'], embedding_dim, activation_fn=None,
        weights_initializer=tf.orthogonal_initializer(), scope='emb')
    endpoints['emb'] = tf.identity(endpoints['emb_raw'], name="out_emb")
    
    return endpoints 
Developer: VisualComputingInstitute, Project: triplet-reid, Lines: 19, Source: fc1024.py

Example 9: head

# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
def head(endpoints, embedding_dim, is_training):
    endpoints['head_output'] = slim.fully_connected(
        endpoints['model_output'], 1024, normalizer_fn=slim.batch_norm,
        normalizer_params={
            'decay': 0.9,
            'epsilon': 1e-5,
            'scale': True,
            'is_training': is_training,
            'updates_collections': tf.GraphKeys.UPDATE_OPS,
        })

    endpoints['emb_raw'] = slim.fully_connected(
        endpoints['head_output'], embedding_dim, activation_fn=None,
        weights_initializer=tf.orthogonal_initializer(), scope='emb')
    endpoints['emb'] = tf.nn.l2_normalize(endpoints['emb_raw'], -1, name="out_emb")

    return endpoints 
Developer: VisualComputingInstitute, Project: triplet-reid, Lines: 19, Source: fc1024_normalize.py

Example 10: head

# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
def head(endpoints, embedding_dim, is_training):
    endpoints['head_output'] = slim.fully_connected(
        endpoints['model_output'], 1024, normalizer_fn=slim.batch_norm,
        normalizer_params={
            'decay': 0.9,
            'epsilon': 1e-5,
            'scale': True,
            'is_training': is_training,
            'updates_collections': tf.GraphKeys.UPDATE_OPS,
        })

    endpoints['emb'] = endpoints['emb_raw'] = slim.fully_connected(
        endpoints['head_output'], embedding_dim, activation_fn=None,
        weights_initializer=tf.orthogonal_initializer(), scope='emb')

    return endpoints 
Developer: knwng, Project: vehicle-triplet-reid, Lines: 18, Source: fc1024.py

Example 11: head

# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
def head(endpoints, embedding_dim, is_training):
    endpoints['head_output'] = slim.fully_connected(
        endpoints['model_output'], 1024, normalizer_fn=slim.batch_norm,
        normalizer_params={
            'decay': 0.9,
            'epsilon': 1e-5,
            'scale': True,
            'is_training': is_training,
            'updates_collections': tf.GraphKeys.UPDATE_OPS,
        })

    endpoints['emb_raw'] = slim.fully_connected(
        endpoints['head_output'], embedding_dim, activation_fn=None,
        weights_initializer=tf.orthogonal_initializer(), scope='emb')
    endpoints['emb'] = tf.nn.l2_normalize(endpoints['emb_raw'], -1)

    return endpoints 
Developer: knwng, Project: vehicle-triplet-reid, Lines: 19, Source: fc1024_normalize.py

Example 12: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
def __init__(self, x, y, params, mode, scope='Encoder', reuse=tf.AUTO_REUSE):
    self.x = x
    self.y = y
    self.params = params
    self.batch_size = tf.shape(x)[0]
    self.vocab_size = params['encoder_vocab_size']
    self.emb_size = params['encoder_emb_size']
    self.hidden_size = params['encoder_hidden_size']
    self.encoder_length = params['encoder_length']
    self.weight_decay = params['weight_decay']
    self.mode = mode
    self.time_major = params['time_major']
    self.is_training = self.mode == tf.estimator.ModeKeys.TRAIN
    if not self.is_training:
      self.params['encoder_dropout'] = 0.0
      self.params['mlp_dropout'] = 0.0

    #initializer = tf.orthogonal_initializer()
    initializer = tf.random_uniform_initializer(-0.1, 0.1)
    tf.get_variable_scope().set_initializer(initializer)
    self.build_graph(scope=scope, reuse=reuse) 
Developer: renqianluo, Project: NAO, Lines: 23, Source: encoder.py

Example 13: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
def __init__(self, x, y, params, mode, scope='Encoder', reuse=False):
    self.x = x
    self.y = y
    self.params = params
    self.batch_size = tf.shape(x)[0]
    self.vocab_size = params['encoder_vocab_size']
    self.emb_size = params['encoder_emb_size']
    self.hidden_size = params['encoder_hidden_size']
    self.encoder_length = params['encoder_length']
    self.weight_decay = params['weight_decay']
    self.mode = mode
    self.time_major = params['time_major']
    self.weighted_loss = params['weighted_loss']
    self.is_training = self.mode == tf.estimator.ModeKeys.TRAIN
    if not self.is_training:
      self.params['encoder_dropout'] = 0.0
      self.params['mlp_dropout'] = 0.0

    #initializer = tf.orthogonal_initializer()
    self.build_graph(scope, reuse) 
Developer: renqianluo, Project: NAO, Lines: 22, Source: encoder.py

Example 14: matmul_2d

# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
def matmul_2d(x, out_dimension, drop_prob=None):
    '''Multiplies 2-d tensor by weights.

    Args:
        x: a tensor with shape [batch, dimension]
        out_dimension: a number
        drop_prob: if not None, dropout is applied to the weight matrix
            (passed to tf.nn.dropout as the keep probability)

    Returns:
        a tensor with shape [batch, out_dimension]
    '''
    W = tf.get_variable(
        name='weights',
        shape=[x.shape[1], out_dimension],
        dtype=tf.float32,
        initializer=tf.orthogonal_initializer())
    if drop_prob is not None:
        W = tf.nn.dropout(W, drop_prob)
        log.info('W is dropout')

    return tf.matmul(x, W) 
Developer: deepmipt, Project: DeepPavlov, Lines: 24, Source: operations.py

Example 15: lookup_initializer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import orthogonal_initializer [as alias]
def lookup_initializer(self, name, config):
        if name == 'orthogonal':
            orthogonal_gain = self.config_option("orthogonal_gain", 1.0, config)
            return self.orthogonal_initializer(orthogonal_gain)
        elif name == 'he_normal':
            return self.he_normal_initializer()
        elif name == 'xavier':
            return self.xavier_initializer()
        elif name == 'stylegan':
            return self.stylegan_initializer(config or self.config)
        elif name == 'random_normal':
            return self.random_initializer(self.config_option("random_stddev", 0.02, config))
        else:
            raise Exception("initializer not found", name) 
Developer: HyperGAN, Project: HyperGAN, Lines: 16, Source: ops.py


Note: The tensorflow.orthogonal_initializer examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and the copyright of the source code belongs to the original authors. Please follow the license of the corresponding project when distributing or using the code; do not reproduce without permission.