

Python tensorflow.ones_initializer Method Code Examples

This article collects typical usage examples of the Python method tensorflow.ones_initializer. If you are unsure what tensorflow.ones_initializer does, how to call it, or how it is used in practice, the curated code examples below should help. You can also browse other usage examples from the tensorflow package.


The following presents 15 code examples of tensorflow.ones_initializer, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
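
Before the examples, here is a minimal self-contained sketch of what tf.ones_initializer does (assuming TensorFlow 1.x; under TensorFlow 2.x the same calls live under tf.compat.v1):

import tensorflow as tf

# A scale parameter that starts at 1.0 everywhere, as in the normalization
# layers shown in the examples below.
gamma = tf.get_variable("gamma", shape=[4], dtype=tf.float32,
                        initializer=tf.ones_initializer())

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(gamma))  # [1. 1. 1. 1.]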

Example 1: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_initializer [as alias]
def __init__(self, component, name, shape, dtype):
    """Construct variables to normalize an input of given shape.

    Arguments:
      component: ComponentBuilder handle.
      name: Human readable name to organize the variables.
      shape: Shape of the layer to be normalized.
      dtype: Type of the layer to be normalized.
    """
    self._name = name
    self._shape = shape
    self._component = component
    beta = tf.get_variable(
        'beta_%s' % name,
        shape=shape,
        dtype=dtype,
        initializer=tf.zeros_initializer())
    gamma = tf.get_variable(
        'gamma_%s' % name,
        shape=shape,
        dtype=dtype,
        initializer=tf.ones_initializer())
    self._params = [beta, gamma] 
Author: ringringyi, Project: DOTA_models, Lines: 25, Source: network_units.py

Example 2: layer_norm

# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_initializer [as alias]
def layer_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None):
  """Layer normalize the tensor x, averaging over the last dimension."""
  if filters is None:
    filters = shape_list(x)[-1]
  with tf.variable_scope(
      name, default_name="layer_norm", values=[x], reuse=reuse):
    scale = tf.get_variable(
        "layer_norm_scale", [filters], initializer=tf.ones_initializer())
    bias = tf.get_variable(
        "layer_norm_bias", [filters], initializer=tf.zeros_initializer())
    if allow_defun:
      result = layer_norm_compute(x, tf.constant(epsilon), scale, bias)
      result.set_shape(x.get_shape())
    else:
      result = layer_norm_compute_python(x, epsilon, scale, bias)
    return result 
Author: akzaidi, Project: fine-lm, Lines: 18, Source: common_layers.py
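
A rough usage sketch for the function above. Caveat: it relies on shape_list, allow_defun, and the layer_norm_compute* helpers from tensor2tensor's common_layers, so it only runs inside that module:

x = tf.random_normal([8, 10, 512])  # [batch, length, hidden]
y = layer_norm(x, name="enc_norm")  # same shape, normalized over the last axis
y2 = layer_norm(x, name="enc_norm", reuse=True)  # shares the same scale/bias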

Example 3: group_norm

# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_initializer [as alias]
def group_norm(x, filters=None, num_groups=8, epsilon=1e-5):
  """Group normalization as in https://arxiv.org/abs/1803.08494."""
  x_shape = shape_list(x)
  if filters is None:
    filters = x_shape[-1]
  assert len(x_shape) == 4
  assert filters % num_groups == 0
  # Prepare variables.
  scale = tf.get_variable(
      "group_norm_scale", [filters], initializer=tf.ones_initializer())
  bias = tf.get_variable(
      "group_norm_bias", [filters], initializer=tf.zeros_initializer())
  epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
  # Reshape and compute group norm.
  x = tf.reshape(x, x_shape[:-1] + [num_groups, filters // num_groups])
  # Calculate mean and variance on heights, width, channels (not groups).
  mean, variance = tf.nn.moments(x, [1, 2, 4], keep_dims=True)
  norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
  return tf.reshape(norm_x, x_shape) * scale + bias 
Author: akzaidi, Project: fine-lm, Lines: 21, Source: common_layers.py
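
For intuition: with filters=64 and num_groups=8, the reshape turns an [N, H, W, 64] tensor into [N, H, W, 8, 8], and the moments are taken over height, width, and the 8 channels inside each group (axes 1, 2, 4), never across groups. A minimal call, with the same caveat that shape_list and cast_like come from common_layers:

x = tf.random_normal([4, 32, 32, 64])  # NHWC input
y = group_norm(x, num_groups=8)        # 8 groups of 8 channels each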

Example 4: ln

# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_initializer [as alias]
def ln(inputs, epsilon=1e-8, scope="ln"):
    '''Applies layer normalization. See https://arxiv.org/abs/1607.06450.
    inputs: A tensor with 2 or more dimensions, where the first dimension is `batch_size`.
    epsilon: A small floating-point number that prevents division by zero.
    scope: Optional scope for `variable_scope`.

    Returns:
      A tensor with the same shape and dtype as `inputs`.
    '''
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        inputs_shape = inputs.get_shape()
        params_shape = inputs_shape[-1:]

        mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
        beta = tf.get_variable("beta", params_shape, initializer=tf.zeros_initializer())
        gamma = tf.get_variable("gamma", params_shape, initializer=tf.ones_initializer())
        normalized = (inputs - mean) / ((variance + epsilon) ** 0.5)
        outputs = gamma * normalized + beta

    return outputs
Author: Kyubyong, Project: transformer, Lines: 22, Source: modules.py
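
A quick usage sketch: because the scope uses reuse=tf.AUTO_REUSE, calling ln twice with the same scope name shares one beta/gamma pair instead of raising a variable-reuse error:

h = tf.random_normal([32, 20, 512])
h1 = ln(h, scope="enc_ln")   # creates beta/gamma under "enc_ln"
h2 = ln(h1, scope="enc_ln")  # reuses the same beta/gamma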

Example 5: scale_gaussian_prior

# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_initializer [as alias]
def scale_gaussian_prior(name, z, logscale_factor=3.0, trainable=True):
  """Returns N(s^i * z^i, std^i) where s^i and std^i are pre-component.

  s^i is a learnable parameter with identity initialization.
  std^i is optionally learnable with identity initialization.

  Args:
    name: variable scope.
    z: input tensor.
    logscale_factor: equivalent to scaling up the learning_rate by a factor
                     of logscale_factor.
    trainable: Whether or not std^i is learnt.
  """
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    z_shape = common_layers.shape_list(z)
    latent_multiplier = tf.get_variable(
        "latent_multiplier", shape=z_shape, dtype=tf.float32,
        initializer=tf.ones_initializer())
    log_scale = tf.get_variable(
        "log_scale_latent", shape=z_shape, dtype=tf.float32,
        initializer=tf.zeros_initializer(), trainable=trainable)
    log_scale = log_scale * logscale_factor
    return tfp.distributions.Normal(
        loc=latent_multiplier * z, scale=tf.exp(log_scale)) 
Author: yyht, Project: BERT, Lines: 26, Source: glow_ops.py
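
Design note on logscale_factor: the stored variable starts at zero and the graph computes scale = exp(v * logscale_factor), so by the chain rule every gradient on v is multiplied by logscale_factor, which is what the docstring means by scaling up the learning rate for this parameter. A minimal usage sketch, assuming tensorflow_probability is imported as tfp and tensor2tensor's common_layers is available (as in the source file):

z = tf.random_normal([16, 8, 8, 32])
prior = scale_gaussian_prior("prior_scale", z)
sample = prior.sample()    # a tensor with the same shape as z
log_p = prior.log_prob(z)  # element-wise log density under the scaled Gaussian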

Example 6: layer_normalization

# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_initializer [as alias]
def layer_normalization(self, x):
        """Applies layer normalization to x.

        x should be: [batch_size, sequence_length, d_model]
        :return: a tensor of the same shape
        """
        filter_size = x.get_shape()[-1]  # last dimension of x, e.g. 512
        with tf.variable_scope("layer_normalization" + str(self.layer_index)):
            # 1. normalize the input using the mean and variance of the last dimension
            mean = tf.reduce_mean(x, axis=-1, keepdims=True)  # [batch_size, sequence_length, 1]
            variance = tf.reduce_mean(tf.square(x - mean), axis=-1, keepdims=True)  # [batch_size, sequence_length, 1]
            norm_x = (x - mean) * tf.rsqrt(variance + 1e-6)  # [batch_size, sequence_length, d_model]
            # 2. re-scale the normalized input with learnable parameters
            scale = tf.get_variable("layer_norm_scale", [filter_size], initializer=tf.ones_initializer())
            # bug fix: the bias conventionally starts at zero (the original used
            # tf.ones_initializer here), so the layer begins as an identity transform
            bias = tf.get_variable("layer_norm_bias", [filter_size], initializer=tf.zeros_initializer())
            output = norm_x * scale + bias  # [batch_size, sequence_length, d_model]
            return output  # [batch_size, sequence_length, d_model]
Author: yyht, Project: BERT, Lines: 19, Source: layer_norm_residual_conn.py

Example 7: test_std_share_network_output_values

# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_initializer [as alias]
def test_std_share_network_output_values(self, output_dim, hidden_sizes):
        model = GaussianMLPModel(output_dim=output_dim,
                                 hidden_sizes=hidden_sizes,
                                 std_share_network=True,
                                 hidden_nonlinearity=None,
                                 std_parameterization='exp',
                                 hidden_w_init=tf.ones_initializer(),
                                 output_w_init=tf.ones_initializer())
        dist = model.build(self.input_var).dist

        mean, log_std = self.sess.run(
            [dist.loc, tf.math.log(dist.stddev())],
            feed_dict={self.input_var: self.obs})

        expected_mean = np.full([1, 1, output_dim], 5 * np.prod(hidden_sizes))
        expected_log_std = np.full([1, 1, output_dim],
                                   5 * np.prod(hidden_sizes))
        assert np.array_equal(mean, expected_mean)
        assert np.array_equal(log_std, expected_log_std) 
Author: rlworkgroup, Project: garage, Lines: 21, Source: test_gaussian_mlp_model.py
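
Why 5 * prod(hidden_sizes)? The test fixture (not shown) feeds an all-ones observation of width 5 through all-ones weights with no nonlinearity: the first hidden layer outputs 5 in every unit, each later layer multiplies that value by the previous layer's width, and the all-ones output layer multiplies by the last hidden width. With std_share_network=True the log-std head shares those weights, so it produces the same value. A plain-numpy check of that arithmetic:

import numpy as np

hidden_sizes = (2, 4)
v = np.ones(5)                     # all-ones observation, width 5
for h in hidden_sizes:             # all-ones hidden layers, no nonlinearity
    v = np.ones((h, v.size)).dot(v)
out = np.ones((1, v.size)).dot(v)  # all-ones output layer
assert np.all(out == 5 * np.prod(hidden_sizes))  # 40 for (2, 4)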

Example 8: test_without_std_share_network_output_values

# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_initializer [as alias]
def test_without_std_share_network_output_values(self, output_dim,
                                                     hidden_sizes):
        model = GaussianMLPModel(output_dim=output_dim,
                                 hidden_sizes=hidden_sizes,
                                 init_std=2,
                                 std_share_network=False,
                                 adaptive_std=False,
                                 hidden_nonlinearity=None,
                                 hidden_w_init=tf.ones_initializer(),
                                 output_w_init=tf.ones_initializer())
        dist = model.build(self.input_var).dist

        mean, log_std = self.sess.run(
            [dist.loc, tf.math.log(dist.stddev())],
            feed_dict={self.input_var: self.obs})

        expected_mean = np.full([1, 1, output_dim], 5 * np.prod(hidden_sizes))
        expected_log_std = np.full([1, 1, output_dim], np.log(2.))
        assert np.array_equal(mean, expected_mean)
        assert np.allclose(log_std, expected_log_std) 
Author: rlworkgroup, Project: garage, Lines: 22, Source: test_gaussian_mlp_model.py

Example 9: test_adaptive_std_network_output_values

# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_initializer [as alias]
def test_adaptive_std_network_output_values(self, output_dim, hidden_sizes,
                                                std_hidden_sizes):
        model = GaussianMLPModel(output_dim=output_dim,
                                 std_share_network=False,
                                 hidden_sizes=hidden_sizes,
                                 std_hidden_sizes=std_hidden_sizes,
                                 adaptive_std=True,
                                 hidden_nonlinearity=None,
                                 hidden_w_init=tf.ones_initializer(),
                                 output_w_init=tf.ones_initializer(),
                                 std_hidden_nonlinearity=None,
                                 std_hidden_w_init=tf.ones_initializer(),
                                 std_output_w_init=tf.ones_initializer())
        dist = model.build(self.input_var).dist

        mean, log_std = self.sess.run(
            [dist.loc, tf.math.log(dist.stddev())],
            feed_dict={self.input_var: self.obs})

        expected_mean = np.full([1, 1, output_dim], 5 * np.prod(hidden_sizes))
        expected_log_std = np.full([1, 1, output_dim],
                                   5 * np.prod(std_hidden_sizes))
        assert np.array_equal(mean, expected_mean)
        assert np.array_equal(log_std, expected_log_std) 
Author: rlworkgroup, Project: garage, Lines: 26, Source: test_gaussian_mlp_model.py

Example 10: test_softplus_output_values

# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_initializer [as alias]
def test_softplus_output_values(self, output_dim, hidden_sizes):
        model = GaussianMLPModel(output_dim=output_dim,
                                 hidden_sizes=hidden_sizes,
                                 hidden_nonlinearity=None,
                                 std_share_network=False,
                                 adaptive_std=False,
                                 init_std=2,
                                 std_parameterization='softplus',
                                 hidden_w_init=tf.ones_initializer(),
                                 output_w_init=tf.ones_initializer())
        dist = model.build(self.input_var).dist

        mean, log_std = self.sess.run(
            [dist.loc, tf.math.log(dist.stddev())],
            feed_dict={self.input_var: self.obs})

        expected_mean = np.full([1, 1, output_dim], 5 * np.prod(hidden_sizes))
        expected_std_param = np.full([1, 1, output_dim], np.log(np.exp(2) - 1))
        expected_log_std = np.log(np.log(1. + np.exp(expected_std_param)))
        assert np.array_equal(mean, expected_mean)
        assert np.allclose(log_std, expected_log_std) 
Author: rlworkgroup, Project: garage, Lines: 23, Source: test_gaussian_mlp_model.py
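
Where those expectations come from: with std_parameterization='softplus', the standard deviation is stored as a pre-activation p with std = softplus(p) = log(1 + e^p). For the initial std to equal init_std=2, p must be the softplus inverse log(e^2 - 1), which is exactly expected_std_param above; the test then verifies log_std = log(log(1 + e^p)), which simplifies to log(2) at initialization.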

Example 11: test_is_pickleable

# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_initializer [as alias]
def test_is_pickleable(self, output_dim, hidden_sizes):
        model = CategoricalMLPModel(output_dim=output_dim,
                                    hidden_sizes=hidden_sizes,
                                    hidden_nonlinearity=None,
                                    hidden_w_init=tf.ones_initializer(),
                                    output_w_init=tf.ones_initializer())
        dist = model.build(self.input_var).dist
        # assign bias to all one
        with tf.compat.v1.variable_scope('CategoricalMLPModel/mlp',
                                         reuse=True):
            bias = tf.compat.v1.get_variable('hidden_0/bias')

        bias.load(tf.ones_like(bias).eval())

        output1 = self.sess.run(dist.probs,
                                feed_dict={self.input_var: self.obs})

        h = pickle.dumps(model)
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            input_var = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
            model_pickled = pickle.loads(h)
            dist2 = model_pickled.build(input_var).dist
            output2 = sess.run(dist2.probs, feed_dict={input_var: self.obs})

            assert np.array_equal(output1, output2) 
Author: rlworkgroup, Project: garage, Lines: 27, Source: test_categorical_mlp_model.py

Example 12: test_output_values_merging

# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_initializer [as alias]
def test_output_values_merging(self, output_dim, hidden_sizes):
        model = MLPMergeModel(output_dim=output_dim,
                              hidden_sizes=hidden_sizes,
                              concat_layer=0,
                              hidden_nonlinearity=None,
                              hidden_w_init=tf.ones_initializer(),
                              output_w_init=tf.ones_initializer())

        input_var2 = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
        obs2 = np.ones((1, 5))

        outputs = model.build(self.input_var, input_var2).outputs
        output = self.sess.run(outputs,
                               feed_dict={
                                   self.input_var: self.obs,
                                   input_var2: obs2
                               })

        expected_output = np.full([1, output_dim], 10 * np.prod(hidden_sizes))
        assert np.array_equal(output, expected_output)

    # yapf: disable 
Author: rlworkgroup, Project: garage, Lines: 24, Source: test_mlp_model.py

Example 13: test_is_pickleable

# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_initializer [as alias]
def test_is_pickleable(self, output_dim, hidden_sizes):
        model = MLPModel(output_dim=output_dim,
                         hidden_sizes=hidden_sizes,
                         hidden_nonlinearity=None,
                         hidden_w_init=tf.ones_initializer(),
                         output_w_init=tf.ones_initializer())
        outputs = model.build(self.input_var).outputs

        # assign bias to all one
        with tf.compat.v1.variable_scope('MLPModel/mlp', reuse=True):
            bias = tf.compat.v1.get_variable('hidden_0/bias')

        bias.load(tf.ones_like(bias).eval())

        output1 = self.sess.run(outputs, feed_dict={self.input_var: self.obs})

        h = pickle.dumps(model)
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            input_var = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
            model_pickled = pickle.loads(h)
            outputs = model_pickled.build(input_var).outputs
            output2 = sess.run(outputs, feed_dict={input_var: self.obs})

            assert np.array_equal(output1, output2) 
Author: rlworkgroup, Project: garage, Lines: 26, Source: test_mlp_model.py

Example 14: layer_norm

# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_initializer [as alias]
def layer_norm(inputs, epsilon=1e-6, dtype=None, scope=None):
    """
    Layer Normalization
    :param inputs: A Tensor of shape [..., channel_size]
    :param epsilon: A floating number
    :param dtype: An optional instance of tf.DType
    :param scope: An optional string
    :returns: A Tensor with the same shape as inputs
    """
    with tf.variable_scope(scope, default_name="layer_norm", values=[inputs],
                           dtype=dtype):
        channel_size = inputs.get_shape().as_list()[-1]

        scale = tf.get_variable("scale", shape=[channel_size],
                                initializer=tf.ones_initializer())

        offset = tf.get_variable("offset", shape=[channel_size],
                                 initializer=tf.zeros_initializer())

        mean = tf.reduce_mean(inputs, -1, True)
        variance = tf.reduce_mean(tf.square(inputs - mean), -1, True)

        norm_inputs = (inputs - mean) * tf.rsqrt(variance + epsilon)

        return norm_inputs * scale + offset 
Author: THUNLP-MT, Project: THUMT, Lines: 27, Source: nn.py

Example 15: batch_norm

# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_initializer [as alias]
EPS = 1e-5  # module-level constant in the source project; value assumed here

def batch_norm(x, is_training, eps=EPS, decay=0.9, affine=True, name='BatchNorm2d'):
    from tensorflow.python.training.moving_averages import assign_moving_average

    with tf.variable_scope(name):
        params_shape = x.shape[-1:]
        moving_mean = tf.get_variable(name='mean', shape=params_shape,
                                      initializer=tf.zeros_initializer(), trainable=False)
        moving_var = tf.get_variable(name='variance', shape=params_shape,
                                     initializer=tf.ones_initializer(), trainable=False)

        def mean_var_with_update():
            mean_this_batch, variance_this_batch = tf.nn.moments(
                x, list(range(len(x.shape) - 1)), name='moments')
            with tf.control_dependencies([
                assign_moving_average(moving_mean, mean_this_batch, decay),
                assign_moving_average(moving_var, variance_this_batch, decay)
            ]):
                return tf.identity(mean_this_batch), tf.identity(variance_this_batch)

        mean, variance = tf.cond(is_training, mean_var_with_update,
                                 lambda: (moving_mean, moving_var))
        if affine:  # optionally re-scale with learnable beta and gamma
            beta = tf.get_variable('beta', params_shape, initializer=tf.zeros_initializer())
            gamma = tf.get_variable('gamma', params_shape, initializer=tf.ones_initializer())
            normed = tf.nn.batch_normalization(x, mean=mean, variance=variance,
                                               offset=beta, scale=gamma, variance_epsilon=eps)
        else:
            normed = tf.nn.batch_normalization(x, mean=mean, variance=variance,
                                               offset=None, scale=None, variance_epsilon=eps)
        return normed
Author: DuFanXin, Project: U-net, Lines: 26, Source: unet-TF-withBatchNormal.py
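
A minimal usage sketch for the function above: is_training must be a boolean tensor rather than a Python bool, because it is consumed by tf.cond, so a placeholder_with_default is convenient:

x = tf.placeholder(tf.float32, [None, 28, 28, 16])
is_training = tf.placeholder_with_default(False, shape=[])
y = batch_norm(x, is_training)  # updates the moving statistics only when True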


Note: The tensorflow.ones_initializer examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please consult each project's license before using or redistributing the code, and do not republish without permission.