

Python tensorflow.uniform_unit_scaling_initializer Function Code Examples

This article collects typical usage examples of the Python function tensorflow.uniform_unit_scaling_initializer. If you are wondering what uniform_unit_scaling_initializer does, how to call it, or what it looks like in real code, the selected examples below should help.


Fifteen code examples of uniform_unit_scaling_initializer are shown below, ordered by popularity by default.
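For context, tf.uniform_unit_scaling_initializer is a TensorFlow 1.x API (removed in TensorFlow 2.x) that samples weights uniformly from [-limit, limit] with limit = factor * sqrt(3 / input_size), where input_size is the product of all dimensions of the variable except the last. This keeps the output of a matrix multiply at roughly unit variance when the input has unit variance. A minimal, hypothetical usage sketch (the shape and factor below are illustrative, not taken from any example on this page):

    import tensorflow as tf  # TensorFlow 1.x

    # Samples uniformly from [-limit, limit], limit = 1.0 * sqrt(3 / 128),
    # so matmul(x, W) keeps roughly unit variance for unit-variance x.
    W = tf.get_variable(
        "W", shape=[128, 64],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(W).std())  # about sqrt(1 / 128), i.e. ~0.088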

Example 1: _init_parameters

    def _init_parameters(self):
        # vs is tensorflow.python.ops.variable_scope; weight_init_factor, L and
        # filters_num are module-level constants in the source project.
        if self.W is None:
            self.W = vs.get_variable("W", [self._filters_num + self._num_units, self._num_units],
                                     initializer=tf.uniform_unit_scaling_initializer(factor=weight_init_factor))
        if self.F is None:
            self.F = vs.get_variable("F", [L, filters_num],
                                     initializer=tf.uniform_unit_scaling_initializer(factor=weight_init_factor))
        if self.R is None:
            self.R = vs.get_variable("R", [L, 1],
                                     initializer=tf.uniform_unit_scaling_initializer(factor=weight_init_factor * 0.5))
Author: alexeyche | Project: alexeyche-junk | Lines: 7 | Source: tf.py

Example 2: __call__

    def __call__(self, inputs, states, scope=None):
        with tf.variable_scope(
                scope or type(self).__name__,
                initializer=tf.random_normal_initializer(stddev=0.01)):
            # get the tensor
            if self._separate_pad:
                t_shape = [self._num_outputs,
                           self._num_outputs,
                           self._num_inputs]
                vec_a = inputs
                vec_b = states
            else:
                t_shape = [self._num_outputs+1,
                           self._num_outputs,
                           self._num_inputs+1]
                # pad both the inputs and the states with a constant 1 column
                vec_a = tf.concat(
                    axis=1, values=[inputs, tf.ones([inputs.get_shape()[0].value, 1])])
                vec_b = tf.concat(
                    axis=1, values=[states, tf.ones([states.get_shape()[0].value, 1])])
            tensor = get_tt_3_tensor(t_shape, self._ranks, name='W')
            result = bilinear_product_tt_3(vec_a, tensor, vec_b)
            if self._separate_pad:
                # TODO possible weightnorm
                D = tf.get_variable('D', [self._num_inputs, self._num_outputs],
                                    initializer=tf.uniform_unit_scaling_initializer(1.2))
                E = tf.get_variable('E', [self._num_outputs, self._num_outputs],
                                    initializer=tf.uniform_unit_scaling_initializer(1.2))
                b = tf.get_variable('b', [self._num_outputs],
                                    initializer=tf.constant_initializer(0.0))
                z = tf.nn.bias_add(tf.matmul(inputs, D) + tf.matmul(states, E), b)
                result = result + z

            result = self._nonlin(result)
            return result, result
Author: PFCM | Project: rnns | Lines: 34 | Source: simple_tensor_rnn.py

Example 3: __init__

    def __init__(self,
                 num_units,
                 activation=simple_act,
                 input_weights_init=tf.uniform_unit_scaling_initializer(factor=1.0),
                 recc_weights_init=tf.uniform_unit_scaling_initializer(factor=0.1),
                 sigma=1.0,
                 update_gate=True,
                 dt=1.0):
        self._num_units = num_units
        self._activation = activation
        self._dt = dt
        self._sigma = sigma if sigma else 1.0
        self._update_gate = update_gate

        self.W = None
        self.U = None
        self.bias = None
        self.W_u = None
        self.U_u = None
        self.bias_u = None
        self.W_s = None
        self.U_s = None
        self.bias_s = None
        self.sigma = None

        self.input_weights_init = input_weights_init
        self.recc_weights_init = recc_weights_init
        
        self._sensitivity = False
        
        self.states_info = []
        self.update_info = []
Author: alexeyche | Project: alexeyche-junk | Lines: 34 | Source: model.py

Example 4: testInitializerIdentical

 def testInitializerIdentical(self):
   for use_gpu in [False, True]:
     init1 = tf.uniform_unit_scaling_initializer(seed=1)
     init2 = tf.uniform_unit_scaling_initializer(seed=1)
     self.assertTrue(identicaltest(self, init1, init2, use_gpu))
     init3 = tf.uniform_unit_scaling_initializer(1.5, seed=1)
     init4 = tf.uniform_unit_scaling_initializer(1.5, seed=1)
     self.assertTrue(identicaltest(self, init3, init4, use_gpu))
Author: DapengLan | Project: tensorflow | Lines: 8 | Source: init_ops_test.py

Example 5: testInitializerDifferent

 def testInitializerDifferent(self):
   for use_gpu in [False, True]:
     init1 = tf.uniform_unit_scaling_initializer(seed=1)
     init2 = tf.uniform_unit_scaling_initializer(seed=2)
     init3 = tf.uniform_unit_scaling_initializer(1.5, seed=1)
     self.assertFalse(identicaltest(self, init1, init2, use_gpu))
     self.assertFalse(identicaltest(self, init1, init3, use_gpu))
     self.assertFalse(identicaltest(self, init2, init3, use_gpu))
Author: DapengLan | Project: tensorflow | Lines: 8 | Source: init_ops_test.py

Example 6: testInitializerIdentical

 def testInitializerIdentical(self):
   for dtype in [tf.float32, tf.float64]:
     init1 = tf.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
     init2 = tf.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
     self.assertTrue(identicaltest(self, init1, init2))
     init3 = tf.uniform_unit_scaling_initializer(1.5, seed=1, dtype=dtype)
     init4 = tf.uniform_unit_scaling_initializer(1.5, seed=1, dtype=dtype)
     self.assertTrue(identicaltest(self, init3, init4))
Author: Nishant23 | Project: tensorflow | Lines: 8 | Source: init_ops_test.py

Example 7: testInitializerDifferent

 def testInitializerDifferent(self):
   for dtype in [tf.float32, tf.float64]:
     init1 = tf.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
     init2 = tf.uniform_unit_scaling_initializer(seed=2, dtype=dtype)
     init3 = tf.uniform_unit_scaling_initializer(1.5, seed=1, dtype=dtype)
     self.assertFalse(identicaltest(self, init1, init2))
     self.assertFalse(identicaltest(self, init1, init3))
     self.assertFalse(identicaltest(self, init2, init3))
Author: Nishant23 | Project: tensorflow | Lines: 8 | Source: init_ops_test.py

Example 8: sharded_variable

def sharded_variable(name, shape, num_shards, dtype=tf.float32, transposed=False):
    '''Create a variable as a list of num_shards row-wise shards.'''
    # Round up so the shards cover the full first dimension.
    shard_size = int((shape[0] + num_shards - 1) / num_shards)
    # Note: both branches build the same initializer in this snippet.
    if transposed:
        initializer = tf.uniform_unit_scaling_initializer(dtype=dtype)
    else:
        initializer = tf.uniform_unit_scaling_initializer(dtype=dtype)
    return [tf.get_variable(name + '_%d' % i, [shard_size, shape[1]],
                            initializer=initializer, dtype=dtype)
            for i in range(num_shards)]
Author: IgorWang | Project: RNNLM | Lines: 12 | Source: model_utils.py
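A hypothetical usage sketch for the function above (the vocabulary size, embedding width, and shard count are illustrative): sharding a large embedding matrix row-wise, then concatenating the shards when a single tensor is needed:

    # 100000 rows split into 8 shards of ceil(100000 / 8) = 12500 rows each.
    emb_shards = sharded_variable('embedding', [100000, 512], num_shards=8)
    embedding = tf.concat(emb_shards, axis=0)  # back to [100000, 512]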

Example 9: make_variable

def make_variable(name, shape, initializer, weight_decay=None, lr_mult=1, decay_mult=1):
    # Note: the initializer argument is only used for frozen (lr_mult == 0)
    # variables; trainable ones fall back to uniform unit scaling.
    if lr_mult == 0:
        var = tf.get_variable(name, shape, initializer=initializer, trainable=False)
    elif weight_decay is None:
        var = tf.get_variable(name, shape,
                              initializer=tf.uniform_unit_scaling_initializer())
    else:
        var = tf.get_variable(name, shape,
                              initializer=tf.uniform_unit_scaling_initializer(),
                              regularizer=tf.contrib.layers.l2_regularizer(weight_decay * decay_mult))

    if lr_mult > 0:
        tf.add_to_collection(str(lr_mult), var)

    return var
Author: 24hours | Project: tf_fcn | Lines: 15 | Source: FCN.py
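A hypothetical usage sketch for make_variable (the names, shapes, and decay value are illustrative): a trainable convolution kernel with L2 weight decay, and a frozen statistic that keeps the initializer it is given:

    # Trainable kernel: weight-decayed, registered under collection '1'.
    w = make_variable('W', [3, 3, 64, 128],
                      tf.uniform_unit_scaling_initializer(), weight_decay=5e-4)
    # Frozen variable (lr_mult=0): the passed initializer is actually used.
    mean = make_variable('mean', [128], tf.constant_initializer(0.0), lr_mult=0)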

Example 10: __call__

    def __call__(self, inputs, states, scope=None):
        with tf.variable_scope(scope or type(self).__name__) as outer_scope:
            # do it
            # sub scope for the tensor init
            # should inherit reuse from outer scope
            with tf.variable_scope('tensor',
                                   initializer=init.orthonormal_init(0.5)):
                tensor = get_cp_tensor([self.input_size,
                                        self.output_size,
                                        self.state_size],
                                       self.rank,
                                       'W',
                                       weightnorm=False,
                                       trainable=True)
            combination = bilinear_product_cp(inputs, tensor, states)
            # and project the input
            input_weights = tf.get_variable('U', shape=[self.input_size,
                                                        self._input_projection],
                                            initializer=tf.uniform_unit_scaling_initializer(1.4))
            input_proj = tf.matmul(inputs, input_weights)
            # apply a bias pre-nonlinearity
            bias = tf.get_variable('b', shape=[self.output_size],
                                   initializer=tf.constant_initializer(0.0))
            if self.layernorm == 'pre':
                activations = layer_normalise(combination + input_proj + bias)
            else:
                activations = combination + input_proj + bias

            result = self._nonlinearity(activations)

            if self.layernorm == 'post':
                result = layer_normalise(result)

            result = result + states
        return result, result
Author: PFCM | Project: rnns | Lines: 35 | Source: additive_tensor_rnn.py

Example 11: setup_loss_critic

def setup_loss_critic(critic):
    # we are starting with critic.outputs symbol (after logistic layer)
    with tf.variable_scope("rl", initializer=tf.uniform_unit_scaling_initializer(1.0)):
        # loss setup
        # None to timestep
        critic.target_qt = tf.placeholder(tf.float32, shape=[None, None, critic.vocab_size],
                                            name="q_action_score")
        # p_actions is the target_token, and it's already [T, batch_size]
        # q_t needs to be expanded...

        # critic.outputs [T, batch_size, vocab_size]
        # let's populate (expand) target tokens to fill up qt (just like what we did with one-hot labels)

        critic.q_loss = tf.reduce_mean(tf.square(critic.outputs - critic.target_qt))  # Note: not adding lambda*C yet (variance)

        opt = nlc_model.get_optimizer(FLAGS.optimizer)(critic.learning_rate)

        # update
        params = tf.trainable_variables()
        gradients = tf.gradients(critic.q_loss, params)
        clipped_gradients, _ = tf.clip_by_global_norm(gradients, FLAGS.max_gradient_norm)
        #      self.gradient_norm = tf.global_norm(clipped_gradients)
        critic.gradient_norm = tf.global_norm(gradients)
        critic.param_norm = tf.global_norm(params)
        critic.updates = opt.apply_gradients(
            zip(clipped_gradients, params), global_step=critic.global_step)
Author: windweller | Project: nlc | Lines: 26 | Source: rl_train.py
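The comment about expanding target tokens suggests building the [T, batch_size, vocab_size] target from per-token values. A hedged sketch of one way to do this (target_tokens and q_values are hypothetical names, not defined in the source):

    # target_tokens: [T, batch_size] int ids; q_values: [T, batch_size] floats.
    # Scatter each q value into the vocab axis at its token's index,
    # analogous to a one-hot label scaled by the q value.
    target_qt = (tf.one_hot(target_tokens, depth=critic.vocab_size) *
                 tf.expand_dims(q_values, -1))  # [T, batch_size, vocab_size]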

Example 12: _init_parameters

    def _init_parameters(self):
        return tf.get_variable(
            "F", [self._filter_size, self._input_size, self._layer_size],
            initializer=tf.uniform_unit_scaling_initializer(factor=c.weight_init_factor))

    def __call__(self, input, state, scope=None):
        # c holds the model constants (weight_init_factor, epsilon, tau, lam)
        # in the source project.
        if self._params is None:
            self._params = self._init_parameters()

        x = input
        u, a = state
        F = self._params

        # Feedforward drive and lateral feedback ('SAME' padding assumed here;
        # tf.nn.conv1d requires an explicit padding argument).
        b = tf.nn.conv1d(x, F, 1, 'SAME')
        Fc = tf.matmul(tf.transpose(F, (0, 2, 1)), F)
        fb = tf.nn.conv1d(a, Fc, 1, 'SAME')

        print("b", b.get_shape())
        print("Fc", Fc.get_shape())
        print("fb", fb.get_shape())

        # Leaky-integrator dynamics with a rectified soft threshold.
        du = -u + b - fb
        new_u = u + c.epsilon * du / c.tau

        new_a = tf.nn.relu(new_u - c.lam)

        return (new_u, new_a), (new_u, new_a)
Author: alexeyche | Project: alexeyche-junk | Lines: 32 | Source: lca_tf.py

Example 13: FullyConnected

def FullyConnected(x, out_dim,
                   W_init=None, b_init=None,
                   nl=tf.nn.relu, use_bias=True):
    """
    Fully-Connected layer.

    :param input: a tensor to be flattened except the first dimension.
    :param out_dim: output dimension
    :param W_init: initializer for W. default to `xavier_initializer_conv2d`.
    :param b_init: initializer for b. default to zero initializer.
    :param nl: nonlinearity. default to `relu`.
    :param use_bias: whether to use bias. a boolean default to True
    :returns: a 2D tensor
    """
    x = batch_flatten(x)
    in_dim = x.get_shape().as_list()[1]

    if W_init is None:
        #W_init = tf.truncated_normal_initializer(stddev=1 / math.sqrt(float(in_dim)))
        W_init = tf.uniform_unit_scaling_initializer(factor=1.43)
    if b_init is None:
        b_init = tf.constant_initializer()

    W = tf.get_variable('W', [in_dim, out_dim], initializer=W_init)
    if use_bias:
        b = tf.get_variable('b', [out_dim], initializer=b_init)
    prod = tf.nn.xw_plus_b(x, W, b) if use_bias else tf.matmul(x, W)
    return nl(prod, name='output')
Author: Paseam | Project: tensorpack | Lines: 28 | Source: fc.py
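A hedged usage sketch (assumes tensorpack-style model code; the layer names and dimensions are illustrative):

    # Hidden layer with the default ReLU, then a linear output layer.
    with tf.variable_scope('fc0'):
        h = FullyConnected(image, out_dim=512)
    with tf.variable_scope('fc1'):
        logits = FullyConnected(h, out_dim=10, nl=tf.identity)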

Example 14: testTransformerAutoencoder

  def testTransformerAutoencoder(self):
    hparams = imagetransformer_latent_tiny()
    hparams.mode = tf.estimator.ModeKeys.TRAIN
    block_dim = int(hparams.hidden_size // hparams.num_blocks)
    block_v_size = 2**(hparams.bottleneck_bits /
                       (hparams.num_residuals * hparams.num_blocks))
    block_v_size = int(block_v_size)
    means = tf.get_variable(
        name="means",
        shape=[hparams.num_residuals,
               hparams.num_blocks,
               block_v_size,
               block_dim],
        initializer=tf.uniform_unit_scaling_initializer())
    hparams.bottleneck = functools.partial(
        discretization.discrete_bottleneck,
        hidden_size=hparams.hidden_size,
        z_size=hparams.bottleneck_bits,
        filter_size=hparams.filter_size,
        startup_steps=hparams.startup_steps,
        bottleneck_kind=hparams.bottleneck_kind,
        num_blocks=hparams.num_blocks,
        num_residuals=hparams.num_residuals,
        reshape_method=hparams.reshape_method,
        beta=hparams.vq_beta,
        decay=hparams.vq_decay,
        soft_em=hparams.soft_em,
        num_samples=hparams.num_samples,
        epsilon=hparams.vq_epsilon,
        ema=hparams.ema,
        means=means)

    inputs = None
    batch_size = hparams.batch_size
    targets = tf.random_uniform([batch_size,
                                 hparams.img_len,
                                 hparams.img_len,
                                 hparams.hidden_size],
                                minval=-1., maxval=1.)
    target_space_id = None

    tf.train.create_global_step()
    decoder_output, losses, cache = latent_layers.transformer_autoencoder(
        inputs, targets, target_space_id, hparams)

    self.assertEqual(set(six.iterkeys(losses)),
                     {"extra", "extra_loss", "latent_pred"})

    self.evaluate(tf.global_variables_initializer())
    decoder_output_, extra_loss_, latent_pred_ = self.evaluate(
        [decoder_output, losses["extra_loss"], losses["latent_pred"]])
    self.assertEqual(decoder_output_.shape, (batch_size,
                                             hparams.img_len,
                                             hparams.img_len,
                                             hparams.hidden_size))
    self.assertEqual(extra_loss_.shape, (batch_size,))
    self.assertEqual(latent_pred_.shape, (batch_size,))
    self.assertAllGreaterEqual(extra_loss_, 0.)
    self.assertAllGreaterEqual(latent_pred_, 0.)
    self.assertEqual(cache, None)
Author: qixiuai | Project: tensor2tensor | Lines: 60 | Source: latent_layers_test.py

Example 15: _fully_connected

 def _fully_connected(self, x, out_dim):
   x = tf.reshape(x, [self._params.batch_size, -1])
   w = tf.get_variable(
       'DW', [x.get_shape()[1], out_dim],
       initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
   b = tf.get_variable(
       'biases', [out_dim], initializer=tf.constant_initializer())
   return tf.nn.xw_plus_b(x, w, b)
Author: 812864539 | Project: models | Lines: 8 | Source: embedders.py


Note: The tensorflow.uniform_unit_scaling_initializer examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from community-contributed open-source projects; copyright remains with the original authors, and any use or redistribution must follow the corresponding projects' licenses. Do not reproduce without permission.