

Python tensorflow.get_variable Method Code Examples

This article collects typical usage examples of the tensorflow.get_variable method in Python. If you have been wondering what exactly tensorflow.get_variable does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the tensorflow module.


The following presents 15 code examples of the tensorflow.get_variable method, sorted by popularity by default.
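Before the examples, here is a minimal, self-contained sketch of the core pattern behind tf.get_variable (TF 1.x graph mode assumed): creating variables inside a tf.variable_scope and sharing them on a later call via reuse. The function and scope names are illustrative only, not taken from any of the projects cited below.

import tensorflow as tf

def dense(x, out_dim):
    """Create (or reuse) 'w' and 'b' in the current variable scope."""
    in_dim = x.get_shape().as_list()[-1]
    w = tf.get_variable('w', shape=[in_dim, out_dim],
                        initializer=tf.glorot_uniform_initializer())
    b = tf.get_variable('b', shape=[out_dim],
                        initializer=tf.zeros_initializer())
    return tf.matmul(x, w) + b

x = tf.placeholder(tf.float32, [None, 4])
with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
    y1 = dense(x, 8)  # creates model/w and model/b
    y2 = dense(x, 8)  # reuses the same two variables

Because both calls happen under the same scope with reuse enabled, the second call returns the existing variables instead of raising a "variable already exists" error, which is the sharing mechanism most of the examples below rely on.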

Example 1: test_adam

# Required import: import tensorflow [as alias]
# Or: from tensorflow import get_variable [as alias]
def test_adam(self):
        with self.test_session() as sess:
            w = tf.get_variable(
                "w",
                shape=[3],
                initializer=tf.constant_initializer([0.1, -0.2, -0.1]))
            x = tf.constant([0.4, 0.2, -0.5])
            loss = tf.reduce_mean(tf.square(x - w))
            tvars = tf.trainable_variables()
            grads = tf.gradients(loss, tvars)
            global_step = tf.train.get_or_create_global_step()
            optimizer = optimization.AdamWeightDecayOptimizer(learning_rate=0.2)
            train_op = optimizer.apply_gradients(zip(grads, tvars), global_step)
            init_op = tf.group(tf.global_variables_initializer(),
                               tf.local_variables_initializer())
            sess.run(init_op)
            for _ in range(100):
                sess.run(train_op)
            w_np = sess.run(w)
            self.assertAllClose(w_np.flat, [0.4, 0.2, -0.5], rtol=1e-2, atol=1e-2) 
Developer: Socialbird-AILab, Project: BERT-Classification-Tutorial, Lines: 22, Source: optimization_test.py

Example 2: wrap_variable

# Required import: import tensorflow [as alias]
# Or: from tensorflow import get_variable [as alias]
def wrap_variable(self, var):
        """wrap layer.w into variables"""
        val = self.lay.w.get(var, None)
        if val is None:
            shape = self.lay.wshape[var]
            args = [0., 1e-2, shape]
            if 'moving_mean' in var:
                val = np.zeros(shape)
            elif 'moving_variance' in var:
                val = np.ones(shape)
            else:
                val = np.random.normal(*args)
            self.lay.w[var] = val.astype(np.float32)
            self.act = 'Init '
        if not self.var: return

        val = self.lay.w[var]
        self.lay.w[var] = tf.constant_initializer(val)
        if var in self._SLIM: return
        with tf.variable_scope(self.scope):
            self.lay.w[var] = tf.get_variable(var,
                shape = self.lay.wshape[var],
                dtype = tf.float32,
                initializer = self.lay.w[var]) 
Developer: AmeyaWagh, Project: Traffic_sign_detection_YOLO, Lines: 26, Source: baseop.py

Example 3: setUp

# Required import: import tensorflow [as alias]
# Or: from tensorflow import get_variable [as alias]
def setUp(self):
        super(TestRunnerMultiGPU, self).setUp()
        self.sess = tf.Session()

        inputs = []
        outputs = []
        self.niter = 10
        niter = self.niter
        # A Simple graph with `niter` sub-graphs.
        with tf.variable_scope(None, 'runner'):
            for i in range(niter):
                v = tf.get_variable('v%d' % i, shape=(100, 10))
                w = tf.get_variable('w%d' % i, shape=(100, 1))

                inputs += [{'v': v, 'w': w}]
                outputs += [{'v': v, 'w': w}]

        self.runner = RunnerMultiGPU(inputs, outputs, sess=self.sess) 
Developer: StephanZheng, Project: neural-fingerprinting, Lines: 20, Source: test_runner.py

Example 4: set_input_shape

# Required import: import tensorflow [as alias]
# Or: from tensorflow import get_variable [as alias]
def set_input_shape(self, input_shape):
        batch_size, rows, cols, input_channels = input_shape
        kernel_shape = tuple(self.kernel_shape) + (input_channels,
                                                   self.output_channels)
        assert len(kernel_shape) == 4
        assert all(isinstance(e, int) for e in kernel_shape), kernel_shape
        with tf.variable_scope(self.name):
            init = tf.truncated_normal(kernel_shape, stddev=0.1)
            self.kernels = self.get_variable(self.w_name, init)
            self.b = self.get_variable(
                'b', .1 + np.zeros((self.output_channels,)).astype('float32'))
        input_shape = list(input_shape)
        self.input_shape = input_shape
        input_shape[0] = 1
        dummy_batch = tf.zeros(input_shape)
        dummy_output = self.fprop(dummy_batch)
        output_shape = [int(e) for e in dummy_output.get_shape()]
        output_shape[0] = 1
        self.output_shape = tuple(output_shape) 
Developer: StephanZheng, Project: neural-fingerprinting, Lines: 21, Source: model.py

Example 5: _create_loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import get_variable [as alias]
def _create_loss(self):
        """ Step 4: define the loss function """
        with tf.name_scope('loss'):
            # construct variables for NCE loss
            nce_weight = tf.get_variable('nce_weight',
                                         shape=[self.vocab_size, self.embed_size],
                                         initializer=tf.truncated_normal_initializer(
                                             stddev=1.0 / (self.embed_size ** 0.5)))
            nce_bias = tf.get_variable('nce_bias', initializer=tf.zeros([self.vocab_size]))

            # define loss function to be NCE loss function
            self.loss = tf.reduce_mean(tf.nn.nce_loss(weights=nce_weight,
                                                      biases=nce_bias,
                                                      labels=self.target_words,
                                                      inputs=self.embed,
                                                      num_sampled=self.num_sampled,
                                                      num_classes=self.vocab_size), name='loss') 
Developer: wdxtub, Project: deep-learning-note, Lines: 19, Source: 11_w2v_visual.py

Example 6: build_permutation

# Required import: import tensorflow [as alias]
# Or: from tensorflow import get_variable [as alias]
def build_permutation(self):

        with tf.variable_scope("encoder"):
            
            with tf.variable_scope("embedding"):
                # Embed input sequence
                W_embed =tf.get_variable("weights", [1,self.input_dimension+2, self.input_embed], initializer=self.initializer) # +2 for TW feat. here too
                embedded_input = tf.nn.conv1d(self.input_, W_embed, 1, "VALID", name="embedded_input")
                # Batch Normalization
                embedded_input = tf.layers.batch_normalization(embedded_input, axis=2, training=self.is_training, name='layer_norm', reuse=None)

            with tf.variable_scope("dynamic_rnn"):
                # Encode input sequence
                cell1 = LSTMCell(self.num_neurons, initializer=self.initializer)  # BNLSTMCell(self.num_neurons, self.training) or cell1 = DropoutWrapper(cell1, output_keep_prob=0.9)
                # Return the output activations [Batch size, Sequence Length, Num_neurons] and last hidden state as tensors.
                encoder_output, encoder_state = tf.nn.dynamic_rnn(cell1, embedded_input, dtype=tf.float32)

        with tf.variable_scope('decoder'):
            # Ptr-net returns permutations (self.positions), with their log-probability for backprop
            self.ptr = Pointer_decoder(encoder_output, self.config)
            self.positions, self.log_softmax, self.attending, self.pointing = self.ptr.loop_decode(encoder_state)
            variable_summaries('log_softmax', self.log_softmax, with_max_min=True) 
Developer: MichelDeudon, Project: neural-combinatorial-optimization-rl-tensorflow, Lines: 24, Source: actor.py

Example 7: encode

# Required import: import tensorflow [as alias]
# Or: from tensorflow import get_variable [as alias]
def encode(self, inputs):

        # Tensor blocks holding the input sequences [Batch Size, Sequence Length, Features]
        #self.input_ = tf.placeholder(tf.float32, [self.batch_size, self.max_length, self.input_dimension], name="input_raw")

        with tf.variable_scope("embedding"):
          # Embed input sequence
          W_embed =tf.get_variable("weights",[1,self.input_dimension, self.input_embed], initializer=self.initializer)
          self.embedded_input = tf.nn.conv1d(inputs, W_embed, 1, "VALID", name="embedded_input")
          # Batch Normalization
          self.enc = tf.layers.batch_normalization(self.embedded_input, axis=2, training=self.is_training, name='layer_norm', reuse=None)
        
        with tf.variable_scope("stack"):
          # Blocks
          for i in range(self.num_stacks): # num blocks
              with tf.variable_scope("block_{}".format(i)):
                  ### Multihead Attention
                  self.enc = multihead_attention(self.enc, num_units=self.input_embed, num_heads=self.num_heads, dropout_rate=0.1, is_training=self.is_training)
                  
                  ### Feed Forward
                  self.enc = feedforward(self.enc, num_units=[4*self.input_embed, self.input_embed], is_training=self.is_training)

          # Return the output activations [Batch size, Sequence Length, Num_neurons] as tensors.
          self.encoder_output = self.enc ### NOTE: encoder_output is the ref for attention ###
          return self.encoder_output 
Developer: MichelDeudon, Project: neural-combinatorial-optimization-rl-tensorflow, Lines: 27, Source: encoder.py

Example 8: __call__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import get_variable [as alias]
def __call__(self, inputs, state, scope=None):
    """GRU cell with layer normalization."""
    input_dim = inputs.get_shape().as_list()[1]
    num_units = self._num_units

    with tf.variable_scope(scope or "gru_cell"):
      with tf.variable_scope("gates"):
        w_h = tf.get_variable(
            "w_h", [num_units, 2 * num_units],
            initializer=self._w_h_initializer())
        w_x = tf.get_variable(
            "w_x", [input_dim, 2 * num_units],
            initializer=self._w_x_initializer(input_dim))
        z_and_r = (_layer_norm(tf.matmul(state, w_h), scope="layer_norm/w_h") +
                   _layer_norm(tf.matmul(inputs, w_x), scope="layer_norm/w_x"))
        z, r = tf.split(tf.sigmoid(z_and_r), 2, 1)
      with tf.variable_scope("candidate"):
        w = tf.get_variable(
            "w", [input_dim, num_units], initializer=self._w_initializer)
        u = tf.get_variable(
            "u", [num_units, num_units], initializer=self._u_initializer)
        h_hat = (r * _layer_norm(tf.matmul(state, u), scope="layer_norm/u") +
                 _layer_norm(tf.matmul(inputs, w), scope="layer_norm/w"))
      new_h = (1 - z) * state + z * self._activation(h_hat)
    return new_h, new_h 
Developer: ringringyi, Project: DOTA_models, Lines: 27, Source: gru_cell.py

Example 9: variable_on_cpu

# Required import: import tensorflow [as alias]
# Or: from tensorflow import get_variable [as alias]
def variable_on_cpu(name, shape, initializer, trainable=True):
    """Helper to create a Variable stored on CPU memory.

    Args:
            name: name of the variable
            shape: list of ints
            initializer: initializer for Variable
            trainable: boolean defining if the variable is for training
    Returns:
            Variable Tensor
    """
    # Pin the variable to CPU memory, as the function name and docstring promise.
    with tf.device('/cpu:0'):
        var = tf.get_variable(
            name, shape, initializer=initializer, trainable=trainable)
    return var


# layers 
Developer: ringringyi, Project: DOTA_models, Lines: 19, Source: real_nvp_utils.py

Example 10: _apply_with_captured_variables

# Required import: import tensorflow [as alias]
# Or: from tensorflow import get_variable [as alias]
def _apply_with_captured_variables(self, function):
    """Applies a function using previously-captured variables.

    Args:
      function: Function to apply using captured variables.  The function
          should take one argument, its enclosing variable scope.

    Returns:
      Results of function application.
    """

    def _custom_getter(getter, *args, **kwargs):
      """Retrieves the normal or moving-average variables."""
      return self._component.get_variable(var_params=getter(*args, **kwargs))

    with tf.variable_scope(
        'cell', reuse=True, custom_getter=_custom_getter) as scope:
      return function(scope) 
Developer: ringringyi, Project: DOTA_models, Lines: 20, Source: wrapped_units.py

Example 11: conv_linear

# Required import: import tensorflow [as alias]
# Or: from tensorflow import get_variable [as alias]
def conv_linear(args, kw, kh, nin, nout, rate, do_bias, bias_start, prefix):
  """Convolutional linear map."""
  if not isinstance(args, (list, tuple)):
    args = [args]
  with tf.variable_scope(prefix):
    with tf.device("/cpu:0"):
      k = tf.get_variable("CvK", [kw, kh, nin, nout])
    if len(args) == 1:
      arg = args[0]
    else:
      arg = tf.concat(axis=3, values=args)
    res = tf.nn.convolution(arg, k, dilation_rate=(rate, 1), padding="SAME")
    if not do_bias: return res
    with tf.device("/cpu:0"):
      bias_term = tf.get_variable(
          "CvB", [nout], initializer=tf.constant_initializer(bias_start))
    bias_term = tf.reshape(bias_term, [1, 1, 1, nout])
    return res + bias_term 
Developer: ringringyi, Project: DOTA_models, Lines: 20, Source: neural_gpu.py

Example 12: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import get_variable [as alias]
def __init__(self, config):

		entity_total = config.entity
		relation_total = config.relation
		batch_size = config.batch_size
		size = config.hidden_size
		margin = config.margin

		self.pos_h = tf.placeholder(tf.int32, [None])
		self.pos_t = tf.placeholder(tf.int32, [None])
		self.pos_r = tf.placeholder(tf.int32, [None])

		self.neg_h = tf.placeholder(tf.int32, [None])
		self.neg_t = tf.placeholder(tf.int32, [None])
		self.neg_r = tf.placeholder(tf.int32, [None])

		with tf.name_scope("embedding"):
			self.ent_embeddings = tf.get_variable(name = "ent_embedding", shape = [entity_total, size], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
			self.rel_embeddings = tf.get_variable(name = "rel_embedding", shape = [relation_total, size], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
			pos_h_e = tf.nn.embedding_lookup(self.ent_embeddings, self.pos_h)
			pos_t_e = tf.nn.embedding_lookup(self.ent_embeddings, self.pos_t)
			pos_r_e = tf.nn.embedding_lookup(self.rel_embeddings, self.pos_r)
			neg_h_e = tf.nn.embedding_lookup(self.ent_embeddings, self.neg_h)
			neg_t_e = tf.nn.embedding_lookup(self.ent_embeddings, self.neg_t)
			neg_r_e = tf.nn.embedding_lookup(self.rel_embeddings, self.neg_r)

		if config.L1_flag:
			pos = tf.reduce_sum(abs(pos_h_e + pos_r_e - pos_t_e), 1, keep_dims = True)
			neg = tf.reduce_sum(abs(neg_h_e + neg_r_e - neg_t_e), 1, keep_dims = True)
			self.predict = pos
		else:
			pos = tf.reduce_sum((pos_h_e + pos_r_e - pos_t_e) ** 2, 1, keep_dims = True)
			neg = tf.reduce_sum((neg_h_e + neg_r_e - neg_t_e) ** 2, 1, keep_dims = True)
			self.predict = pos

		with tf.name_scope("output"):
			self.loss = tf.reduce_sum(tf.maximum(pos - neg + margin, 0)) 
Developer: thunlp, Project: TensorFlow-TransX, Lines: 39, Source: transE.py

Example 13: build_input_graph

# Required import: import tensorflow [as alias]
# Or: from tensorflow import get_variable [as alias]
def build_input_graph(self, vocab_size, emb_size, word_vocab_size, word_emb_size, word_window_size):
        """
        Gather embeddings from lookup tables.
        """
        seq_ids = tf.placeholder(dtype=INT_TYPE, shape=[None, None], name='seq_ids')
        seq_word_ids = [tf.placeholder(dtype=INT_TYPE, shape=[None, None], name='seq_feature_%d_ids' % i)
                        for i in range(word_window_size)]
        embeddings = tf.get_variable('embeddings', [vocab_size, emb_size])
        embedding_output = tf.nn.embedding_lookup([embeddings], seq_ids)
        word_outputs = []
        word_embeddings = tf.get_variable('word_embeddings', [word_vocab_size, word_emb_size])
        for i in range(word_window_size):
            word_outputs.append(tf.nn.embedding_lookup([word_embeddings], seq_word_ids[i]))

        return seq_ids, seq_word_ids, tf.concat([embedding_output] + word_outputs, 2, 'inputs') 
Developer: chqiwang, Project: convseg, Lines: 17, Source: tagger.py

Example 14: inference

# Required import: import tensorflow [as alias]
# Or: from tensorflow import get_variable [as alias]
def inference(self, scores, sequence_lengths=None):
        """
        Inference label sequence given scores.
        If a CRF is used (transitions are available), perform Viterbi search; otherwise perform greedy (argmax) search.

        Args:
            scores: A numpy array with shape (batch, max_length, num_tags).
            sequence_lengths: A numpy array with shape (batch,).

        Returns:
            A numpy array with shape (batch, max_length).
        """

        if not self.parameters['use_crf']:
            return np.argmax(scores, 2)
        else:
            with tf.variable_scope(self.scope, reuse=True):
                transitions = tf.get_variable('transitions').eval(session=self.sess)
            paths = np.zeros(scores.shape[:2], dtype=INT_TYPE)
            for i in range(scores.shape[0]):
                tag_score, length = scores[i], sequence_lengths[i]
                if length == 0:
                    continue
                path, _ = crf.viterbi_decode(tag_score[:length], transitions)
                paths[i, :length] = path
            return paths 
Developer: chqiwang, Project: convseg, Lines: 28, Source: tagger.py

Example 15: get_weight

# Required import: import tensorflow [as alias]
# Or: from tensorflow import get_variable [as alias]
def get_weight(shape, gain=np.sqrt(2), use_wscale=False, fan_in=None):
    if fan_in is None: fan_in = np.prod(shape[:-1])
    std = gain / np.sqrt(fan_in) # He init
    if use_wscale:
        wscale = tf.constant(np.float32(std), name='wscale')
        return tf.get_variable('weight', shape=shape, initializer=tf.initializers.random_normal()) * wscale
    else:
        return tf.get_variable('weight', shape=shape, initializer=tf.initializers.random_normal(0, std))

#----------------------------------------------------------------------------
# Fully-connected layer. 
Developer: zalandoresearch, Project: disentangling_conditional_gans, Lines: 13, Source: networks.py


Note: The tensorflow.get_variable examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors; for distribution and use, please refer to each project's License. Do not reproduce without permission.