

Python rnn_cell.linear Function Code Examples

This article collects typical usage examples of the tensorflow.python.ops.rnn_cell.linear function in Python. If you are wondering how the linear function is used in practice, how to call it, or what real-world examples look like, the curated code samples below should help.


The following shows 15 code examples of the linear function, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
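Before the examples, here is a minimal sketch of how the function is typically called. This assumes a TensorFlow 0.x release where rnn_cell.linear is still exposed under tensorflow.python.ops; the tensor shapes and scope name are made up for illustration only:

import tensorflow as tf
from tensorflow.python.ops import rnn_cell

# linear(args, output_size, bias, bias_start=0.0, scope=None) applies a single
# learned weight matrix (plus optional bias) to one or more 2D input tensors.
x = tf.placeholder(tf.float32, [None, 3])   # [batch, 3]
h = tf.placeholder(tf.float32, [None, 4])   # [batch, 4]
with tf.variable_scope("linear_demo"):
    # Concatenates x and h along the feature axis and projects the result
    # to 5 output units, yielding a [batch, 5] tensor.
    y = rnn_cell.linear([x, h], 5, True)

Most of the examples below follow this pattern: a list (or single tensor) of 2D inputs, an output size, a boolean bias flag, and optionally a bias initializer such as 1.0.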

Example 1: __call__

 def __call__(self, inputs, state, scope=None):
   gru_out, gru_state = super(GRUCellAttn, self).__call__(inputs, state, scope)
   with vs.variable_scope(scope or type(self).__name__):
     with vs.variable_scope("Attn2"):
       gamma_h = tanh(rnn_cell.linear(gru_out, self._num_units, True, 1.0))
     weights = tf.reduce_sum(self.phi_hs * gamma_h, reduction_indices=2, keep_dims=True)
     weights = tf.exp(weights - tf.reduce_max(weights, reduction_indices=0, keep_dims=True))
     weights = weights / (1e-6 + tf.reduce_sum(weights, reduction_indices=0, keep_dims=True))
     context = tf.reduce_sum(self.hs * weights, reduction_indices=0)
     with vs.variable_scope("AttnConcat"):
       out = tf.nn.relu(rnn_cell.linear([context, gru_out], self._num_units, True, 1.0))
     self.attn_map = tf.squeeze(tf.slice(weights, [0, 0, 0], [-1, -1, 1]))
     return (out, out) 
Developer: hrishikeshvganu, Project: nlc, Lines of code: 13, Source file: nlc_model.py

Example 2: __call__

	def __call__(self, inputs, state, episodic_gate, scope=None):
		"""Gated recurrent unit (GRU) with nunits cells."""
		
		with vs.variable_scope("MGRUCell"):  # "GRUCell"
			with vs.variable_scope("Gates"):	# Reset gate and update gate.
				# We start with bias of 1.0 to not reset and not update.
				r = rnn_cell.linear([inputs, state], self._num_units, True, 1.0, scope=scope)
				r = sigmoid(r)
			with vs.variable_scope("Candidate"):
				c = tanh(rnn_cell.linear([inputs, r * state], self._num_units, True))
			
			new_h = tf.mul(episodic_gate, c) + tf.mul((1 - episodic_gate), state)
		return new_h, new_h
Developer: sufengniu, Project: DMN-tensorflow, Lines of code: 13, Source file: cell.py

Example 3: downscale

 def downscale(self, inp):
   with vs.variable_scope("Downscale"):
     inp2d = tf.reshape(tf.transpose(inp, perm=[1, 0, 2]), [-1, 2 * self.size])
     out2d = rnn_cell.linear(inp2d, self.size, True, 1.0)
     out3d = tf.reshape(out2d, [self.batch_size, -1, self.size])
     out3d = tf.transpose(out3d, perm=[1, 0, 2])
     out = tanh(out3d)
   return out
Developer: nipengmath, Project: nlc, Lines of code: 8, Source file: nlc_model.py

Example 4: __init__

 def __init__(self, num_units, encoder_output, scope=None):
   self.hs = encoder_output
   with vs.variable_scope(scope or type(self).__name__):
     with vs.variable_scope("Attn1"):
       hs2d = tf.reshape(self.hs, [-1, num_units])
       phi_hs2d = tanh(rnn_cell.linear(hs2d, num_units, True, 1.0))
       self.phi_hs = tf.reshape(phi_hs2d, tf.shape(self.hs))
   super(GRUCellAttn, self).__init__(num_units)
Developer: hrishikeshvganu, Project: nlc, Lines of code: 8, Source file: nlc_model.py

Example 5: attention

 def attention(query):
     """Point on hidden using hidden_features and query."""
     with vs.variable_scope("Attention"):
         y = rnn_cell.linear(query, attention_vec_size, True)
         y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size])
         # Attention mask is a softmax of v^T * tanh(...).
         s = math_ops.reduce_sum(
             v * math_ops.tanh(hidden_features + y), [2, 3])
         return s
Developer: heshizhu, Project: TensorFlow-Pointer-Networks, Lines of code: 9, Source file: pointer.py

Example 6: testLinear

  def testLinear(self):
    with self.test_session() as sess:
      with tf.variable_scope("root", initializer=tf.constant_initializer(1.0)):
        x = tf.zeros([1, 2])
        l = linear([x], 2, False)
        sess.run([tf.initialize_all_variables()])
        res = sess.run([l], {x.name: np.array([[1., 2.]])})
        self.assertAllClose(res[0], [[3.0, 3.0]])

        # Checks prevent you from accidentally creating a shared function.
        with self.assertRaises(ValueError):
          l1 = linear([x], 2, False)

        # But you can create a new one in a new scope and share the variables.
        with tf.variable_scope("l1") as new_scope:
          l1 = linear([x], 2, False)
        with tf.variable_scope(new_scope, reuse=True):
          linear([l1], 2, False)
        self.assertEqual(len(tf.trainable_variables()), 2)
Developer: 0-T-0, Project: tensorflow, Lines of code: 19, Source file: rnn_cell_test.py

Example 7: build_encoder

    def build_encoder(self):
        """Inference Network. q(h|X)"""
        with tf.variable_scope("encoder"):
            self.l1_lin = linear(tf.expand_dims(self.x, 0), self.embed_dim, bias=True, scope="l1")
            self.l1 = tf.nn.relu(self.l1_lin)

            self.l2_lin = linear(self.l1, self.embed_dim, bias=True, scope="l2")
            self.l2 = tf.nn.relu(self.l2_lin)

            self.mu = linear(self.l2, self.h_dim, bias=True, scope="mu")
            self.log_sigma_sq = linear(self.l2, self.h_dim, bias=True, scope="log_sigma_sq")

            self.eps = tf.random_normal((1, self.h_dim), 0, 1, dtype=tf.float32)
            self.sigma = tf.sqrt(tf.exp(self.log_sigma_sq))

            self.h = tf.add(self.mu, tf.mul(self.sigma, self.eps))

            _ = tf.histogram_summary("mu", self.mu)
            _ = tf.histogram_summary("sigma", self.sigma)
            _ = tf.histogram_summary("h", self.h)
            _ = tf.histogram_summary("mu + sigma", self.mu + self.sigma)
Developer: tonydeep, Project: variational-text-tensorflow, Lines of code: 21, Source file: nvdm.py

Example 8: attention

 def attention(query): 
   """Put attention masks on hidden using hidden_features and query."""
   with vs.variable_scope("Attention"):
     # Attention mask is a softmax of h_in^T*decoder_hidden.
     dec_hid = array_ops.tile(query, [1, attn_length]) # replicate query for element-wise multiplication
     dec_hid = array_ops.reshape(dec_hid, [-1, attn_length, attention_vec_size])
     attn_weight = nn_ops.softmax(math_ops.reduce_sum(attention_states*dec_hid, [2])) # attn weights for every hidden states in encoder
     # Now calculate the attention-weighted vector (context vector) cc.
     cc = math_ops.reduce_sum(array_ops.reshape(attn_weight, [-1, attn_length, 1, 1])*hidden, [1,2])
     # attented hidden state
     with vs.variable_scope("AttnW1"):
       term1 = rnn_cell.linear(query, attn_size, False)
     with vs.variable_scope("AttnW2"):
       term2 = rnn_cell.linear(cc, attn_size, False)
     # environment representation
     if env: # 2D Tensor of shape [batch_size, env_size]
       with vs.variable_scope("Environment"):
         term3 = rnn_cell.linear(math_ops.to_float(env), attn_size, False)
       h_attn = math_ops.tanh(term1 + term2 + term3)
     else:
       h_attn = math_ops.tanh(term1 + term2)
   return h_attn, attn_weight
Developer: LittleYUYU, Project: RobotNavigateNLP, Lines of code: 22, Source file: seq2seq_ops.py

Example 9: attention

 def attention(query):
     """Put attention masks on hidden using hidden_features and query."""
     ds = []  # Results of attention reads will be stored here.
     for a in xrange(num_heads):
         with variable_scope.variable_scope("Attention_%d" % a):
             y = rnn_cell.linear(query, attention_vec_size, True)
             y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size])
             # Attention mask is a softmax of v^T * tanh(...).
             s = math_ops.reduce_sum(v[a] * math_ops.tanh(hidden_features[a] + y), [2, 3])
             a = nn_ops.softmax(s)
             # Now calculate the attention-weighted vector d.
             d = math_ops.reduce_sum(array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden, [1, 2])
             ds.append(array_ops.reshape(d, [-1, attn_size]))
     return ds
Developer: sherrym, Project: tensorflow, Lines of code: 14, Source file: seq2seq.py

Example 10: setup_label_loss

    def setup_label_loss(self):
        with vs.variable_scope("LabelLogistic"):
            doshape = tf.shape(self.decoder_output)
            T, batch_size = doshape[0], doshape[1]

            # [batch_size, cell.state_size]
            # decoder_output: [batch_size, time_step, cell.state_size]
            last_state = self.decoder_output[:, -1, :]

            # projecting to label space
            # [batch_size, label_size]
            logits = rnn_cell.linear(last_state, self.label_size, True, 1.0)
            self.losses = tf.nn.softmax_cross_entropy_with_logits(logits, self.label_placeholder)
            self.predictions = logits
Developer: windweller, Project: Trident, Lines of code: 14, Source file: story_model.py

Example 11: setup_loss

  def setup_loss(self):
    with vs.variable_scope("Logistic"):
      do2d = tf.reshape(self.decoder_output, [-1, self.size])
      logits2d = rnn_cell.linear(do2d, self.vocab_size, True, 1.0)
      outputs2d = tf.nn.softmax(logits2d)
      self.outputs = tf.reshape(outputs2d, [-1, self.batch_size, self.vocab_size])

      targets_no_GO = tf.slice(self.target_tokens, [1, 0], [-1, -1])
      masks_no_GO = tf.slice(self.target_mask, [1, 0], [-1, -1])
      # easier to pad target/mask than to split decoder input since tensorflow does not support negative indexing
      labels1d = tf.reshape(tf.pad(targets_no_GO, [[0, 1], [0, 0]]), [-1])
      mask1d = tf.reshape(tf.pad(masks_no_GO, [[0, 1], [0, 0]]), [-1])
      losses1d = tf.nn.sparse_softmax_cross_entropy_with_logits(logits2d, labels1d) * tf.to_float(mask1d)
      losses2d = tf.reshape(losses1d, [-1, self.batch_size])
      self.losses = tf.reduce_sum(losses2d) / self.batch_size
Developer: wanjinchang, Project: nlc, Lines of code: 15, Source file: nlc_model.py

Example 12: downscale

  def downscale(self, inp, mask):
    with vs.variable_scope("Downscale"):
      inp2d = tf.reshape(tf.transpose(inp, perm=[1, 0, 2]), [-1, 2 * self.size])
      out2d = rnn_cell.linear(inp2d, self.size, True, 1.0)
      out3d = tf.reshape(out2d, [self.batch_size, -1, self.size])
      out3d = tf.transpose(out3d, perm=[1, 0, 2])
      out = tanh(out3d)

      mask = tf.transpose(mask)
      mask = tf.reshape(mask, [-1, 2])
      mask = tf.cast(mask, tf.bool)
      mask = tf.reduce_any(mask, reduction_indices=1)
      mask = tf.to_int32(mask)
      mask = tf.reshape(mask, [self.batch_size, -1])
      mask = tf.transpose(mask)
    return out, mask
Developer: wanjinchang, Project: nlc, Lines of code: 16, Source file: nlc_model.py

Example 13: basic_rnn_cell

def basic_rnn_cell(inputs, state, num_units, scope=None):
    if state is None:
        if inputs is not None:
            batch_size = inputs.get_shape()[0]
            dtype = inputs.dtype
        else:
            batch_size = 0
            dtype = tf.float32
        init_output = tf.zeros(tf.pack([batch_size, num_units]), dtype=dtype)
        init_state = tf.zeros(tf.pack([batch_size, num_units]), dtype=dtype)
        init_output.set_shape([batch_size, num_units])
        init_state.set_shape([batch_size, num_units])
        return init_output, init_state
    else:
        with tf.variable_op_scope([inputs, state], scope, "BasicRNNCell"):
            output = tf.tanh(linear([inputs, state], num_units, True))
        return output, output
Developer: RuhiSharma, Project: tensorflow, Lines of code: 17, Source file: rnn_cell_test.py

Example 14: downscale

  def downscale(self, inp, mask):
    with vs.variable_scope("Downscale"):
      inshape = tf.shape(inp)
      T, batch_size, dim = inshape[0], inshape[1], inshape[2]
      inp2d = tf.reshape(tf.transpose(inp, perm=[1, 0, 2]), [-1, 2 * self.size])
      out2d = rnn_cell.linear(inp2d, self.size, True, 1.0)
      out3d = tf.reshape(out2d, tf.pack((batch_size, tf.to_int32(T/2), dim)))
      out3d = tf.transpose(out3d, perm=[1, 0, 2])
      out3d.set_shape([None, None, self.size])
      out = tanh(out3d)

      mask = tf.transpose(mask)
      mask = tf.reshape(mask, [-1, 2])
      mask = tf.cast(mask, tf.bool)
      mask = tf.reduce_any(mask, reduction_indices=1)
      mask = tf.to_int32(mask)
      mask = tf.reshape(mask, tf.pack([batch_size, -1]))
      mask = tf.transpose(mask)
    return out, mask
Developer: hrishikeshvganu, Project: nlc, Lines of code: 19, Source file: nlc_model.py

Example 15: dnn

def dnn(tensor_in, hidden_units, activation=nn.relu, dropout=None):
  """Creates fully connected deep neural network subgraph.

  Args:
    tensor_in: tensor or placeholder for input features.
    hidden_units: list of counts of hidden units in each layer.
    activation: activation function between layers. Can be None.
    dropout: if not None, will add a dropout layer with given probability.

  Returns:
    A tensor which would be a deep neural network.
  """
  with vs.variable_scope('dnn'):
    for i, n_units in enumerate(hidden_units):
      with vs.variable_scope('layer%d' % i):
        tensor_in = rnn_cell.linear(tensor_in, n_units, True)
        if activation is not None:
          tensor_in = activation(tensor_in)
        if dropout is not None:
          tensor_in = dropout_ops.dropout(tensor_in, prob=(1.0 - dropout))
    return tensor_in
Developer: 01bui, Project: tensorflow, Lines of code: 21, Source file: dnn_ops.py


Note: the tensorflow.python.ops.rnn_cell.linear examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Please do not reproduce this article without permission.