

Python rnn_cell_impl._linear Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.ops.rnn_cell_impl._linear. If you are wondering how rnn_cell_impl._linear is used in practice, or what real code calling it looks like, the curated examples below should help. You can also explore further usage examples from the containing module, tensorflow.python.ops.rnn_cell_impl.


Seven code examples of the rnn_cell_impl._linear method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
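Before turning to the examples, it helps to know what _linear actually does. The sketch below approximates its TF 1.x semantics (simplified: the real implementation in tensorflow/python/ops/rnn_cell_impl.py also handles nested arguments and partitioned variables, so treat this as an illustration, not the actual TensorFlow source):

# A minimal sketch of rnn_cell_impl._linear's TF 1.x behavior:
# concatenate the inputs on the last axis, then apply x @ W (+ b).
import tensorflow as tf

def linear_sketch(args, output_size, bias, bias_initializer=None):
    if not isinstance(args, (list, tuple)):
        args = [args]
    total_arg_size = sum(int(a.get_shape()[-1]) for a in args)
    weights = tf.get_variable("kernel", [total_arg_size, output_size])
    x = args[0] if len(args) == 1 else tf.concat(args, 1)
    res = tf.matmul(x, weights)
    if bias:
        b = tf.get_variable(
            "bias", [output_size],
            initializer=bias_initializer or tf.zeros_initializer())
        res = tf.nn.bias_add(res, b)
    return res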

Example 1: _attention

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import _linear [as alias]
def _attention(self, query, attn_states):
    conv2d = nn_ops.conv2d
    reduce_sum = math_ops.reduce_sum
    softmax = nn_ops.softmax
    tanh = math_ops.tanh

    with vs.variable_scope("attention"):
      k = vs.get_variable(
          "attn_w", [1, 1, self._attn_size, self._attn_vec_size])
      v = vs.get_variable("attn_v", [self._attn_vec_size])
      hidden = array_ops.reshape(attn_states,
                                 [-1, self._attn_length, 1, self._attn_size])
      hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
      y = _linear(query, self._attn_vec_size, True)
      y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
      s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
      a = softmax(s)
      d = reduce_sum(
          array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
      new_attns = array_ops.reshape(d, [-1, self._attn_size])
      new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
      return new_attns, new_attn_states 
Author: ryfeus | Project: lambda-packs | Lines: 24 | Source: rnn_cell.py
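The score s = reduce_sum(v * tanh(hidden_features + y), [2, 3]) above is standard Bahdanau-style additive attention. The following NumPy illustration walks through the same shape arithmetic; all names and sizes are arbitrary stand-ins, not taken from the TensorFlow source:

# batch=2, attn_length=5, attn_size=8, attn_vec_size=6 are arbitrary.
import numpy as np

batch, attn_length, attn_size, attn_vec_size = 2, 5, 8, 6
hidden = np.random.randn(batch, attn_length, 1, attn_size)
k = np.random.randn(attn_size, attn_vec_size)   # the 1x1 conv kernel
v = np.random.randn(attn_vec_size)

# A 1x1 conv is a per-position projection of each attention state.
hidden_features = hidden.reshape(-1, attn_size) @ k
hidden_features = hidden_features.reshape(batch, attn_length, 1, attn_vec_size)

y = np.random.randn(batch, 1, 1, attn_vec_size)           # projected query
s = (v * np.tanh(hidden_features + y)).sum(axis=(2, 3))   # scores [batch, attn_length]
a = np.exp(s) / np.exp(s).sum(axis=1, keepdims=True)      # softmax weights
d = (a.reshape(batch, attn_length, 1, 1) * hidden).sum(axis=(1, 2))
assert d.shape == (batch, attn_size)  # one context vector per batch element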

Example 2: linear

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import _linear [as alias]
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0,
           is_train=None):
    if args is None or (nest.is_sequence(args) and not args):
        raise ValueError("`args` must be specified")
    if not nest.is_sequence(args):
        args = [args]

    flat_args = [flatten(arg, 1) for arg in args]
    if input_keep_prob < 1.0:
        assert is_train is not None
        flat_args = [tf.cond(is_train, lambda: tf.nn.dropout(arg, input_keep_prob), lambda: arg)
                     for arg in flat_args]
    with tf.variable_scope(scope or 'Linear'):
        flat_out = _linear(flat_args, output_size, bias, bias_initializer=tf.constant_initializer(bias_start))
    out = reconstruct(flat_out, args[0], 1)
    if squeeze:
        out = tf.squeeze(out, [len(args[0].get_shape().as_list())-1])
    if wd:
        add_wd(wd)

    return out 
Author: IsaacChanghau | Project: AmusingPythonCodes | Lines: 23 | Source: nn.py
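A hedged usage sketch of the wrapper above (it assumes the repo's flatten, reconstruct, and add_wd helpers are in scope; the tensor shapes are purely illustrative):

# Hypothetical usage: project the last dimension 50 -> 100,
# applying dropout to the inputs at train time only.
x = tf.placeholder(tf.float32, [None, 20, 50])   # [batch, seq_len, dim]
is_train = tf.placeholder(tf.bool, [])
h = linear(x, 100, bias=True, scope="proj",
           input_keep_prob=0.8, is_train=is_train)  # [batch, seq_len, 100]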

Example 3: call

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import _linear [as alias]
def call(self, inputs, state):
    """Run the input projection and then the cell."""
    # Default scope: "InputProjectionWrapper"
    projected = _linear(inputs, self._num_proj, True)
    if self._activation:
      projected = self._activation(projected)
    return self._cell(projected, state) 
Author: ryfeus | Project: lambda-packs | Lines: 9 | Source: core_rnn_cell.py
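In TF 1.x this call method belongs to the InputProjectionWrapper exposed under tf.contrib.rnn; a hedged usage sketch (shapes and sizes are illustrative):

# Wrap a cell so inputs of any width are projected via _linear to the
# size the inner cell expects before each step.
import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, None, 300])  # [batch, time, dim]
cell = tf.contrib.rnn.InputProjectionWrapper(
    tf.nn.rnn_cell.GRUCell(128), num_proj=128)
outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
# outputs: [batch, time, 128]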

Example 4: call

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import _linear [as alias]
def call(self, inputs, state):
    """Long short-term memory cell with attention (LSTMA)."""
    if self._state_is_tuple:
      state, attns, attn_states = state
    else:
      states = state
      state = array_ops.slice(states, [0, 0], [-1, self._cell.state_size])
      attns = array_ops.slice(
          states, [0, self._cell.state_size], [-1, self._attn_size])
      attn_states = array_ops.slice(
          states, [0, self._cell.state_size + self._attn_size],
          [-1, self._attn_size * self._attn_length])
    attn_states = array_ops.reshape(attn_states,
                                    [-1, self._attn_length, self._attn_size])
    input_size = self._input_size
    if input_size is None:
      input_size = inputs.get_shape().as_list()[1]
    inputs = _linear([inputs, attns], input_size, True)
    lstm_output, new_state = self._cell(inputs, state)
    if self._state_is_tuple:
      new_state_cat = array_ops.concat(nest.flatten(new_state), 1)
    else:
      new_state_cat = new_state
    new_attns, new_attn_states = self._attention(new_state_cat, attn_states)
    with vs.variable_scope("attn_output_projection"):
      output = _linear([lstm_output, new_attns], self._attn_size, True)
    new_attn_states = array_ops.concat(
        [new_attn_states, array_ops.expand_dims(output, 1)], 1)
    new_attn_states = array_ops.reshape(
        new_attn_states, [-1, self._attn_length * self._attn_size])
    new_state = (new_state, new_attns, new_attn_states)
    if not self._state_is_tuple:
      new_state = array_ops.concat(list(new_state), 1)
    return output, new_state 
Author: ryfeus | Project: lambda-packs | Lines: 36 | Source: rnn_cell.py

Example 5: _linear

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import _linear [as alias]
def _linear(self, args):
    out_size = 4 * self._num_units
    proj_size = args.get_shape()[-1]
    weights = vs.get_variable("kernel", [proj_size, out_size])
    out = math_ops.matmul(args, weights)
    if not self._layer_norm:
      bias = vs.get_variable("bias", [out_size])
      out = nn_ops.bias_add(out, bias)
    return out 
Author: ryfeus | Project: lambda-packs | Lines: 11 | Source: rnn_cell.py
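The bias is skipped when self._layer_norm is set because layer normalization subtracts the per-row mean, so a uniform pre-norm shift has no effect, and the norm's own learned offset (beta) takes over the bias role. A quick NumPy check of that shift invariance (illustrative only):

import numpy as np

def layer_norm(x, eps=1e-6):
    mu = x.mean(axis=-1, keepdims=True)
    sigma = x.std(axis=-1, keepdims=True)
    return (x - mu) / (sigma + eps)

x = np.random.randn(4, 16)
assert np.allclose(layer_norm(x), layer_norm(x + 3.7))  # a constant shift vanishes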

Example 6: linear

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import _linear [as alias]
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, keep_prob=None, is_train=None):
    if args is None or (nest.is_sequence(args) and not args):
        raise ValueError("args must be specified")
    if not nest.is_sequence(args):
        args = [args]
    flat_args = [flatten(arg, 1) for arg in args]
    if keep_prob is not None and is_train is not None:
        flat_args = [tf.cond(is_train, lambda: tf.nn.dropout(arg, keep_prob), lambda: arg) for arg in flat_args]
    with tf.variable_scope(scope or 'linear'):
        flat_out = _linear(flat_args, output_size, bias, bias_initializer=tf.constant_initializer(bias_start))
    out = reconstruct(flat_out, args[0], 1)
    if squeeze:
        out = tf.squeeze(out, [len(args[0].get_shape().as_list())-1])
    return out 
Author: IsaacChanghau | Project: Dense_BiLSTM | Lines: 16 | Source: nns.py

Example 7: _encode

# Required import: from tensorflow.python.ops import rnn_cell_impl [as alias]
# Or: from tensorflow.python.ops.rnn_cell_impl import _linear [as alias]
def _encode(self, input_matrix, word_ids, embed_size):
        input_embeds = tf.nn.embedding_lookup(input_matrix, word_ids, name="input_embeds")

        M, K = self.M, self.K

        with tf.variable_scope("h"):
            h = tf.nn.tanh(_linear(input_embeds, M * K // 2, True))  # // keeps the layer size an integer under Python 3
        with tf.variable_scope("logits"):
            logits = _linear(h, M * K, True)
            logits = tf.log(tf.nn.softplus(logits) + 1e-8)
        logits = tf.reshape(logits, [-1, M, K], name="logits")
        return input_embeds, logits 
Author: zomux | Project: neuralcompressor | Lines: 14 | Source: embed_compress.py
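The reshaped logits hold one K-way score vector for each of the M codebooks. A hedged sketch of reading off discrete codes at export time (argmax decoding is an assumption here, not part of the snippet above):

# Hypothetical decoding step: pick the best-scoring entry in each of
# the M codebooks, yielding an M-dimensional discrete code per word.
codes = tf.argmax(logits, axis=2)  # [batch, M], integer values in [0, K)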


Note: The tensorflow.python.ops.rnn_cell_impl._linear examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various programmers; copyright in the source code remains with the original authors. Refer to each project's license before distributing or using the code, and do not republish without permission.