

Python core_rnn_cell_impl._linear Method Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl._linear, drawn from open-source projects. If you are wondering what core_rnn_cell_impl._linear does, or how to call it, the curated examples below should help. You can also explore further usage examples from the containing module, tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl.


Six code examples of core_rnn_cell_impl._linear are shown below, sorted by popularity by default.
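For context: in the TensorFlow 1.x contrib code, `_linear(args, output_size, bias)` concatenates one or more 2-D tensors along the feature axis, applies a learned weight matrix, and optionally adds a bias. A minimal functional sketch of that behavior (not the TensorFlow source itself):

import tensorflow as tf

def linear_sketch(args, output_size, bias, bias_start=0.0, scope=None):
    """Rough functional equivalent of core_rnn_cell_impl._linear.

    args: a 2-D Tensor or a list of 2-D Tensors, each [batch, dim_i].
    Returns concat(args, 1) @ W (+ b when bias is True).
    """
    if not isinstance(args, (list, tuple)):
        args = [args]
    total_dim = sum(a.get_shape()[1].value for a in args)
    with tf.variable_scope(scope or "Linear"):
        weights = tf.get_variable("weights", [total_dim, output_size])
        res = tf.matmul(tf.concat(args, 1), weights)
        if not bias:
            return res
        biases = tf.get_variable(
            "biases", [output_size],
            initializer=tf.constant_initializer(bias_start))
        return res + biases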

Example 1: __call__

# Required module import: from tensorflow.contrib.rnn.python.ops import core_rnn_cell_impl [as alias]
# Or: from tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl import _linear [as alias]
def __call__(self, inputs, state, scope=None):
        """Long short-term memory cell (LSTM).
        @param: inputs (batch,n)
        @param state: the states and hidden unit of the two cells
        """
        with tf.variable_scope(scope or type(self).__name__):
            c1, c2, h1, h2 = state

            # change bias argument to False since LN will add bias via shift
            concat = _linear([inputs, h1, h2], 5 * self._num_units, False)

            i, j, f1, f2, o = tf.split(value=concat, num_or_size_splits=5, axis=1)

            # add layer normalization to each gate
            i = ln(i, scope='i/')
            j = ln(j, scope='j/')
            f1 = ln(f1, scope='f1/')
            f2 = ln(f2, scope='f2/')
            o = ln(o, scope='o/')

            new_c = (c1 * tf.nn.sigmoid(f1 + self._forget_bias) +
                     c2 * tf.nn.sigmoid(f2 + self._forget_bias) + tf.nn.sigmoid(i) *
                     self._activation(j))

            # add layer_normalization in calculation of new hidden state
            new_h = self._activation(ln(new_c, scope='new_h/')) * tf.nn.sigmoid(o)
            new_state = LSTMStateTuple(new_c, new_h)

            return new_h, new_state 
Author: philipperemy, Project: tensorflow-multi-dimensional-lstm, Lines: 31, Source: md_lstm.py
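The `ln` helper called above is defined elsewhere in md_lstm.py; it applies layer normalization inside the given variable scope. A minimal sketch of what such a helper typically looks like (the actual signature and details in the project may differ):

def ln(tensor, scope=None, epsilon=1e-5):
    """Layer-normalize a 2-D tensor over its feature axis, with a
    learned per-feature scale and shift (hypothetical helper)."""
    assert len(tensor.get_shape()) == 2
    mean, variance = tf.nn.moments(tensor, [1], keep_dims=True)
    with tf.variable_scope(scope or "layer_norm"):
        scale = tf.get_variable("scale", shape=[tensor.get_shape()[1]],
                                initializer=tf.constant_initializer(1.0))
        shift = tf.get_variable("shift", shape=[tensor.get_shape()[1]],
                                initializer=tf.constant_initializer(0.0))
    return scale * (tensor - mean) / tf.sqrt(variance + epsilon) + shift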

Example 2: __call__

# Required module import: from tensorflow.contrib.rnn.python.ops import core_rnn_cell_impl [as alias]
# Or: from tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl import _linear [as alias]
def __call__(self, inputs, state, scope=None):
    with _checked_scope(self, scope or "rwa_cell", reuse=self._reuse):
      h, n, d, a_max = state

      with vs.variable_scope("u"):
        u = _linear(inputs, self._num_units, True)

      with vs.variable_scope("g"):
        g = _linear([inputs, h], self._num_units, True)

      with vs.variable_scope("a"):
        a = _linear([inputs, h], self._num_units, False) # The bias term when factored out of the numerator and denominator cancels and is unnecessary

      z = tf.multiply(u, tanh(g))

      a_newmax = tf.maximum(a_max, a)
      exp_diff = tf.exp(a_max - a_newmax)
      exp_scaled = tf.exp(a - a_newmax)

      n = tf.multiply(n, exp_diff) + tf.multiply(z, exp_scaled)  # Numerically stable update of numerator
      d = tf.multiply(d, exp_diff) + exp_scaled  # Numerically stable update of denominator
      h_new = self._activation(tf.div(n, d))

      new_state = RWACellTuple(h_new, n, d, a_newmax)

    return h_new, new_state 
Author: indiejoseph, Project: chinese-char-rnn, Lines: 28, Source: rwa_cell.py
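The `a_max` bookkeeping implements the standard running-max trick for a softmax-weighted average: both numerator and denominator are rescaled by exp(a_max - a_newmax), so the exponentials never overflow. A small self-contained illustration of the same idea (illustrative only, not from rwa_cell.py):

import numpy as np

def stable_weighted_average(z_steps, a_steps):
    """Running softmax-weighted average of z_t with logits a_t,
    rescaled by the running max so np.exp never overflows."""
    n = d = 0.0
    a_max = -np.inf
    for z, a in zip(z_steps, a_steps):
        a_newmax = max(a_max, a)
        scale = np.exp(a_max - a_newmax)   # rescale the old running sums
        n = n * scale + z * np.exp(a - a_newmax)
        d = d * scale + np.exp(a - a_newmax)
        a_max = a_newmax
    return n / d

# With logits this large, a naive implementation would overflow:
print(stable_weighted_average([1.0, 2.0, 3.0], [1000.0, 1001.0, 1002.0]))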

Example 3: unbalance_linear

# Required module import: from tensorflow.contrib.rnn.python.ops import core_rnn_cell_impl [as alias]
# Or: from tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl import _linear [as alias]
def unbalance_linear(self, args, output_size, bias, bias_start=0.0, scope=None):
        """Project `inputs` and `memory` separately and broadcast-add the results,
        returning a [batch_size, mem_size, output_size] tensor."""
        inputs = args[0]
        memory = args[1]
        with tf.variable_scope(scope or "UnbalanceLinear"):
            oi = _linear(inputs, output_size, False, scope='OI')
            oi = tf.reshape(oi, [self._batch_size, 1, output_size])

            memory = tf.reshape(memory, [self._batch_size * self._mem_size, self._mem_dim])
            os = _linear(memory, output_size, bias, bias_start, scope='OS')
            os = tf.reshape(os, [self._batch_size, self._mem_size, output_size])
            return oi + os 
Author: shiyemin, Project: shuttleNet, Lines: 13, Source: fops.py
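The final addition broadcasts the [batch_size, 1, output_size] input projection against the [batch_size, mem_size, output_size] memory projection, producing one combined feature vector per memory slot. A shape-only NumPy illustration (sizes arbitrary):

import numpy as np

batch_size, mem_size, output_size = 2, 4, 16
oi = np.zeros((batch_size, 1, output_size))         # input projection
os = np.zeros((batch_size, mem_size, output_size))  # per-slot memory projection
out = oi + os  # broadcasts over the mem_size axis
print(out.shape)  # (2, 4, 16)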

Example 4: __call__

# Required module import: from tensorflow.contrib.rnn.python.ops import core_rnn_cell_impl [as alias]
# Or: from tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl import _linear [as alias]
def __call__(self, inputs, state, scope=None):
        """GRU with attention."""

        with tf.variable_scope(scope or 'attention_cell_wrapper'):
            output, _ = self._cell(inputs, state)
            att = _linear([output, self._attn_vec], self.output_size, bias=True)
            output = output * tf.sigmoid(att)  # gate each feature of the output

        return output, output  # the gated output doubles as the new state
Author: codekansas, Project: liveqa2017, Lines: 11, Source: model.py
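In effect the wrapper computes a feature-wise gate from the concatenation of the cell output and a fixed attention vector, then scales the output by the sigmoid of that gate. A NumPy restatement of just the gating step (all names and shapes hypothetical):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

batch, units = 2, 3
output = np.random.randn(batch, units)
attn_vec = np.random.randn(batch, units)  # fixed attention vector
W = np.random.randn(2 * units, units)     # weights _linear would learn
b = np.zeros(units)

att = np.concatenate([output, attn_vec], axis=1) @ W + b
gated = output * sigmoid(att)             # suppress irrelevant features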

Example 5: __call__

# Required module import: from tensorflow.contrib.rnn.python.ops import core_rnn_cell_impl [as alias]
# Or: from tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl import _linear [as alias]
def __call__(self, inputs, state, scope=None):
        """Run one step of F-LSTM.

        Args:
          inputs: input Tensor, 2D, batch x num_units.
          state: this must be a tuple of state Tensors, both `2-D`, with column sizes `c_state` and `m_state`.
          scope: not used

        Returns:
          A tuple containing:

          - A `2-D` Tensor of shape `[batch x output_dim]` representing the output
            of the F-LSTM after reading `inputs` when the previous state was `state`.
            Here output_dim is:
               num_proj if num_proj was set,
               num_units otherwise.
          - Tensor(s) representing the new state of F-LSTM after reading `inputs` when
            the previous state was `state`.  Same type and shape(s) as `state`.

        Raises:
          ValueError: If input size cannot be inferred from inputs via
            static shape inference.
        """
        (c_prev, m_prev) = state

        input_size = inputs.get_shape().with_rank(2)[1]
        if input_size.value is None:
            raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
        with vs.variable_scope(scope or "flstm_cell",
                               initializer=self._initializer):
            with vs.variable_scope("factor"):
                fact = linear([inputs, m_prev], self._factor_size, False)
            concat = linear(fact, 4 * self._num_units, True)
            # i = input_gate, j = new_input, f = forget_gate, o = output_gate
            i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)

        c = math_ops.sigmoid(f + self._forget_bias) * c_prev + math_ops.sigmoid(i) * math_ops.tanh(j)
        m = math_ops.sigmoid(o) * self._activation(c)

        if self._num_proj is not None:
            with vs.variable_scope("projection"):
                m = linear(m, self._num_proj, bias=False, scope=scope)

        new_state = LSTMStateTuple(c, m)
        return m, new_state 
Author: okuchaiev, Project: f-lm, Lines: 48, Source: factorized_lstm_cells.py
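The point of the factorization is the parameter count: a standard LSTM needs one [(input_size + num_units) x 4*num_units] matrix, while the factored cell uses [(input_size + num_units) x factor_size] followed by [factor_size x 4*num_units]. A quick back-of-the-envelope comparison (sizes illustrative):

input_size, num_units, factor_size = 1024, 1024, 128

standard   = (input_size + num_units) * 4 * num_units
factorized = ((input_size + num_units) * factor_size
              + factor_size * 4 * num_units)
print(standard)    # 8388608
print(factorized)  # 786432 -- roughly a 10x reduction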

Example 6: att_weight

# Required module import: from tensorflow.contrib.rnn.python.ops import core_rnn_cell_impl [as alias]
# Or: from tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl import _linear [as alias]
def att_weight(decoder_inputs, attention_states,
               echocell=None,
               scope=None):
    """Compute softmax attention weights over `attention_states`.

    Args:
        decoder_inputs: A list of 2D Tensors [batch_size x cell.input_size].
        attention_states: 3D Tensor [batch_size x attn_length x attn_size].
        echocell: cell type name; 'tfGRU' selects the module-qualified _linear.
        scope: VariableScope for the created subgraph; default: "attention_decoder".

    Returns:
        The attention weights, a 2D Tensor [batch_size x attn_length].
    """
    if decoder_inputs is None:
        raise ValueError("Must provide at least 1 input to attention decoder.")
    if not attention_states.get_shape()[1:3].is_fully_defined():
        raise ValueError("Shape[1] and shape[2] of attention_states must be known: %s"
                         % attention_states.get_shape())

    with tf.variable_scope(scope or "attention_decoder"):
        attn_length = attention_states.get_shape()[1].value
        attn_size = attention_states.get_shape()[2].value

        # To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before.
        hidden = tf.reshape(
            attention_states, [-1, attn_length, 1, attn_size])
        attention_vec_size = attn_size  # Size of query vectors for attention.
        k = tf.get_variable("AttnW", [1, 1, attn_size, attention_vec_size])
        hidden_features = tf.nn.conv2d(hidden, k, [1, 1, 1, 1], "SAME")
        v = tf.get_variable("AttnV", [attention_vec_size])

        """Put attention masks on hidden using hidden_features and decoder_inputs."""
        with tf.variable_scope("Attention"):
            if echocell == 'tfGRU':
                y = core_rnn_cell_impl._linear(decoder_inputs, attention_vec_size, True)
            else:
                y = _linear(decoder_inputs, attention_vec_size, True)
            y = tf.reshape(y, [-1, 1, 1, attention_vec_size])
            # Attention mask is a softmax of v^T * tanh(...).
            s = tf.reduce_sum(
                v * tf.tanh(hidden_features + y), [2, 3])
            #  s = tf.Print(s, [s], summarize=80)
            a = tf.nn.softmax(s)
            # Return the attention weights; the attention-weighted vector d
            # is left to the caller.
            return a
Author: shiyemin, Project: shuttleNet, Lines: 47, Source: fops.py
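The score computed here is the additive (Bahdanau-style) attention s = v^T tanh(W1*h + W2*q), with W1 applied to the encoder states by the 1x1 convolution above and W2 applied to the query via `_linear`. A NumPy restatement of the scoring step (shapes and names hypothetical):

import numpy as np

batch, attn_length, attn_size = 2, 5, 8
hidden_features = np.random.randn(batch, attn_length, 1, attn_size)  # W1 * h_t
y = np.random.randn(batch, 1, 1, attn_size)                          # W2 * query
v = np.random.randn(attn_size)

s = np.sum(v * np.tanh(hidden_features + y), axis=(2, 3))  # [batch, attn_length]
a = np.exp(s) / np.sum(np.exp(s), axis=1, keepdims=True)   # softmax weights
print(a.shape)  # (2, 5)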


Note: The tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl._linear examples in this article were compiled from open-source code hosted on platforms such as GitHub; the snippets were selected from projects contributed by the open-source community. Copyright of each snippet remains with its original author; consult the corresponding project's License before distributing or reusing the code.