

Python rnn.LSTMStateTuple Method Code Examples

This article collects typical usage examples of the tensorflow.contrib.rnn.LSTMStateTuple method in Python. If you are wondering what rnn.LSTMStateTuple does, how to use it, or what it looks like in real code, the curated method examples below may help. You can also explore further usage examples for its containing module, tensorflow.contrib.rnn.


The following shows 15 code examples of the rnn.LSTMStateTuple method, sorted by popularity by default.
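
Before diving in, here is a minimal sketch (not taken from the examples below, and assuming TensorFlow 1.x, where tf.contrib is still available) of what LSTMStateTuple is: a (c, h) named tuple that packages an LSTM cell's memory state c and hidden/output state h. Note that tf.contrib was removed in TensorFlow 2.x, so all of these examples target 1.x APIs.

import tensorflow as tf
from tensorflow.contrib import rnn

cell = rnn.BasicLSTMCell(num_units=8, state_is_tuple=True)
inputs = tf.placeholder(tf.float32, [None, 16])
c = tf.placeholder(tf.float32, [None, 8])    # cell (memory) state
h = tf.placeholder(tf.float32, [None, 8])    # hidden (output) state
state = rnn.LSTMStateTuple(c, h)             # pack (c, h) into the structure the cell expects
output, new_state = cell(inputs, state)      # new_state is again an LSTMStateTuple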

Example 1: __call__

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMStateTuple [as alias]
def __call__(self, inputs, state, scope=None):
        """Long short-term memory cell (LSTM)."""
        with tf.variable_scope(self, scope or "basic_lstm_cell", reuse=self._reuse):
            # Parameters of gates are concatenated into one multiply for
            # efficiency.
            if self._state_is_tuple:
                c_prev, h_prev = state
            else:
                c_prev, h_prev = tf.split(
                    value=state, num_or_size_splits=2, axis=1)
            concat = tf.contrib.rnn._linear(
                [inputs, h_prev], 4 * self._num_units, True)

            # i = input_gate, g = new_input, f = forget_gate, o = output_gate
            i, g, f, o = tf.split(value=concat, num_or_size_splits=4, axis=1)

            c = (c_prev * tf.sigmoid(f + self._forget_bias) +
                 tf.sigmoid(i) * tf.tanh(g))
            h = tf.tanh(c) * tf.sigmoid(o)

            if self._state_is_tuple:
                new_state = LSTMStateTuple(c, h)
            else:
                new_state = tf.concat([c, h], 1)
            return h, new_state 
Author: hirofumi0810, Project: tensorflow_end2end_speech_recognition, Lines: 27, Source: basic_lstm.py

Example 2: rnn_placeholders

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMStateTuple [as alias]
def rnn_placeholders(state):
    """
    Given nested [multilayer] RNN state tensor, infers and returns state placeholders.

    Args:
        state:  tf.nn.lstm zero-state tuple.

    Returns:    tuple of placeholders
    """
    if isinstance(state, tf.contrib.rnn.LSTMStateTuple):
        c, h = state
        c = tf.placeholder(tf.float32, tf.TensorShape([None]).concatenate(c.get_shape()[1:]), c.op.name + '_c_pl')
        h = tf.placeholder(tf.float32, tf.TensorShape([None]).concatenate(h.get_shape()[1:]), h.op.name + '_h_pl')
        return tf.contrib.rnn.LSTMStateTuple(c, h)
    elif isinstance(state, tf.Tensor):
        h = state
        h = tf.placeholder(tf.float32, tf.TensorShape([None]).concatenate(h.get_shape()[1:]), h.op.name + '_h_pl')
        return h
    else:
        structure = [rnn_placeholders(x) for x in state]
        return tuple(structure) 
Author: Kismuz, Project: btgym, Lines: 23, Source: utils.py
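
A hypothetical usage sketch for the rnn_placeholders helper above (assuming TensorFlow 1.x): given the zero state of a stacked LSTM, it returns placeholders with the same nested structure but an unconstrained batch dimension, which can then be fed at run time.

import tensorflow as tf

cells = [tf.contrib.rnn.BasicLSTMCell(8) for _ in range(2)]
stack = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True)
zero_state = stack.zero_state(batch_size=1, dtype=tf.float32)

# Tuple of LSTMStateTuple placeholders mirroring zero_state, with batch dim None:
state_pl = rnn_placeholders(zero_state)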

Example 3: _show_struct

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMStateTuple [as alias]
def _show_struct(struct):
    # Debug utility
    if isinstance(struct, dict):
        for key, value in struct.items():
            print(key)
            _show_struct(value)

    elif type(struct) in [LSTMStateTuple, tuple, list]:
        print('LSTM/tuple/list:', type(struct), len(struct))
        for i in struct:
            _show_struct(i)

    else:
        try:
            print('shape: {}, type: {}'.format(np.asarray(struct).shape, type(struct)))

        except AttributeError:
            print('value:', struct) 
Author: Kismuz, Project: btgym, Lines: 20, Source: utils.py

Example 4: as_array

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMStateTuple [as alias]
def as_array(self, struct, squeeze_axis=None):
        if isinstance(struct, dict):
            out = {}
            for key, value in struct.items():
                out[key] = self.as_array(value, squeeze_axis)
            return out

        elif isinstance(struct, tuple):
            return tuple([self.as_array(value, squeeze_axis) for value in struct])

        elif isinstance(struct, LSTMStateTuple):
            return LSTMStateTuple(self.as_array(struct[0], squeeze_axis), self.as_array(struct[1], squeeze_axis))

        else:
            if squeeze_axis is not None:
                return np.squeeze(np.asarray(struct), axis=squeeze_axis)

            else:
                return np.asarray(struct) 
Author: Kismuz, Project: btgym, Lines: 21, Source: rollout.py

Example 5: mask_finished

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMStateTuple [as alias]
def mask_finished(finished, now_, prev_):
        mask = tf.expand_dims(tf.to_float(finished), 1)

        if isinstance(prev_, tuple):
            # tuple states
            next_ = []
            for ns, s in zip(now_, prev_):
                # fucking LSTMStateTuple
                if isinstance(ns, LSTMStateTuple):
                    next_.append(
                        LSTMStateTuple(c=(1. - mask) * ns.c + mask * s.c,
                                       h=(1. - mask) * ns.h + mask * s.h))
                else:
                    next_.append((1. - mask) * ns + mask * s)
            next_ = tuple(next_)
        else:
            next_ = (1. - mask) * now_ + mask * prev_

        return next_ 
Author: SwordYork, Project: sequencing, Lines: 21, Source: attention_decoder.py
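
A hypothetical call sketch (assuming mask_finished can be invoked directly, e.g. as a static method, in a TensorFlow 1.x graph): for positions where finished is True the previous state is kept, otherwise the freshly computed one is used.

import tensorflow as tf
from tensorflow.contrib.rnn import LSTMStateTuple

finished = tf.placeholder(tf.bool, [4])                       # one flag per beam/sequence
now_state = (LSTMStateTuple(c=tf.zeros([4, 8]), h=tf.zeros([4, 8])),)
prev_state = (LSTMStateTuple(c=tf.ones([4, 8]), h=tf.ones([4, 8])),)
next_state = mask_finished(finished, now_state, prev_state)   # finished rows keep prev_state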

Example 6: predict

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMStateTuple [as alias]
def predict(self, batch, states, batchsize=1):
        """

        :param batch: batch of data
        :param states: prior states
        :param batchsize: batchsize if getting initial state
        :return: 
            predicted_outputs: softmax predictions
            states: states of RNN
        """

        if states is None:
            return self.session.run([self.predicted_outputs, self.rnn_states], {self.inputs: batch})
        else:
            c = states[0]
            h = states[1]
            return self.session.run([self.predicted_outputs, self.rnn_states],
                                    {self.inputs: batch,
                                     self.initial_state: LSTMStateTuple(c, h)}) 
Author: zalandoresearch, Project: probrnn, Lines: 21, Source: graphs.py

Example 7: build_lstm

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMStateTuple [as alias]
def build_lstm(x, size, name, step_size):
    lstm = rnn.BasicLSTMCell(size, state_is_tuple=True)

    c_init = np.zeros((1, lstm.state_size.c), np.float32)
    h_init = np.zeros((1, lstm.state_size.h), np.float32)
    state_init = [c_init, h_init]

    c_in = tf.placeholder(tf.float32, 
            shape=[1, lstm.state_size.c],
            name='c_in')
    h_in = tf.placeholder(tf.float32, 
            shape=[1, lstm.state_size.h],
            name='h_in')
    state_in = [c_in, h_in]

    state_in = rnn.LSTMStateTuple(c_in, h_in)

    lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
        lstm, x, initial_state=state_in, sequence_length=step_size,
        time_major=False)
    lstm_outputs = tf.reshape(lstm_outputs, [-1, size])

    lstm_c, lstm_h = lstm_state
    state_out = [lstm_c[:1, :], lstm_h[:1, :]]
    return lstm_outputs, state_init, state_in, state_out 
Author: davidhershey, Project: feudal_networks, Lines: 27, Source: models.py
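
A hypothetical usage sketch for build_lstm (assuming a TensorFlow 1.x session): state_init provides zero numpy arrays for the first call, state_in is the LSTMStateTuple of placeholders to feed, and state_out holds the tensors to carry over to the next call.

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, [1, None, 32])        # [batch, time, features]
step_size = tf.placeholder(tf.int32, [1])
outputs, state_init, state_in, state_out = build_lstm(x, size=64, name='lstm', step_size=step_size)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    feed = {x: np.zeros((1, 5, 32), np.float32),
            step_size: [5],
            state_in[0]: state_init[0],               # c placeholder <- zero c state
            state_in[1]: state_init[1]}               # h placeholder <- zero h state
    out, c_next, h_next = sess.run([outputs] + state_out, feed)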

Example 8: __init__

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMStateTuple [as alias]
def __init__(self,x,size,step_size):
        lstm = rnn.BasicLSTMCell(size, state_is_tuple=True)

        c_init = np.zeros((1, lstm.state_size.c), np.float32)
        h_init = np.zeros((1, lstm.state_size.h), np.float32)
        self.state_init = [c_init, h_init]

        c_in = tf.placeholder(tf.float32, 
                shape=[1, lstm.state_size.c],
                name='c_in')
        h_in = tf.placeholder(tf.float32, 
                shape=[1, lstm.state_size.h],
                name='h_in')
        self.state_in = [c_in, h_in]

        state_in = rnn.LSTMStateTuple(c_in, h_in)

        lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
            lstm, x, initial_state=state_in, sequence_length=step_size,
            time_major=False)
        lstm_outputs = tf.reshape(lstm_outputs, [-1, size])

        lstm_c, lstm_h = lstm_state
        self.state_out = [lstm_c[:1, :], lstm_h[:1, :]]
        self.output = lstm_outputs 
Author: davidhershey, Project: feudal_networks, Lines: 27, Source: models.py

Example 9: __call__

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMStateTuple [as alias]
def __call__(self, inputs, state, scope=None):
        with tf.variable_scope(self.scope):
            c, h = state
            h = dropout(h, self.keep_recurrent_probs, self.is_train)

            mat = _compute_gates(inputs, h, self.num_units, self.forget_bias,
                                        self.kernel_initializer, self.recurrent_initializer, True)

            i, j, f, o = tf.split(value=mat, num_or_size_splits=4, axis=1)

            new_c = (c * self.recurrent_activation(f) + self.recurrent_activation(i) *
                     self.activation(j))
            new_h = self.activation(new_c) * self.recurrent_activation(o)

            new_state = LSTMStateTuple(new_c, new_h)

        return new_h, new_state 
Author: allenai, Project: document-qa, Lines: 19, Source: recurrent_layers.py

Example 10: decoder

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMStateTuple [as alias]
def decoder(x, decoder_inputs, keep_prob, sequence_length, memory, memory_length, first_attention):
    with tf.variable_scope("Decoder") as scope:
        label_embeddings = tf.get_variable(name="embeddings", shape=[n_classes, embedding_size], dtype=tf.float32)
        train_inputs_embedded = tf.nn.embedding_lookup(label_embeddings, decoder_inputs)
        lstm = rnn.LayerNormBasicLSTMCell(n_hidden, dropout_keep_prob=keep_prob)
        output_l = layers_core.Dense(n_classes, use_bias=True)
        encoder_state = rnn.LSTMStateTuple(x, x)
        attention_mechanism = BahdanauAttention(embedding_size, memory=memory, memory_sequence_length=memory_length)
        cell = AttentionWrapper(lstm, attention_mechanism, output_attention=False)
        cell_state = cell.zero_state(dtype=tf.float32, batch_size=train_batch_size)
        cell_state = cell_state.clone(cell_state=encoder_state, attention=first_attention)
        train_helper = TrainingHelper(train_inputs_embedded, sequence_length)
        train_decoder = BasicDecoder(cell, train_helper, cell_state, output_layer=output_l)
        decoder_outputs_train, decoder_state_train, decoder_seq_train = dynamic_decode(train_decoder, impute_finished=True)
        tiled_inputs = tile_batch(memory, multiplier=beam_width)
        tiled_sequence_length = tile_batch(memory_length, multiplier=beam_width)
        tiled_first_attention = tile_batch(first_attention, multiplier=beam_width)
        attention_mechanism = BahdanauAttention(embedding_size, memory=tiled_inputs, memory_sequence_length=tiled_sequence_length)
        x2 = tile_batch(x, beam_width)
        encoder_state2 = rnn.LSTMStateTuple(x2, x2)
        cell = AttentionWrapper(lstm, attention_mechanism, output_attention=False)
        cell_state = cell.zero_state(dtype=tf.float32, batch_size=test_batch_size * beam_width)
        cell_state = cell_state.clone(cell_state=encoder_state2, attention=tiled_first_attention)
        infer_decoder = BeamSearchDecoder(cell, embedding=label_embeddings, start_tokens=[GO] * test_len, end_token=EOS,
                                          initial_state=cell_state, beam_width=beam_width, output_layer=output_l)
        decoder_outputs_infer, decoder_state_infer, decoder_seq_infer = dynamic_decode(infer_decoder, maximum_iterations=4)
        return decoder_outputs_train, decoder_outputs_infer, decoder_state_infer 
Author: thunlp, Project: Auto_CLIWC, Lines: 29, Source: train_liwc.py

Example 11: state_size

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMStateTuple [as alias]
def state_size(self):
        return (LSTMStateTuple(self._num_units, self._num_units)
                if self._state_is_tuple else 2 * self._num_units) 
Author: hirofumi0810, Project: tensorflow_end2end_speech_recognition, Lines: 5, Source: basic_lstm.py

Example 12: _get_state_names

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMStateTuple [as alias]
def _get_state_names(cell):
  """Gets the state names for an `RNNCell`.

  Args:
    cell: A `RNNCell` to be used in the RNN.

  Returns:
    State names in the form of a string, a list of strings, or a list of
    string pairs, depending on the type of `cell.state_size`.

  Raises:
    TypeError: If cell.state_size is of type TensorShape.
  """
  state_size = cell.state_size
  if isinstance(state_size, tensor_shape.TensorShape):
    raise TypeError('cell.state_size of type TensorShape is not supported.')
  if isinstance(state_size, int):
    return '{}_{}'.format(rnn_common.RNNKeys.STATE_PREFIX, 0)
  if isinstance(state_size, rnn_cell.LSTMStateTuple):
    return [
        '{}_{}_c'.format(rnn_common.RNNKeys.STATE_PREFIX, 0),
        '{}_{}_h'.format(rnn_common.RNNKeys.STATE_PREFIX, 0),
    ]
  if isinstance(state_size[0], rnn_cell.LSTMStateTuple):
    return [[
        '{}_{}_c'.format(rnn_common.RNNKeys.STATE_PREFIX, i),
        '{}_{}_h'.format(rnn_common.RNNKeys.STATE_PREFIX, i),
    ] for i in range(len(state_size))]
  return [
      '{}_{}'.format(rnn_common.RNNKeys.STATE_PREFIX, i)
      for i in range(len(state_size))] 
Author: ryfeus, Project: lambda-packs, Lines: 33, Source: state_saving_rnn_estimator.py
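
As a hypothetical illustration (assuming the surrounding contrib imports, rnn_cell and rnn_common, resolve as in the TensorFlow 1.x estimator code this snippet comes from), _get_state_names maps cell.state_size onto per-state names:

import tensorflow as tf

cell = tf.contrib.rnn.BasicLSTMCell(8)         # state_size is a single LSTMStateTuple
single = _get_state_names(cell)                # -> one ['<prefix>_0_c', '<prefix>_0_h'] pair

stack = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(8) for _ in range(2)])
multi = _get_state_names(stack)                # -> one [..._c, ..._h] pair per layer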

Example 13: batch_gather

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMStateTuple [as alias]
def batch_gather(batch_dict, indices, _top=True):
    """
    Gathers experiences from processed batch according to specified indices.

    Args:
        batch_dict:     batched data dictionary
        indices:        array-like, indices to gather
        _top:           internal

    Returns:
        batched data of same structure as dict

    """
    batch = {}

    if isinstance(batch_dict, dict):
        for key, value in batch_dict.items():
            batch[key] = batch_gather(value, indices, False)

    elif isinstance(batch_dict, LSTMStateTuple):
        c = batch_gather(batch_dict[0], indices, False)
        h = batch_gather(batch_dict[1], indices, False)
        batch = LSTMStateTuple(c=c, h=h)

    elif isinstance(batch_dict, tuple):
        batch = tuple([batch_gather(struct, indices, False) for struct in batch_dict])

    else:
        batch = np.take(batch_dict, indices=indices, axis=0, mode='wrap')

    if _top:
        # Mind shape inference:
        batch['batch_size'] = indices.shape[0]

    return batch 
Author: Kismuz, Project: btgym, Lines: 37, Source: utils.py
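
A hypothetical usage sketch for batch_gather (numpy only, plus the LSTMStateTuple class): the indices are taken along the batch axis of every leaf, LSTMStateTuple structure is preserved, and batch_size is updated at the top level.

import numpy as np
from tensorflow.contrib.rnn import LSTMStateTuple

batch = {
    'reward': np.arange(10),
    'context': LSTMStateTuple(c=np.zeros((10, 8)), h=np.ones((10, 8))),
}
sub = batch_gather(batch, np.asarray([0, 3, 7]))
# sub['reward'].shape == (3,), sub['context'].c.shape == (3, 8), sub['batch_size'] == 3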

Example 14: pop_frame

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMStateTuple [as alias]
def pop_frame(self, idx, _struct=None):
        """
        Pops single experience from rollout.

        Args:
            idx:    experience position

        Returns:
            frame as [nested] dictionary
        """
        # No idx range checks here!
        if _struct is None:
            _struct = self

        if isinstance(_struct, dict) or type(_struct) == type(self):
            frame = {}
            for key, value in _struct.items():
                frame[key] = self.pop_frame(idx, value)
            return frame

        elif isinstance(_struct, tuple):
            return tuple([self.pop_frame(idx, value) for value in _struct])

        elif isinstance(_struct, LSTMStateTuple):
            return LSTMStateTuple(self.pop_frame(idx, _struct[0]), self.pop_frame(idx, _struct[1]))

        else:
            return _struct.pop(idx) 
Author: Kismuz, Project: btgym, Lines: 30, Source: rollout.py

Example 15: state_size

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMStateTuple [as alias]
def state_size(self):
        return rnn.LSTMStateTuple(self._num_units, self._num_units) 
Author: RandolphVI, Project: Text-Pairs-Relation-Classification, Lines: 4, Source: text_sann.py


Note: The tensorflow.contrib.rnn.LSTMStateTuple method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from community open-source projects; copyright remains with the original authors, and distribution or reuse should follow each project's license. Do not reproduce this article without permission.