

Python rnn_cell.GRUCell Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.ops.rnn_cell.GRUCell. If you are wondering what rnn_cell.GRUCell does, or how to use it in practice, the curated method examples below may help. You can also explore further usage examples from its enclosing module, tensorflow.python.ops.rnn_cell.


The following presents 11 code examples of the rnn_cell.GRUCell method, sorted by popularity by default.
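Before the project-specific examples, here is a minimal, self-contained sketch of the basic pattern they all share. It assumes TensorFlow 1.x (where this internal module path exists); the dimensions and placeholder below are illustrative, not taken from any of the projects that follow.

import tensorflow as tf
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops.rnn import dynamic_rnn

# Illustrative dimensions (assumed values).
batch_size, max_time, input_dim, num_units = 32, 20, 8, 16
inputs = tf.placeholder(tf.float32, [batch_size, max_time, input_dim])

# A single GRU layer with num_units hidden units.
cell = rnn_cell.GRUCell(num_units)
# outputs: [batch_size, max_time, num_units]; state: [batch_size, num_units]
outputs, state = dynamic_rnn(cell, inputs, dtype=tf.float32)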

Example 1: __init__

# Required module: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import GRUCell [as alias]
def __init__(self, n_uid, n_mid, EMBEDDING_DIM, HIDDEN_SIZE, BATCH_SIZE, SEQ_LEN=256):
        super(Model_ARNN, self).__init__(n_uid, n_mid, EMBEDDING_DIM, HIDDEN_SIZE, 
                                           BATCH_SIZE, SEQ_LEN, Flag="ARNN")
        with tf.name_scope('rnn_1'):
            self.sequence_length = tf.Variable([SEQ_LEN] * BATCH_SIZE)
            rnn_outputs, final_state1 = dynamic_rnn(GRUCell(2*EMBEDDING_DIM), inputs=self.item_his_eb,
                                         sequence_length=self.sequence_length, dtype=tf.float32,
                                         scope="gru1")
            tf.summary.histogram('GRU_outputs', rnn_outputs)
        # Attention layer
        with tf.name_scope('Attention_layer_1'):
            att_gru = din_attention(self.item_eb, rnn_outputs, HIDDEN_SIZE, self.mask)
            att_gru = tf.reduce_sum(att_gru, 1)

        inp = tf.concat([self.item_eb, self.item_his_eb_sum, final_state1, att_gru], -1)
        self.build_fcn_net(inp, use_dice=False) 
Author: UIC-Paper | Project: MIMN | Lines: 18 | Source: model.py

Example 2: __init__

# Required module: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import GRUCell [as alias]
def __init__(self, num_layers, num_units, batch_size, input_size, keep_prob=1.0, is_train=None, scope="native_gru"):
        self.num_layers = num_layers
        self.grus = []
        self.inits = []
        self.dropout_mask = []
        self.scope = scope
        for layer in range(num_layers):
            input_size_ = input_size if layer == 0 else 2 * num_units
            gru_fw = tf.contrib.rnn.GRUCell(num_units)
            gru_bw = tf.contrib.rnn.GRUCell(num_units)
            init_fw = tf.Variable(tf.zeros([batch_size, num_units]))
            init_bw = tf.Variable(tf.zeros([batch_size, num_units]))
            mask_fw = dropout(tf.ones([batch_size, 1, input_size_], dtype=tf.float32), keep_prob=keep_prob,
                              is_train=is_train, mode=None)  # pre-defined dropout mask for each layer
            mask_bw = dropout(tf.ones([batch_size, 1, input_size_], dtype=tf.float32), keep_prob=keep_prob,
                              is_train=is_train, mode=None)
            self.grus.append((gru_fw, gru_bw, ))
            self.inits.append((init_fw, init_bw, ))
            self.dropout_mask.append((mask_fw, mask_bw, )) 
Author: IsaacChanghau | Project: AmusingPythonCodes | Lines: 21 | Source: func.py

Example 3: __init__

# Required module: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import GRUCell [as alias]
def __init__(self, hidden_size, keep_prob):
        """
        Inputs:
          hidden_size: int. Hidden size of the RNN
          keep_prob: Tensor containing a single scalar that is the keep probability (for dropout)
        """
        self.hidden_size = hidden_size
        self.keep_prob = keep_prob
        self.rnn_cell_fw = rnn_cell.GRUCell(self.hidden_size)
        self.rnn_cell_fw = DropoutWrapper(self.rnn_cell_fw, input_keep_prob=self.keep_prob)
        self.rnn_cell_bw = rnn_cell.GRUCell(self.hidden_size)
        self.rnn_cell_bw = DropoutWrapper(self.rnn_cell_bw, input_keep_prob=self.keep_prob) 
Author: abisee | Project: cs224n-win18-squad | Lines: 14 | Source: modules.py
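For context, paired cells like rnn_cell_fw and rnn_cell_bw above are typically consumed by a bidirectional dynamic RNN. A minimal sketch of that step, assuming an instance encoder of the class above and illustrative shapes (neither is taken from the original modules.py):

# inputs: [batch_size, seq_len, input_dim]; input_lens: [batch_size] true lengths.
(fw_out, bw_out), _ = tf.nn.bidirectional_dynamic_rnn(
    encoder.rnn_cell_fw, encoder.rnn_cell_bw, inputs,
    sequence_length=input_lens, dtype=tf.float32)
# Concatenate both directions: [batch_size, seq_len, 2 * hidden_size].
out = tf.concat([fw_out, bw_out], 2)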

Example 4: __init__

# Required module: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import GRUCell [as alias]
def __init__(self, num_units, tied=False, non_recurrent_fn=None):
    super(Grid2GRUCell, self).__init__(
        num_units=num_units, num_dims=2,
        input_dims=0, output_dims=0, priority_dims=0, tied=tied,
        non_recurrent_dims=None if non_recurrent_fn is None else 0,
        cell_fn=lambda n, i: rnn_cell.GRUCell(num_units=n, input_size=i),
        non_recurrent_fn=non_recurrent_fn) 
Author: tobegit3hub | Project: deep_image_model | Lines: 9 | Source: grid_rnn_cell.py

Example 5: _create_rnn_cell

# Required module: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import GRUCell [as alias]
def _create_rnn_cell(self):
        cell = GRUCell(self.cfg.num_units) if self.cfg.cell_type == "gru" else LSTMCell(self.cfg.num_units)
        if self.cfg.use_dropout:
            cell = DropoutWrapper(cell, output_keep_prob=self.keep_prob)
        if self.cfg.use_residual:
            cell = ResidualWrapper(cell)
        return cell 
Author: IsaacChanghau | Project: AmusingPythonCodes | Lines: 9 | Source: seq2seq_model.py
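Cells built by _create_rnn_cell are usually stacked into one deep cell before being handed to an encoder or decoder. A hedged sketch of that composition (the instance name model and the layer count are assumptions, not from the original seq2seq_model.py):

from tensorflow.contrib.rnn import MultiRNNCell  # TF 1.x

num_layers = 2  # assumed value
# Chain several independently wrapped cells into a single multi-layer cell.
stacked_cell = MultiRNNCell([model._create_rnn_cell() for _ in range(num_layers)])
# stacked_cell can then be passed to dynamic_rnn or a seq2seq decoder.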

Example 6: __init__

# Required module: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import GRUCell [as alias]
def __init__(self, num_units, cell_type='lstm', scope='bi_rnn'):
        self.cell_fw = LSTMCell(num_units) if cell_type == 'lstm' else GRUCell(num_units)
        self.cell_bw = LSTMCell(num_units) if cell_type == 'lstm' else GRUCell(num_units)
        self.scope = scope 
Author: IsaacChanghau | Project: AmusingPythonCodes | Lines: 6 | Source: rnns.py

Example 7: _create_single_rnn_cell

# Required module: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import GRUCell [as alias]
def _create_single_rnn_cell(self, num_units):
        cell = GRUCell(num_units) if self.cfg["cell_type"] == "gru" else LSTMCell(num_units)
        return cell 
Author: IsaacChanghau | Project: neural_sequence_labeling | Lines: 5 | Source: base_model.py

Example 8: _create_single_rnn_cell

# Required module: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import GRUCell [as alias]
def _create_single_rnn_cell(self, num_units):
        cell = GRUCell(num_units) if self.cfg["cell_type"] == "gru" else LSTMCell(num_units)
        if self.cfg["use_dropout"]:
            cell = DropoutWrapper(cell, output_keep_prob=self.rnn_keep_prob)
        if self.cfg["use_residual"]:
            cell = ResidualWrapper(cell)
        return cell 
Author: IsaacChanghau | Project: neural_sequence_labeling | Lines: 9 | Source: multi_attention_model.py

Example 9: __init__

# Required module: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import GRUCell [as alias]
def __init__(self, num_units, cell_type='lstm', scope=None):
        self.cell_fw = GRUCell(num_units) if cell_type == 'gru' else LSTMCell(num_units)
        self.cell_bw = GRUCell(num_units) if cell_type == 'gru' else LSTMCell(num_units)
        self.scope = scope or "bi_rnn" 
Author: IsaacChanghau | Project: neural_sequence_labeling | Lines: 6 | Source: nns.py

Example 10: decode_model_with_buckets

# Required module: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import GRUCell [as alias]
def decode_model_with_buckets(encoder_inputs, decoder_inputs, targets, weights, decoder_emotions,
                                             buckets, seq2seq, softmax_loss_function=None,
                                             per_example_loss=False, name=None):
    """Create a sequence-to-sequence model with support for bucketing.
    The seq2seq argument is a function that defines a sequence-to-sequence model,
    e.g., seq2seq = lambda x, y: basic_rnn_seq2seq(x, y, rnn_cell.GRUCell(24))
    Args:
        encoder_inputs: A list of Tensors to feed the encoder; first seq2seq input.
        decoder_inputs: A list of Tensors to feed the decoder; second seq2seq input.
        targets: A list of 1D batch-sized int32 Tensors (desired output sequence).
        weights: List of 1D batch-sized float Tensors to weight the targets.
        decoder_emotions: A 1D batch-sized int32 Tensor of emotion categories,
            passed through to seq2seq.
        buckets: A list of pairs of (input size, output size) for each bucket.
        seq2seq: A sequence-to-sequence model function; it takes inputs that
            agree with encoder_inputs, decoder_inputs, and decoder_emotions, and
            returns a 5-tuple of (outputs, states, beam_results, beam_symbols,
            beam_parents).
        softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
            to be used instead of the standard softmax (the default if this is None).
        per_example_loss: Boolean. If set, the returned loss will be a batch-sized
            tensor of losses for each sequence in the batch. If unset, it will be
            a scalar with the averaged loss from all examples.
        name: Optional name for this operation, defaults to "model_with_buckets".
    Returns:
        A tuple of the form (outputs, beam_results, beam_symbols, beam_parents), where:
            outputs: The outputs for each bucket. Its j'th element consists of a list
                of 2D Tensors of shape [batch_size x num_decoder_symbols] (jth outputs).
            beam_results, beam_symbols, beam_parents: The per-bucket beam-search
                results, predicted symbols, and parent indices returned by seq2seq.
    Raises:
        ValueError: If the length of encoder_inputs, targets, or weights is smaller
            than the largest (last) bucket.
    """
    if len(encoder_inputs) < buckets[-1][0]:
        raise ValueError("Length of encoder_inputs (%d) must be at least that of la"
                                         "st bucket (%d)." % (len(encoder_inputs), buckets[-1][0]))
    if len(targets) < buckets[-1][1]:
        raise ValueError("Length of targets (%d) must be at least that of last "
                                         "bucket (%d)." % (len(targets), buckets[-1][1]))
    if len(weights) < buckets[-1][1]:
        raise ValueError("Length of weights (%d) must be at least that of last "
                                         "bucket (%d)." % (len(weights), buckets[-1][1]))

    all_inputs = encoder_inputs + decoder_inputs + targets + weights + [decoder_emotions]

    losses = []
    outputs = []
    beam_results = []
    beam_symbols = []
    beam_parents = []
    with ops.name_scope(name, "model_with_buckets", all_inputs):
        for j, bucket in enumerate(buckets):
            with variable_scope.variable_scope(variable_scope.get_variable_scope(),
                                                                                 reuse=True if j > 0 else None):
                bucket_outputs, _, beam_result, beam_symbol, beam_parent = seq2seq(encoder_inputs[:bucket[0]],
                                                                        decoder_inputs[:bucket[1]], decoder_emotions)
                outputs.append(bucket_outputs)
                beam_results.append(beam_result)
                beam_symbols.append(beam_symbol)
                beam_parents.append(beam_parent)
    print("End**********")

    return outputs, beam_results, beam_symbols, beam_parents 
Author: thu-coai | Project: ecm | Lines: 63 | Source: seq2seq.py
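To make the bucketing contract concrete, here is a tiny illustration with made-up sizes (none of these values are from the ecm project):

# Each bucket is a (max encoder length, max decoder length) pair.
buckets = [(5, 10), (10, 15), (20, 25)]
# The padded input lists must cover the largest bucket, or the checks above raise:
# len(encoder_inputs) >= 20, len(targets) >= 25, len(weights) >= 25.
# Per the docstring, seq2seq wraps the actual model, e.g. (hypothetical function):
# seq2seq = lambda x, y, e: my_emotional_seq2seq(x, y, e)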

Example 11: _build_model

# Required module: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import GRUCell [as alias]
def _build_model(self):
        with tf.variable_scope("embeddings"):
            self.source_embs = tf.get_variable(name="source_embs", shape=[self.cfg.source_vocab_size, self.cfg.emb_dim],
                                               dtype=tf.float32, trainable=True)
            self.target_embs = tf.get_variable(name="embeddings", shape=[self.cfg.vocab_size, self.cfg.emb_dim],
                                               dtype=tf.float32, trainable=True)
            source_emb = tf.nn.embedding_lookup(self.source_embs, self.enc_source)
            target_emb = tf.nn.embedding_lookup(self.target_embs, self.dec_target_in)
            print("source embedding shape: {}".format(source_emb.get_shape().as_list()))
            print("target input embedding shape: {}".format(target_emb.get_shape().as_list()))

        with tf.variable_scope("encoder"):
            if self.cfg.use_bi_rnn:
                with tf.variable_scope("bi-directional_rnn"):
                    cell_fw = GRUCell(self.cfg.num_units) if self.cfg.cell_type == "gru" else \
                        LSTMCell(self.cfg.num_units)
                    cell_bw = GRUCell(self.cfg.num_units) if self.cfg.cell_type == "gru" else \
                        LSTMCell(self.cfg.num_units)
                    bi_outputs, _ = bidirectional_dynamic_rnn(cell_fw, cell_bw, source_emb, dtype=tf.float32,
                                                              sequence_length=self.enc_seq_len)
                    source_emb = tf.concat(bi_outputs, axis=-1)
                    print("bi-directional rnn output shape: {}".format(source_emb.get_shape().as_list()))
            input_project = tf.layers.Dense(units=self.cfg.num_units, dtype=tf.float32, name="input_projection")
            source_emb = input_project(source_emb)
            print("encoder input projection shape: {}".format(source_emb.get_shape().as_list()))
            enc_cells = self._create_encoder_cell()
            self.enc_outputs, self.enc_states = dynamic_rnn(enc_cells, source_emb, sequence_length=self.enc_seq_len,
                                                            dtype=tf.float32)
            print("encoder output shape: {}".format(self.enc_outputs.get_shape().as_list()))

        with tf.variable_scope("decoder"):
            self.max_dec_seq_len = tf.reduce_max(self.dec_seq_len, name="max_dec_seq_len")
            self.dec_cells, self.dec_init_states = self._create_decoder_cell()
            # define input and output projection layer
            input_project = tf.layers.Dense(units=self.cfg.num_units, name="input_projection")
            self.dense_layer = tf.layers.Dense(units=self.cfg.vocab_size, name="output_projection")
            if self.mode == "train":  # either "train" or "decode"
                # for training
                target_emb = input_project(target_emb)
                train_helper = TrainingHelper(target_emb, sequence_length=self.dec_seq_len, name="train_helper")
                train_decoder = BasicDecoder(self.dec_cells, helper=train_helper, output_layer=self.dense_layer,
                                             initial_state=self.dec_init_states)
                self.dec_output, _, _ = dynamic_decode(train_decoder, impute_finished=True,
                                                       maximum_iterations=self.max_dec_seq_len)
                print("decoder output shape: {} (vocab size)".format(self.dec_output.rnn_output.get_shape().as_list()))

                # for decode
                start_token = tf.ones(shape=[self.batch_size, ], dtype=tf.int32) * self.cfg.target_dict[GO]
                end_token = self.cfg.target_dict[EOS]

                def inputs_project(inputs):
                    return input_project(tf.nn.embedding_lookup(self.target_embs, inputs))

                dec_helper = GreedyEmbeddingHelper(embedding=inputs_project, start_tokens=start_token,
                                                   end_token=end_token)
                infer_decoder = BasicDecoder(self.dec_cells, helper=dec_helper, initial_state=self.dec_init_states,
                                             output_layer=self.dense_layer)
                infer_dec_output, _, _ = dynamic_decode(infer_decoder, maximum_iterations=self.cfg.maximum_iterations)
                self.dec_predicts = infer_dec_output.sample_id 
Author: IsaacChanghau | Project: AmusingPythonCodes | Lines: 61 | Source: seq2seq_model.py


Note: The tensorflow.python.ops.rnn_cell.GRUCell method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's License. Do not reproduce without permission.