

Python layers.embed_sequence Method Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.layers.embed_sequence. If you are wondering how exactly to call layers.embed_sequence, what it is used for, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the module it belongs to, tensorflow.contrib.layers.


The following presents 5 code examples of the layers.embed_sequence method, sorted by popularity by default.
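
For orientation before the examples, here is a minimal, self-contained sketch of what layers.embed_sequence does: it looks up a trainable embedding vector for every integer id in a [batch_size, sequence_length] tensor and returns a [batch_size, sequence_length, embed_dim] tensor. The ids, vocabulary size, and embedding dimension below are illustrative values chosen for this sketch, not taken from any of the examples that follow; it assumes TensorFlow 1.x with tf.contrib available.

import tensorflow as tf
from tensorflow.contrib import layers

# A batch of 2 sequences, each 4 token ids long (illustrative values).
token_ids = tf.constant([[3, 1, 4, 1], [5, 9, 2, 6]], dtype=tf.int64)

# Create an embedding table of shape [vocab_size, embed_dim] under the 'embed'
# scope and look up an embedding vector for every id in the batch.
embedded = layers.embed_sequence(token_ids, vocab_size=10, embed_dim=8, scope='embed')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(embedded).shape)  # (2, 4, 8)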

Example 1: __attention_loss_branch

# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import embed_sequence [as alias]
def __attention_loss_branch(self, rnn_features):
        output_embed = layers.embed_sequence(self.att_train_output,
                                             vocab_size=self.vocab_att_size,
                                             embed_dim=self.att_embed_dim, scope='embed')
        #  with tf.device('/cpu:0'):
        embeddings = tf.Variable(tf.truncated_normal(shape=[self.vocab_att_size, self.att_embed_dim],
                                                     stddev=0.1), name='decoder_embedding')
        start_tokens = tf.zeros([self.batch_size], dtype=tf.int64)

        train_helper = tf.contrib.seq2seq.TrainingHelper(output_embed, self.att_train_length)
        pred_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(embeddings,
                                                               start_tokens=tf.to_int32(start_tokens),
                                                               end_token=1)

        train_outputs = self.__att_decode(train_helper, rnn_features, 'decode')
        pred_outputs = self.__att_decode(pred_helper, rnn_features, 'decode', reuse=True)

        # train_decode_result = train_outputs[0].rnn_output[0, :-1, :]
        # pred_decode_result = pred_outputs[0].rnn_output[0, :, :]

        # Mask out the last time step of each target sequence when computing the loss.
        mask = tf.cast(tf.sequence_mask(self.batch_size * [self.att_train_length[0] - 1],
                                        self.att_train_length[0]), tf.float32)
        att_loss = tf.contrib.seq2seq.sequence_loss(train_outputs[0].rnn_output, self.att_target_output,
                                                    weights=mask)

        return att_loss 
Developer ID: ray075hl, Project: attention-ocr-toy-example, Lines of code: 27, Source: ctc_joint_attention_model.py

Example 2: build_compute_graph

# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import embed_sequence [as alias]
def build_compute_graph():
    train_output_embed = encoder_net(image, scope='encode_features')
    pred_output_embed = encoder_net(image, scope='encode_features', reuse=True)

    output_embed = layers.embed_sequence(train_output, vocab_size=VOCAB_SIZE, embed_dim=VOCAB_SIZE, scope='embed')
    embeddings = tf.Variable(tf.truncated_normal(shape=[VOCAB_SIZE, VOCAB_SIZE], stddev=0.1), name='decoder_embedding')

    start_tokens = tf.zeros([BATCH_SIZE], dtype=tf.int64)

    train_helper = tf.contrib.seq2seq.TrainingHelper(output_embed, train_length)
    pred_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
        embeddings, start_tokens=tf.to_int32(start_tokens), end_token=1)
    train_outputs = decode(train_helper, train_output_embed, 'decode')
    #pred_outputs = decode(pred_helper, pred_output_embed, 'decode', reuse=True)
    pred_outputs = decode(pred_helper, train_output_embed, 'decode', reuse=True)

    train_decode_result = train_outputs[0].rnn_output[0, :-1, :]
    pred_decode_result = pred_outputs[0].rnn_output[0, :, :]

    mask = tf.cast(tf.sequence_mask(BATCH_SIZE * [train_length[0] - 1], train_length[0]),
                   tf.float32)
    att_loss = tf.contrib.seq2seq.sequence_loss(train_outputs[0].rnn_output, target_output,
                                                weights=mask)
    loss = tf.reduce_mean(att_loss)

    train_one_step = tf.train.AdadeltaOptimizer().minimize(loss)
    return loss, train_one_step, train_decode_result, pred_decode_result 
Developer ID: ray075hl, Project: attention-ocr-toy-example, Lines of code: 29, Source: attention_model.py

Example 3: _make_encoder

# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import embed_sequence [as alias]
def _make_encoder(self):
        """Create the encoder"""
        inputs = layers.embed_sequence(
            self.X,
            vocab_size=self.vocab_size,
            embed_dim=self.embed_dim,
            scope='embed')

        # Project to the correct dimensions:
        # because the bidirectional RNN's forward and backward
        # outputs are concatenated, their combined size doubles,
        # so halve the hidden size here to compensate.
        inputs = tf.layers.dense(inputs, self.hidden_size//2)

        cell_fw = rnn.MultiRNNCell([
            self._make_cell(self.hidden_size//2) for _ in range(self.depth)
        ])
        cell_bw = rnn.MultiRNNCell([
            self._make_cell(self.hidden_size//2) for _ in range(self.depth)
        ])
        encoder_outputs, encoder_final_state = tf.nn.bidirectional_dynamic_rnn(
            cell_fw=cell_fw, cell_bw=cell_bw, sequence_length=self.sequence_length,
            inputs=inputs, dtype=tf.float32)

        # Concat forward and backward outputs
        encoder_outputs = tf.concat(encoder_outputs, 2)

        # Concat forward and backward layer states
        encoder_fw_states, encoder_bw_states = encoder_final_state
        encoder_final_state = []
        for fw, bw in zip(encoder_fw_states, encoder_bw_states):
            c = tf.concat([fw.c, bw.c], 1)
            h = tf.concat([fw.h, bw.h], 1)
            encoder_final_state.append(rnn.LSTMStateTuple(c=c, h=h))
        return encoder_outputs, encoder_final_state 
Developer ID: frnsys, Project: retrosynthesis_planner, Lines of code: 37, Source: seq2seq.py

Example 4: build_infer_graph

# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import embed_sequence [as alias]
def build_infer_graph(x, batch_size, vocab_size=VOCAB_SIZE, embedding_size=32,
                      rnn_size=128, num_layers=2, p_keep=1.0):
    """
    builds inference graph
    """
    infer_args = {"batch_size": batch_size, "vocab_size": vocab_size,
                  "embedding_size": embedding_size, "rnn_size": rnn_size,
                  "num_layers": num_layers, "p_keep": p_keep}
    logger.debug("building inference graph: %s.", infer_args)

    # other placeholders
    p_keep = tf.placeholder_with_default(p_keep, [], "p_keep")
    batch_size = tf.placeholder_with_default(batch_size, [], "batch_size")

    # embedding layer
    embed_seq = layers.embed_sequence(x, vocab_size, embedding_size)
    # shape: [batch_size, seq_len, embedding_size]
    embed_seq = tf.nn.dropout(embed_seq, keep_prob=p_keep)
    # shape: [batch_size, seq_len, embedding_size]

    # RNN layers
    cells = [rnn.LSTMCell(rnn_size) for _ in range(num_layers)]
    cells = [rnn.DropoutWrapper(cell, output_keep_prob=p_keep) for cell in cells]
    cells = rnn.MultiRNNCell(cells)
    input_state = cells.zero_state(batch_size, tf.float32)
    # shape: [num_layers, 2, batch_size, rnn_size]
    rnn_out, output_state = tf.nn.dynamic_rnn(cells, embed_seq, initial_state=input_state)
    # rnn_out shape: [batch_size, seq_len, rnn_size]
    # output_state shape: [num_layers, 2, batch_size, rnn_size]
    with tf.name_scope("lstm"):
        tf.summary.histogram("outputs", rnn_out)
        for c_state, h_state in output_state:
            tf.summary.histogram("c_state", c_state)
            tf.summary.histogram("h_state", h_state)

    # fully connected layer
    logits = layers.fully_connected(rnn_out, vocab_size, activation_fn=None)
    # shape: [batch_size, seq_len, vocab_size]

    # predictions
    with tf.name_scope("softmax"):
        probs = tf.nn.softmax(logits)
        # shape: [batch_size, seq_len, vocab_size]

    with tf.name_scope("sequence"):
        tf.summary.histogram("embeddings", embed_seq)
        tf.summary.histogram("logits", logits)

    model = {"logits": logits, "probs": probs,
             "input_state": input_state, "output_state": output_state,
             "p_keep": p_keep, "batch_size": batch_size, "infer_args": infer_args}
    return model 
Developer ID: yxtay, Project: char-rnn-text-generation, Lines of code: 54, Source: tf_model.py

Example 5: _make_train

# Required import: from tensorflow.contrib import layers [as alias]
# Or: from tensorflow.contrib.layers import embed_sequence [as alias]
def _make_train(self, decoder_cell, decoder_initial_state):
        # Assume 0 is the START token
        start_tokens = tf.zeros((self.batch_size,), dtype=tf.int32)
        y = tf.concat([tf.expand_dims(start_tokens, 1), self.y], 1)
        output_lengths = tf.reduce_sum(tf.to_int32(tf.not_equal(y, 1)), 1)

        # Reuse encoding embeddings
        inputs = layers.embed_sequence(
            y,
            vocab_size=self.vocab_size,
            embed_dim=self.embed_dim,
            scope='embed', reuse=True)

        # Prepare the decoder with the attention cell
        with tf.variable_scope('decode'):
            # Project to correct dimensions
            out_proj = tf.layers.Dense(self.vocab_size, name='output_proj')
            inputs = tf.layers.dense(inputs, self.hidden_size, name='input_proj')

            helper = seq2seq.TrainingHelper(inputs, output_lengths)
            decoder = seq2seq.BasicDecoder(
                cell=decoder_cell, helper=helper,
                initial_state=decoder_initial_state,
                output_layer=out_proj)
            max_len = tf.reduce_max(output_lengths)
            final_outputs, final_state, final_sequence_lengths = seq2seq.dynamic_decode(
                decoder=decoder, impute_finished=True, maximum_iterations=max_len)
            logits = final_outputs.rnn_output

        # Set valid timesteps to 1 and padded steps to 0,
        # so we only look at the actual sequence without the padding
        mask = tf.sequence_mask(output_lengths, maxlen=max_len, dtype=tf.float32)

        # Alternative weighting (left commented out): weight=1 for every position
        # whose target token is not 1 (the end/padding id used above),
        # so only the real sequence positions contribute.
        # weights = tf.to_float(tf.not_equal(y[:, :-1], 1))

        # Training and loss ops,
        # with gradient clipping (see [4])
        loss_op = seq2seq.sequence_loss(logits, self.y, weights=mask)
        optimizer = tf.train.AdamOptimizer(self.learning_rate)
        gradients, variables = zip(*optimizer.compute_gradients(loss_op))
        gradients, _ = tf.clip_by_global_norm(gradients, self.max_grad_norm)
        train_op = optimizer.apply_gradients(zip(gradients, variables))

        # Compute accuracy
        # Use the mask from before so we only compare
        # the relevant sequence lengths for each example
        pred = tf.argmax(logits, axis=2, output_type=tf.int32)
        pred = tf.boolean_mask(pred, mask)
        true = tf.boolean_mask(self.y, mask)
        accs = tf.cast(tf.equal(pred, true), tf.float32)
        accuracy_op = tf.reduce_mean(accs, name='acc')
        return loss_op, train_op, accuracy_op 
Developer ID: frnsys, Project: retrosynthesis_planner, Lines of code: 57, Source: seq2seq.py
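
A pattern worth noting across these examples (the encoder in Example 3 and the decoder in Example 5, both from the same project) is sharing one embedding table between two calls by passing the same scope and reuse=True on the second call. The sketch below illustrates only that reuse mechanism; the placeholder names, vocabulary size, and embedding dimension are hypothetical and not part of any example above.

import tensorflow as tf
from tensorflow.contrib import layers

encoder_ids = tf.placeholder(tf.int32, [None, None], name='encoder_ids')
decoder_ids = tf.placeholder(tf.int32, [None, None], name='decoder_ids')

# The first call creates the embedding variables under the 'embed' scope.
enc_embedded = layers.embed_sequence(encoder_ids, vocab_size=100, embed_dim=16,
                                     scope='embed')

# reuse=True makes the second call look up the existing variables in 'embed'
# instead of creating a new table, so encoder and decoder share embeddings.
dec_embedded = layers.embed_sequence(decoder_ids, vocab_size=100, embed_dim=16,
                                     scope='embed', reuse=True)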


Note: The tensorflow.contrib.layers.embed_sequence method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's license. Please do not reproduce without permission.