

Python legacy_seq2seq.sequence_loss_by_example method code examples

This article collects typical usage examples of the Python method tensorflow.contrib.legacy_seq2seq.sequence_loss_by_example. If you are looking for concrete examples of how to call legacy_seq2seq.sequence_loss_by_example, the curated snippets below should help. You can also explore further usage examples from the tensorflow.contrib.legacy_seq2seq module.


Four code examples of the legacy_seq2seq.sequence_loss_by_example method are shown below, ordered by popularity.
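Before the project examples, here is a minimal standalone sketch (an illustration only, assuming TensorFlow 1.x graph mode; the shapes and placeholder names are hypothetical, not taken from the projects below) of the call pattern that recurs in every example: a list of 2-D logits, a list of 1-D integer targets, and a list of 1-D weights, returning one cross-entropy value per entry.

import tensorflow as tf
from tensorflow.contrib import legacy_seq2seq

batch_size, seq_length, vocab_size = 32, 20, 1000

# Logits and targets flattened into a single "time step", as the
# language-model examples below do; the returned loss then has one
# entry per token.
logits = tf.placeholder(tf.float32, [batch_size * seq_length, vocab_size])
targets = tf.placeholder(tf.int32, [batch_size * seq_length])

loss = legacy_seq2seq.sequence_loss_by_example(
    [logits],                              # list of [N, vocab] tensors
    [targets],                             # list of [N] int tensors
    [tf.ones([batch_size * seq_length])])  # list of [N] weight tensors

cost = tf.reduce_sum(loss) / batch_size    # scalar training objective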

Example 1: generate_sequence_output

# Required import: from tensorflow.contrib import legacy_seq2seq [as alias]
# Alternatively: from tensorflow.contrib.legacy_seq2seq import sequence_loss_by_example [as alias]
def generate_sequence_output(num_encoder_symbols,
                             encoder_outputs,
                             encoder_state,
                             targets,
                             sequence_length,
                             num_decoder_symbols,
                             weights,
                             buckets,
                             softmax_loss_function=None,
                             per_example_loss=False,
                             name=None,
                             use_attention=False):
    if len(targets) < buckets[-1][1]:
        raise ValueError("Length of targets (%d) must be at least that of last"
                         "bucket (%d)." % (len(targets), buckets[-1][1]))

    all_inputs = encoder_outputs + targets + weights
    with tf.name_scope(name, "model_with_buckets", all_inputs):
        with tf.variable_scope("decoder_sequence_output", reuse=None):
            logits, attention_weights = attention_RNN(encoder_outputs,
                                                      encoder_state,
                                                      num_decoder_symbols,
                                                      sequence_length,
                                                      use_attention=use_attention)
            if per_example_loss:
                assert len(logits) == len(targets)
                # We need to make each target a flattened int64 tensor.
                bucket_target = [tf.reshape(tf.to_int64(x), [-1]) for x in targets]
                crossent = sequence_loss_by_example(
                    logits, bucket_target, weights,
                    softmax_loss_function=softmax_loss_function)
            else:
                assert len(logits) == len(targets)
                bucket_target = [tf.reshape(tf.to_int64(x), [-1]) for x in targets]
                crossent = sequence_loss(
                    logits, bucket_target, weights,
                    softmax_loss_function=softmax_loss_function)

    return logits, crossent 
Author: sliderSun, Project: pynlp, Lines: 41, Source: seq_labeling.py
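Example 1 forwards an optional softmax_loss_function into sequence_loss_by_example. As a hedged sketch (not part of the pynlp project), such a function takes the labels and logits for one time step and returns a per-example loss; recent TF 1.x releases of legacy_seq2seq call it with labels/logits keyword arguments:

import tensorflow as tf

def sparse_softmax_loss(labels, logits):
    # Hypothetical drop-in for the softmax_loss_function argument above:
    # labels is a [N] int tensor, logits a [N, num_decoder_symbols] tensor,
    # and the return value is a [N] per-example cross-entropy.
    return tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits)

It would then be passed as softmax_loss_function=sparse_softmax_loss when calling generate_sequence_output.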

Example 2: __init__

# Required import: from tensorflow.contrib import legacy_seq2seq [as alias]
# Alternatively: from tensorflow.contrib.legacy_seq2seq import sequence_loss_by_example [as alias]
def __init__(self, args, data, infer=False):
        if infer:
            args.batch_size = 1
            args.seq_length = 1
        with tf.name_scope('inputs'):
            self.input_data = tf.placeholder(
                tf.int32, [args.batch_size, args.seq_length])
            self.target_data = tf.placeholder(
                tf.int32, [args.batch_size, args.seq_length])

        with tf.name_scope('model'):
            self.cell = []
            for n in range(args.num_layers):
                self.cell.append(tf.nn.rnn_cell.LSTMCell(args.state_size))
            self.cell = tf.nn.rnn_cell.MultiRNNCell(self.cell)
            self.initial_state = self.cell.zero_state(
                args.batch_size, tf.float32)
            with tf.variable_scope('rnnlm'):
                w = tf.get_variable(
                    'softmax_w', [args.state_size, data.vocab_size])
                b = tf.get_variable('softmax_b', [data.vocab_size])
                with tf.device("/cpu:0"):
                    embedding = tf.get_variable(
                        'embedding', [data.vocab_size, args.state_size])
                    inputs = tf.nn.embedding_lookup(embedding, self.input_data)
            outputs, last_state = tf.nn.dynamic_rnn(
                self.cell, inputs, initial_state=self.initial_state)

        with tf.name_scope('loss'):
            output = tf.reshape(outputs, [-1, args.state_size])

            self.logits = tf.matmul(output, w) + b
            self.probs = tf.nn.softmax(self.logits)
            self.last_state = last_state

            targets = tf.reshape(self.target_data, [-1])
            loss = seq2seq.sequence_loss_by_example([self.logits],
                                                    [targets],
                                                    [tf.ones_like(targets, dtype=tf.float32)])
            self.cost = tf.reduce_sum(loss) / args.batch_size
            tf.summary.scalar('loss', self.cost)

        with tf.name_scope('optimize'):
            self.lr = tf.placeholder(tf.float32, [])
            tf.summary.scalar('learning_rate', self.lr)

            optimizer = tf.train.AdamOptimizer(self.lr)
            tvars = tf.trainable_variables()
            grads = tf.gradients(self.cost, tvars)
            for g in grads:
                tf.summary.histogram(g.name, g)
            grads, _ = tf.clip_by_global_norm(grads, args.grad_clip)

            self.train_op = optimizer.apply_gradients(zip(grads, tvars))
            self.merged_op = tf.summary.merge_all() 
Author: ZubinGou, Project: AI_Poet_Totoro, Lines: 57, Source: main.py

Example 3: build

# Required import: from tensorflow.contrib import legacy_seq2seq [as alias]
# Alternatively: from tensorflow.contrib.legacy_seq2seq import sequence_loss_by_example [as alias]
def build(self):
        self.inputs = tf.placeholder(tf.int32, [self.batch_size, None])
        self.targets = tf.placeholder(tf.int32, [self.batch_size, None])
        self.keep_prob = tf.placeholder(tf.float32)
        self.seq_len = tf.placeholder(tf.int32, [self.batch_size])
        self.learning_rate = tf.placeholder(tf.float64)

        with tf.variable_scope('rnn'):
            w = tf.get_variable("softmax_w", [self.num_units, self.data.words_size])
            b = tf.get_variable("softmax_b", [self.data.words_size])

            embedding = tf.get_variable("embedding", [self.data.words_size, self.num_units])
            inputs = tf.nn.embedding_lookup(embedding, self.inputs)

        self.cell = tf.nn.rnn_cell.MultiRNNCell([self.unit() for _ in range(self.num_layer)])
        self.init_state = self.cell.zero_state(self.batch_size, dtype=tf.float32)
        output, self.final_state = tf.nn.dynamic_rnn(self.cell,
                                                inputs=inputs,
                                                sequence_length=self.seq_len,
                                                initial_state=self.init_state,
                                                scope='rnn')
        with tf.name_scope('fc'):
            y = tf.reshape(output, [-1, self.num_units])
            logits = tf.matmul(y, w) + b

        with tf.name_scope('softmax'):
            prob = tf.nn.softmax(logits)

        self.prob = tf.reshape(prob, [self.batch_size, -1])
        pre = tf.argmax(prob, 1)
        self.pre = tf.reshape(pre, [self.batch_size, -1])

        targets = tf.reshape(self.targets, [-1])
        with tf.name_scope('loss'):
            loss = seq2seq.sequence_loss_by_example([logits],
                                                [targets],
                                                [tf.ones_like(targets, dtype=tf.float32)])
            self.loss = tf.reduce_mean(loss)

        with tf.name_scope('summary'):
            tf.summary.scalar('loss', self.loss)
            self.merged_summary = tf.summary.merge_all()

        with tf.name_scope('optimizer'):
            optimizer = tf.train.AdamOptimizer(self.learning_rate)
            tvars = tf.trainable_variables()
            grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, tvars), 5)
            self.train_op = optimizer.apply_gradients(zip(grads, tvars)) 
Author: stardut, Project: Text-Generate-RNN, Lines: 50, Source: net.py

Example 4: __init__

# Required import: from tensorflow.contrib import legacy_seq2seq [as alias]
# Alternatively: from tensorflow.contrib.legacy_seq2seq import sequence_loss_by_example [as alias]
def __init__(self, args, training=True):
        """Initialize RNN model"""
        self.args = args
        if not training:
            args.batch_size = 1
            args.seq_length = 1

        cell_fn = rnn.GRUCell
        cells = [cell_fn(args.rnn_size) for _ in range(args.num_layers)]

        self.cell = cell = rnn.MultiRNNCell(cells, state_is_tuple=True)
        self.input_data = tf.placeholder(tf.int32, [args.batch_size, args.seq_length])
        self.targets = tf.placeholder(tf.int32, [args.batch_size, args.seq_length])
        self.initial_state = cell.zero_state(args.batch_size, tf.float32)

        with tf.variable_scope('rnn'):
            softmax_w = tf.get_variable("softmax_w", [args.rnn_size, args.vocab_size])
            softmax_b = tf.get_variable("softmax_b", [args.vocab_size])

        embedding = tf.get_variable("embedding", [args.vocab_size, args.rnn_size])
        inputs = tf.nn.embedding_lookup(embedding, self.input_data)

        inputs = tf.split(inputs, args.seq_length, 1)
        inputs = [tf.squeeze(input_, [1]) for input_ in inputs]

        def loop(prev, _):
            prev = tf.matmul(prev, softmax_w) + softmax_b
            prev_symbol = tf.stop_gradient(tf.argmax(prev, 1))
            return tf.nn.embedding_lookup(embedding, prev_symbol)

        outputs, last_state = legacy_seq2seq.rnn_decoder(inputs, self.initial_state, cell,
                                                         loop_function=loop if not training else None, scope='rnnlm')
        output = tf.reshape(tf.concat(outputs, 1), [-1, args.rnn_size])

        self.logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)
        self.probs = tf.nn.softmax(self.logits)
        loss = legacy_seq2seq.sequence_loss_by_example(
                [self.logits],
                [tf.reshape(self.targets, [-1])],
                [tf.ones([args.batch_size * args.seq_length])])

        with tf.name_scope('loss'):
            self.cost = tf.reduce_sum(loss) / args.batch_size / args.seq_length

        self.final_state = last_state
        self.learning_rate = tf.Variable(0.0, trainable=False)
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars), args.grad_clip)

        with tf.name_scope('optimization'):
            optimizer = tf.train.AdamOptimizer(self.learning_rate)
        self.train_op = optimizer.apply_gradients(zip(grads, tvars)) 
Author: CornellDataScience, Project: Deep-Learning-Course, Lines: 54, Source: rnn_model.py
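All four examples weight every position equally with tf.ones(...). A common variation (a sketch under the assumption that token id 0 is padding; none of the projects above does this) is to mask padded positions so they contribute nothing to the loss:

import tensorflow as tf
from tensorflow.contrib import legacy_seq2seq

batch_size, seq_length, vocab_size = 4, 10, 50
logits = tf.placeholder(tf.float32, [batch_size * seq_length, vocab_size])
targets = tf.placeholder(tf.int32, [batch_size * seq_length])

# 0.0 weight where the target is the (assumed) padding id 0, 1.0 elsewhere.
weights = tf.cast(tf.sign(targets), tf.float32)

loss = legacy_seq2seq.sequence_loss_by_example([logits], [targets], [weights])
# Normalize by the number of real tokens rather than by batch size.
cost = tf.reduce_sum(loss) / tf.maximum(tf.reduce_sum(weights), 1.0)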


Note: The tensorflow.contrib.legacy_seq2seq.sequence_loss_by_example examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors; consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.