当前位置: 首页>>代码示例>>Python>>正文


Python utils.Progbar方法代码示例

本文整理汇总了Python中utils.Progbar方法的典型用法代码示例。如果您正苦于以下问题:Python utils.Progbar方法的具体用法?Python utils.Progbar怎么用?Python utils.Progbar使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在utils的用法示例。


在下文中一共展示了utils.Progbar方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: predict

# 需要导入模块: import utils [as 别名]
# 或者: from utils import Progbar [as 别名]
def predict(self, sess, test, id_to_tag, id_to_word):
        """Run tagging over ``test`` and write predictions to predict_out.

        Each sentence is emitted as one ``word<TAB>tag`` line per token,
        sentences separated by a blank line.  ``id_to_word`` is accepted
        for interface compatibility but not used here.
        """
        batch_size = self.args.batch_size
        num_batches = (len(test) + batch_size - 1) // batch_size  # ceil division
        progress = Progbar(target=num_batches)
        with open(self.args.predict_out, 'w+', encoding='utf8') as outfile:
            batches = minibatches_evaluate(test, batch_size)
            for batch_idx, (words, target_words, true_words) in enumerate(batches):
                predictions, seq_lengths = self.predict_batch(sess, words)

                for word, gold_words, predicted, seq_len in zip(words, true_words, predictions, seq_lengths):
                    # Trim padding before emitting token/tag pairs.
                    for token, tag_id in zip(gold_words[:seq_len], predicted[:seq_len]):
                        outfile.write(token + '\t' + id_to_tag[tag_id] + '\n')
                    outfile.write('\n')

                progress.update(batch_idx + 1)
开发者ID:adapt-sjtu,项目名称:AMTTL,代码行数:18,代码来源:model_1.py

示例2: __iter__

# 需要导入模块: import utils [as 别名]
# 或者: from utils import Progbar [as 别名]
def __iter__(self):
        """Yield tokenized sentences from ``self.data``, one per input line.

        Acts as a restartable corpus iterator for gensim-style training:
        each full pass counts as one epoch, drives a progress bar, and --
        when a model is attached -- saves an intermediate (or, on the final
        epoch, the final) model file after the pass completes.
        NOTE(review): uses a Python 2 print statement; not Python 3 compatible.
        """
        if self.model is not None:
            # Training started: a model is attached, so count this pass as an epoch.
            self.epoch_number += 1
            print 'STARTING EPOCH : (%d/%d)' % (self.epoch_number, self.n_epochs)
            sys.stdout.flush()
        self.bar = Progbar(len(self.data))
        for idx, line in enumerate(self.data):
            self.bar.update(idx + 1)
            # Optionally lowercase before tokenizing.
            line = line.lower() if self.lowercase else line
            yield self.l_en.tokenize_sent(line)
        if self.model is not None:
            if self.epoch_number != self.n_epochs:
                # Intermediate checkpoint, tagged with the epoch number.
                SAVE_FILE_NAME = self.model_prefix + '_iter_' + str(self.epoch_number) + '.model'
            else:
                # Last Epoch
                SAVE_FILE_NAME = self.model_prefix + '.model'
            self.model.save(SAVE_FILE_NAME) 
开发者ID:codedecde,项目名称:Recognizing-Textual-Entailment,代码行数:20,代码来源:gensim_generator.py

示例3: train_epoch

# 需要导入模块: import utils [as 别名]
# 或者: from utils import Progbar [as 别名]
def train_epoch(self, train_set, valid_data, epoch, shuffle=True):
        """Train for one epoch over ``train_set`` and log summaries.

        Optionally shuffles the raw examples, batches them, writes one
        training summary per batch, and every 100 batches additionally
        writes a validation summary at the current global step.
        """
        if shuffle:
            random.shuffle(train_set)
        batches = batchnize_dataset(train_set, self.cfg.batch_size)
        total = len(batches)
        progbar = Progbar(target=total)
        for idx, batch in enumerate(batches):
            global_step = (epoch - 1) * total + idx + 1
            feed = self._get_feed_dict(batch,
                                       emb_keep_prob=self.cfg["emb_keep_prob"],
                                       rnn_keep_prob=self.cfg["rnn_keep_prob"],
                                       attn_keep_prob=self.cfg["attn_keep_prob"],
                                       is_train=True,
                                       lr=self.cfg["lr"])
            _, loss_value, summary = self.sess.run([self.train_op, self.loss, self.summary], feed_dict=feed)
            progbar.update(idx + 1, [("Global Step", int(global_step)), ("Train Loss", loss_value)])
            self.train_writer.add_summary(summary, global_step)
            if idx % 100 == 0:
                # Periodic validation summary at the same global step.
                valid_summary = self.sess.run(self.summary, feed_dict=self._get_feed_dict(valid_data))
                self.test_writer.add_summary(valid_summary, global_step)
开发者ID:IsaacChanghau,项目名称:neural_sequence_labeling,代码行数:20,代码来源:multi_attention_model.py

示例4: train_epoch

# 需要导入模块: import utils [as 别名]
# 或者: from utils import Progbar [as 别名]
def train_epoch(self, train_set, valid_data, epoch):
        """Run one training epoch, reporting loss and running perplexity.

        Perplexity is exp(total loss so far / total examples so far);
        every 100 batches a validation summary is also written.
        """
        total = len(train_set)
        progbar = Progbar(target=total)
        cost_sum = 0
        sample_count = 0
        for idx, batch in enumerate(train_set):
            feed = self._get_feed_dict(batch, is_train=True, keep_prob=self.cfg["keep_prob"], lr=self.cfg["lr"])
            _, loss_value, summary = self.sess.run([self.train_op, self.loss, self.summary], feed_dict=feed)
            global_step = (epoch - 1) * total + idx + 1
            cost_sum += loss_value
            # Batch size inferred from the padded word matrix's first axis.
            sample_count += np.array(batch["words"]).shape[0]
            progbar.update(idx + 1, [("Global Step", int(global_step)), ("Train Loss", loss_value),
                                     ("Perplexity", np.exp(cost_sum / sample_count))])
            self.train_writer.add_summary(summary, global_step)
            if idx % 100 == 0:
                valid_summary = self.sess.run(self.summary, feed_dict=self._get_feed_dict(valid_data))
                self.test_writer.add_summary(valid_summary, global_step)
开发者ID:IsaacChanghau,项目名称:neural_sequence_labeling,代码行数:19,代码来源:punct_attentive_model.py

示例5: _valid_error

# 需要导入模块: import utils [as 别名]
# 或者: from utils import Progbar [as 别名]
def _valid_error(data_loader, model, criterion, epoch, opt):
    """Compute per-batch validation losses for one epoch.

    Iterates ``data_loader`` (each element is a (one2many, one2one) batch
    pair), runs the model on the one2one batch and accumulates the
    criterion loss.  With ``opt.copy_model`` set, the loss is computed over
    the vocabulary extended by ``opt.max_unk_words`` against the copy
    targets.  Returns the list of per-batch loss values.
    """
    progbar = Progbar(title='Validating', target=len(data_loader), batch_size=data_loader.batch_size,
                      total_examples=len(data_loader.dataset))
    model.eval()  # disable dropout/batch-norm updates for validation

    losses = []

    # Note that the data should be shuffled every time
    for i, batch in enumerate(data_loader):
        # if i >= 100:
        #     break

        one2many_batch, one2one_batch = batch
        src, trg, trg_target, trg_copy_target, src_ext, oov_lists = one2one_batch

        if torch.cuda.is_available():
            src                = src.cuda()
            trg                = trg.cuda()
            trg_target         = trg_target.cuda()
            trg_copy_target    = trg_copy_target.cuda()
            src_ext            = src_ext.cuda()

        decoder_log_probs, _, _ = model.forward(src, trg, src_ext)

        if not opt.copy_model:
            loss = criterion(
                decoder_log_probs.contiguous().view(-1, opt.vocab_size),
                trg_target.contiguous().view(-1)
            )
        else:
            # Copy mechanism scores the extended vocabulary:
            # in-vocab words plus up to max_unk_words OOV slots.
            loss = criterion(
                decoder_log_probs.contiguous().view(-1, opt.vocab_size + opt.max_unk_words),
                trg_copy_target.contiguous().view(-1)
            )
        # NOTE(review): loss.data[0] is pre-0.4 PyTorch; newer versions need loss.item().
        losses.append(loss.data[0])

        # NOTE(review): 'PPL' is reported as the raw loss, not exp(loss) -- confirm intent.
        progbar.update(epoch, i, [('valid_loss', loss.data[0]), ('PPL', loss.data[0])])

    return losses 
开发者ID:memray,项目名称:seq2seq-keyphrase-pytorch,代码行数:41,代码来源:train_rl.py

示例6: evaluate_greedy

# 需要导入模块: import utils [as 别名]
# 或者: from utils import Progbar [as 别名]
def evaluate_greedy(model, data_loader, test_examples, opt):
    """Greedy-decode each example in ``data_loader`` and log predictions.

    Each batch is assumed to contain exactly one example.  The predicted
    token sequence is truncated at the position of '</s>' found in the
    reference string, and both prediction and reference are logged.
    """
    model.eval()

    logging.info('======================  Checking GPU Availability  =========================')
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        logging.info('Running on GPU!')
        model.cuda()
    else:
        logging.info('Running on CPU!')

    logging.info('======================  Start Predicting  =========================')
    progbar = Progbar(title='Testing', target=len(data_loader), batch_size=data_loader.batch_size,
                      total_examples=len(data_loader.dataset))

    '''
    Note here each batch only contains one data example, thus decoder_probs is flattened
    '''
    for i, (batch, example) in enumerate(zip(data_loader, test_examples)):
        src = batch.src

        logging.info('======================  %d  =========================' % (i + 1))
        logging.info('\nSource text: \n %s\n' % (' '.join([opt.id2word[wi] for wi in src.data.numpy()[0]])))

        # Decoder input: one row of BOS tokens, max_sent_length long.
        # trg = Variable(torch.from_numpy(np.zeros((src.size(0), opt.max_sent_length), dtype='int64')))
        trg = Variable(torch.LongTensor([[opt.word2id[pykp.io.BOS_WORD]] * opt.max_sent_length]))

        if use_cuda:
            # BUG FIX: Tensor.cuda() is NOT in-place; the original discarded
            # its return value, leaving src (and trg) on the CPU while the
            # model lived on the GPU.  Assign the moved tensors back.
            src = src.cuda()
            trg = trg.cuda()

        max_words_pred = model.greedy_predict(src, trg)
        progbar.update(None, i, [])

        sentence_pred = [opt.id2word[x] for x in max_words_pred]
        sentence_real = example['trg_str']

        if '</s>' in sentence_real:
            index = sentence_real.index('</s>')
            sentence_pred = sentence_pred[:index]

        logging.info('\t\tPredicted : %s ' % (' '.join(sentence_pred)))
        logging.info('\t\tReal : %s ' % (sentence_real)) 
开发者ID:memray,项目名称:seq2seq-keyphrase-pytorch,代码行数:43,代码来源:evaluate.py

示例7: _valid_error

# 需要导入模块: import utils [as 别名]
# 或者: from utils import Progbar [as 别名]
def _valid_error(data_loader, model, criterion, epoch, opt):
    """Return the list of per-batch validation losses for this epoch.

    Runs the model on the one2one view of each batch; with copy attention
    enabled the loss is computed over the vocabulary extended by
    ``opt.max_unk_words`` against the copy targets.
    """
    progbar = Progbar(title='Validating', target=len(data_loader), batch_size=data_loader.batch_size,
                      total_examples=len(data_loader.dataset))
    model.eval()

    batch_losses = []

    use_cuda = torch.cuda.is_available()
    # Note that the data should be shuffled every time
    for batch_idx, (one2many_batch, one2one_batch) in enumerate(data_loader):
        src, trg, trg_target, trg_copy_target, src_ext, oov_lists = one2one_batch

        if use_cuda:
            src, trg, trg_target, trg_copy_target, src_ext = (
                src.cuda(), trg.cuda(), trg_target.cuda(), trg_copy_target.cuda(), src_ext.cuda())

        decoder_log_probs, _, _ = model.forward(src, trg, src_ext)

        if opt.copy_attention:
            # Copy model: scores span the extended (in-vocab + OOV) vocabulary.
            loss = criterion(
                decoder_log_probs.contiguous().view(-1, opt.vocab_size + opt.max_unk_words),
                trg_copy_target.contiguous().view(-1)
            )
        else:
            loss = criterion(
                decoder_log_probs.contiguous().view(-1, opt.vocab_size),
                trg_target.contiguous().view(-1)
            )
        batch_losses.append(loss.data[0])

        progbar.update(epoch, batch_idx, [('valid_loss', loss.data[0]), ('PPL', loss.data[0])])

    return batch_losses
开发者ID:memray,项目名称:seq2seq-keyphrase-pytorch,代码行数:41,代码来源:train.py

示例8: run_evaluate

# 需要导入模块: import utils [as 别名]
# 或者: from utils import Progbar [as 别名]
def run_evaluate(self, sess, test, tags, target='src'):
        """Evaluate token accuracy and chunk-level P/R/F1 on ``test``.

        ``target`` selects which branch of the model produces predictions
        ('src' or the auxiliary target branch).  Returns a tuple of
        (accuracy, precision, recall, f1).
        """
        token_matches = []
        correct_preds = 0.
        total_correct = 0.
        total_preds = 0.
        batch_size = self.args.batch_size
        num_batches = (len(test) + batch_size - 1) // batch_size  # ceil division
        progress = Progbar(target=num_batches)
        for batch_idx, (words, labels, target_words) in enumerate(minibatches(test, batch_size)):
            if target == 'src':
                labels_pred, seq_lengths = self.predict_batch(sess, words, mode=target, is_training=False)
            else:
                labels_pred, seq_lengths = self.predict_batch(sess, None, words, mode=target, is_training=False)

            for gold, predicted, seq_len in zip(labels, labels_pred, seq_lengths):
                # Trim padding, then compare token-by-token and chunk-by-chunk.
                gold = gold[:seq_len]
                predicted = predicted[:seq_len]
                token_matches += [a == b for (a, b) in zip(gold, predicted)]
                gold_chunks = set(get_chunks(gold, tags))
                pred_chunks = set(get_chunks(predicted, tags))
                correct_preds += len(gold_chunks & pred_chunks)
                total_preds += len(pred_chunks)
                total_correct += len(gold_chunks)

            progress.update(batch_idx + 1)

        precision = correct_preds / total_preds if correct_preds > 0 else 0
        recall = correct_preds / total_correct if correct_preds > 0 else 0
        f1 = 2 * precision * recall / (precision + recall) if correct_preds > 0 else 0
        return np.mean(token_matches), precision, recall, f1 
开发者ID:adapt-sjtu,项目名称:AMTTL,代码行数:30,代码来源:model_1.py

示例9: train_epoch

# 需要导入模块: import utils [as 别名]
# 或者: from utils import Progbar [as 别名]
def train_epoch(self, train_set, valid_data, epoch):
        """One pass over ``train_set`` with progress reporting.

        Writes a training summary per batch and a validation summary every
        100 batches, both indexed by the global step.
        """
        batch_count = len(train_set)
        progress = Progbar(target=batch_count)
        for idx, batch in enumerate(train_set):
            step = (epoch - 1) * batch_count + idx + 1
            feed = self._get_feed_dict(batch, is_train=True, keep_prob=self.cfg["keep_prob"], lr=self.cfg["lr"])
            _, loss_value, summary = self.sess.run([self.train_op, self.loss, self.summary], feed_dict=feed)
            progress.update(idx + 1, [("Global Step", int(step)), ("Train Loss", loss_value)])
            self.train_writer.add_summary(summary, step)
            if idx % 100 == 0:
                valid_summary = self.sess.run(self.summary, feed_dict=self._get_feed_dict(valid_data))
                self.test_writer.add_summary(valid_summary, step)
开发者ID:IsaacChanghau,项目名称:neural_sequence_labeling,代码行数:16,代码来源:blstm_cnn_crf_model.py

示例10: train

# 需要导入模块: import utils [as 别名]
# 或者: from utils import Progbar [as 别名]
def train(self, trainset, devset, testset, batch_size=64, epochs=50, shuffle=True):
        """Full training loop with checkpointing and early stopping.

        Runs up to ``epochs`` epochs (resuming from ``self.start_epoch``),
        saves a session checkpoint whenever the score improves, and stops
        after ``self.cfg.no_imprv_patience`` epochs without improvement.
        The learning rate optionally decays as init_lr / (1 + lr_decay * epoch).

        NOTE(review): model selection is driven by the TEST-set score
        (``is_devset=False``), not the dev score -- confirm this is intended.
        """
        self.logger.info('Start training...')
        init_lr = self.cfg.lr  # initial learning rate, used for decay learning rate
        best_score = 0.0  # record the best score
        best_score_epoch = 1  # record the epoch of the best score obtained
        no_imprv_epoch = 0  # no improvement patience counter
        for epoch in range(self.start_epoch, epochs + 1):
            self.logger.info('Epoch %2d/%2d:' % (epoch, epochs))
            progbar = Progbar(target=(len(trainset) + batch_size - 1) // batch_size)  # number of batches
            if shuffle:
                np.random.shuffle(trainset)  # shuffle training dataset each epoch
            # training each epoch
            for i, (words, labels) in enumerate(batch_iter(trainset, batch_size)):
                feed_dict = self._get_feed_dict(words, labels, lr=self.cfg.lr, is_train=True)
                _, train_loss = self.sess.run([self.train_op, self.loss], feed_dict=feed_dict)
                progbar.update(i + 1, [("train loss", train_loss)])
            if devset is not None:
                self.evaluate(devset, batch_size)
            cur_score = self.evaluate(testset, batch_size, is_devset=False)
            # learning rate decay
            if self.cfg.decay_lr:
                self.cfg.lr = init_lr / (1 + self.cfg.lr_decay * epoch)
            # performs model saving and evaluating on test dataset
            if cur_score > best_score:
                no_imprv_epoch = 0
                self.save_session(epoch)
                best_score = cur_score
                best_score_epoch = epoch
                self.logger.info(' -- new BEST score on TEST dataset: {:05.3f}'.format(best_score))
            else:
                no_imprv_epoch += 1
                if no_imprv_epoch >= self.cfg.no_imprv_patience:
                    self.logger.info('early stop at {}th epoch without improvement for {} epochs, BEST score: '
                                     '{:05.3f} at epoch {}'.format(epoch, no_imprv_epoch, best_score, best_score_epoch))
                    break
        self.logger.info('Training process done...') 
开发者ID:IsaacChanghau,项目名称:Dense_BiLSTM,代码行数:38,代码来源:model.py

示例11: run_epoch

# 需要导入模块: import utils [as 别名]
# 或者: from utils import Progbar [as 别名]
def run_epoch(self, sess, src_train, src_dev, tags, target_train, target_dev, n_epoch_noimprove):
        """Jointly train one epoch on source- and target-domain minibatches.

        Source and target batches are drawn in lockstep (cycling endlessly),
        the combined loss is optimized, and after the epoch the model is
        evaluated on the target train and dev sets.  Returns the dev-set
        (accuracy, precision, recall, f1).
        """
        nbatches = (len(target_train) + self.target_batch_size - 1) // self.target_batch_size
        prog = Progbar(target=nbatches)
        total_loss = 0

        # Cycling iterators so the (usually larger) source set never exhausts.
        src = minibatches(src_train, self.src_batch_size, circle=True)
        target = minibatches(target_train, self.target_batch_size, circle=True)

        use_penalty = self.args.penalty_ratio > 0
        for i in range(nbatches):
            src_words, src_tags, _ = next(src)
            target_words, target_tags, _ = next(target)
            labels = src_tags + target_tags

            feed_dict, _ = self.get_feed_dict(src_words, labels, target_words, self.args.learning_rate,
                                              self.args.dropout, self.src_batch_size, is_training=True)

            if use_penalty:
                _, src_crf_loss, target_crf_loss, penalty_loss, loss = sess.run(
                    [self.train_op, self.src_crf_loss, self.target_crf_loss, self.penalty_loss, self.loss],
                    feed_dict=feed_dict)
            else:
                _, src_crf_loss, target_crf_loss, loss = sess.run(
                    [self.train_op, self.src_crf_loss, self.target_crf_loss, self.loss],
                    feed_dict=feed_dict)

            # ``loss`` may come back as a scalar or a length-1 array; display a
            # scalar either way.  This replaces the original duplicated
            # bare-except fallbacks around prog.update.
            display_loss = loss[0] if np.ndim(loss) > 0 else loss
            metrics = [("train loss", display_loss), ("src crf", src_crf_loss),
                       ("target crf", target_crf_loss)]
            if use_penalty:
                metrics.append(("{} loss".format(self.args.penalty), penalty_loss))
            prog.update(i + 1, metrics)
            total_loss += loss

        self.info['loss'] += [total_loss / nbatches]
        acc, p, r, f1 = self.run_evaluate(sess, target_train, tags, target='target')
        self.info['dev'].append((acc, p, r, f1))
        self.logger.critical(
            "target train acc {:04.2f}  f1  {:04.2f}  p {:04.2f}  r  {:04.2f}".format(100 * acc, 100 * f1, 100 * p,
                                                                                      100 * r))
        acc, p, r, f1 = self.run_evaluate(sess, target_dev, tags, target='target')
        self.info['dev'].append((acc, p, r, f1))
        self.logger.info(
            "dev acc {:04.2f}  f1  {:04.2f}  p {:04.2f}  r  {:04.2f}".format(100 * acc, 100 * f1, 100 * p, 100 * r))
        return acc, p, r, f1 
开发者ID:adapt-sjtu,项目名称:AMTTL,代码行数:53,代码来源:model_1.py


注:本文中的utils.Progbar方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。