

Python generic_utils.Progbar Method Code Examples

This article collects typical usage examples of the Python method keras.utils.generic_utils.Progbar. If you are wondering how generic_utils.Progbar is used in practice, how to call it, or what working examples look like, the curated code examples here may help. You can also explore further usage examples from the containing module, keras.utils.generic_utils.


The sections below present 15 code examples of the generic_utils.Progbar method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
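
Before working through the examples, here is a minimal usage sketch of the pattern they all share, assuming the classic keras.utils.generic_utils.Progbar API that appears in them: construct the bar with a target count, then advance it with update() (absolute position) or add() (increment), optionally passing named metric values that Progbar averages and displays. The workload and the "loss" metric below are placeholders for illustration, not taken from any of the projects cited in this article.

# Minimal Progbar sketch; the sleep call and the "loss" metric are placeholders
import time

from keras.utils.generic_utils import Progbar

n_steps = 20                    # total number of work items
progbar = Progbar(n_steps)      # target count the bar fills up to

for step in range(n_steps):
    time.sleep(0.05)            # stand-in for the real per-step work
    loss = 1.0 / (step + 1)     # stand-in metric value
    # values is a list of (name, value) pairs; Progbar keeps running averages
    progbar.update(step + 1, values=[("loss", loss)])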

Example 1: results

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def results(self):
        recs = []
        columns = ['gram_loss', 'ungram_loss', 'correct'] + dependency_fields
        self.model.model._make_test_function()
        progbar = Progbar(len(self.deps_test))
        for i, dep in enumerate(self.deps_test):
            inp = np.zeros((1, self.maxlen))
            v = int(dep['verb_index']) - 1
            tokens = dep[self.field].split()[:v+1]
            ints = [self.vocab_to_ints[x] for x in tokens]
            try:
                ungram = self.vocab_to_ints[self.inflect_verb[tokens[v]]]
            except KeyError:   # reinflected form not in vocabulary: ignore
                continue
            n = len(ints) - 1
            inp[0, -n:] = ints[:-1]
            gram_loss = self.model.test_on_batch(inp, np.array([ints[v]]))
            ungram_loss = self.model.test_on_batch(inp, np.array([ungram]))
            recs.append((gram_loss, ungram_loss, gram_loss < ungram_loss) +
                        tuple(dep[x] for x in dependency_fields))
            if i % 16 == 0:
                progbar.update(i)

        self.test_results = pd.DataFrame(recs, columns=columns) 
Developer: TalLinzen, Project: rnn_agreement, Lines: 26, Source: language_model.py

Example 2: play

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def play(self, env, epoch=1, batch_size=1, visualize=None, verbose=1):
        print("Free play started!")
        frames = np.zeros((0, ) + env.observe_image().shape[1:])
        frames = frames.transpose(0, 2, 3, 1)
        progbar = Progbar(epoch)

        for e in xrange(epoch):
            # reset environment on each epoch
            env.reset()
            game_over = False
            loss = 0
            rewards = 0

            # get initial observation, start game
            obs_t = env.observe()
            while not game_over:
                obs_tm1 = obs_t

                # get next action
                action = self.policy(obs_tm1, train=False)

                # apply action, get rewards and new state
                obs_t, reward, game_over = env.update(action)
                rewards += reward

                frame_t = env.observe_image().transpose(0, 2, 3, 1)
                frames = np.concatenate([frames, frame_t], axis=0)

            if verbose == 1:
                progbar.add(1, values=[("loss", loss), ("rewards", rewards)])


        if visualize:
            from agnez.video import make_gif
            print("Making gif!")
            frames = np.repeat(frames, 3, axis=-1)
            make_gif(frames[:visualize['n_frames']],
                     filepath=visualize['filepath'], gray=visualize['gray'], interpolation='none')
            print("See your gif at {}".format(visualize['filepath'])) 
Developer: EderSantana, Project: X, Lines: 41, Source: agent.py

Example 3: test_progbar

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def test_progbar():
    values_s = [None,
                [['key1', 1], ['key2', 1e-4]],
                [['key3', 1], ['key2', 1e-4]]]

    for target in (len(values_s) - 1, None):
        for verbose in (0, 1, 2):
            bar = Progbar(target, width=30, verbose=verbose, interval=0.05)
            for current, values in enumerate(values_s):
                bar.update(current, values=values) 
Developer: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 12, Source: generic_utils_test.py

Example 4: new_generate_dataset

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def new_generate_dataset(dataset, samples, gen_test, beam_size, hypo_len, noise_size, cmodel):

    vgen = val_generator(dataset, gen_test, beam_size, hypo_len, noise_size)
    p = Progbar(samples)
    batchez = []
    while p.seen_so_far < samples:
        batch = next(vgen)
        probs = cmodel.predict([batch[0], batch[1]], verbose = 0)
        batch += (probs,)

        p.add(len(batch[0]))
        batchez.append(batch)
    return merge_result_batches(batchez) 
Developer: jstarc, Project: nli_generation, Lines: 15, Source: augment.py

Example 5: validate

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def validate(dev, gen_test, beam_size, hypo_len, samples, noise_size, glove, cmodel = None, adverse = False, 
                 diverse = False):
    vgen = val_generator(dev, gen_test, beam_size, hypo_len, noise_size)
    p = Progbar(samples)
    batchez = []
    while p.seen_so_far < samples:
        batch = next(vgen)
        perplexity = np.mean(np.power(2, batch[2]))
        loss = np.mean(batch[2])
        losses = [('hypo_loss', loss), ('perplexity', perplexity)]
        if cmodel is not None:
            ceval = cmodel.evaluate([batch[0], batch[1]], batch[4], verbose = 0)
            losses += [('class_loss', ceval[0]), ('class_acc', ceval[1])]
            probs = cmodel.predict([batch[0], batch[1]], verbose = 0)
            losses += [('class_entropy', np.mean(-np.sum(probs * np.log(probs), axis=1)))]
        
        p.add(len(batch[0]), losses)
        batchez.append(batch)
    batchez = merge_result_batches(batchez)
    
    res = {}
    if adverse:
        val_loss = adverse_validation(dev, batchez, glove)
        print 'adverse_loss:', val_loss
        res['adverse_loss'] = val_loss
    if diverse:
        div, _, _, _ = diversity(dev, gen_test, beam_size, hypo_len, noise_size, 64, 32)
        res['diversity'] = div
    print
    for val in p.unique_values:
        arr = p.sum_values[val]
        res[val] = arr[0] / arr[1]
    return res 
Developer: jstarc, Project: nli_generation, Lines: 35, Source: generative_alg.py

Example 6: diversity

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def diversity(dev, gen_test, beam_size, hypo_len, noise_size, per_premise, samples):
    step = len(dev[0]) / samples
    sind = [i * step for i in range(samples)]
    p = Progbar(per_premise * samples)
    for i in sind:
        hypos = []
        unique_words = []
        hypo_list = []
        premise = dev[0][i]
        prem_list = set(cut_zeros(list(premise)))        
        while len(hypos) < per_premise:
            label = np.argmax(dev[2][i])
            words = single_generate(premise, label, gen_test, beam_size, hypo_len, noise_size)
            hypos += [str(ex) for ex in words]
            unique_words += [int(w) for ex in words for w in ex if w > 0]
            hypo_list += [set(cut_zeros(list(ex))) for ex in words]
        
        jacks = []  
        prem_jacks = []
        for u in range(len(hypo_list)):
            sim_prem = len(hypo_list[u] & prem_list)/float(len(hypo_list[u] | prem_list))
            prem_jacks.append(sim_prem)
            for v in range(u+1, len(hypo_list)):
                sim = len(hypo_list[u] & hypo_list[v])/float(len(hypo_list[u] | hypo_list[v]))
                jacks.append(sim)
        avg_dist_hypo = 1 -  np.mean(jacks)
        avg_dist_prem = 1 -  np.mean(prem_jacks)
        d = entropy(Counter(hypos).values()) 
        w = entropy(Counter(unique_words).values())
        p.add(len(hypos), [('diversity', d),('word_entropy', w),('avg_dist_hypo', avg_dist_hypo), ('avg_dist_prem', avg_dist_prem)])
    arrd = p.sum_values['diversity']
    arrw = p.sum_values['word_entropy']
    arrj = p.sum_values['avg_dist_hypo']
    arrp = p.sum_values['avg_dist_prem']
    
    return arrd[0] / arrd[1], arrw[0] / arrw[1], arrj[0] / arrj[1],  arrp[0] / arrp[1] 
Developer: jstarc, Project: nli_generation, Lines: 38, Source: generative_alg.py

Example 7: test_points

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def test_points(premises, labels, noises, gtest, cmodel, hypo_len):
    p = Progbar(len(premises))
    hypos = []
    bs = 64 
    for i in range(len(labels) / bs):
        words, _  = generative_predict_beam(gtest, premises[i * bs: (i+1)*bs], 
                          noises[i * bs: (i+1)*bs,None,:], labels[i * bs: (i+1)*bs], True, hypo_len)
        hypos.append(words)
        p.add(len(words))
    hypos = np.vstack(hypos)
    cpreds = cmodel.evaluate([premises[:len(hypos)], hypos], labels[:len(hypos)])
    print cpreds 
Developer: jstarc, Project: nli_generation, Lines: 14, Source: visualize.py

Example 8: reset

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def reset(self):
        """ Reset statistics """
        self.interval_start = timeit.default_timer()
        self.progbar = Progbar(target=self.interval)
        self.metrics = []
        self.infos = []
        self.info_names = None
        self.episode_rewards = [] 
Developer: keras-rl, Project: keras-rl, Lines: 10, Source: callbacks.py

Example 9: evaluate

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def evaluate(self, howmany=1000):
        self.model.model._make_test_function()
        random.seed(0)
        shuffled = self.deps_test[:]
        random.shuffle(shuffled)
        shuffled = shuffled[:howmany]
        X_test = []
        Y_test = []

        for dep in shuffled:
            tokens = self.process_single_dependency(dep)
            ints = []
            for token in tokens:
                if token not in self.vocab_to_ints:
                    # zero is for pad
                    x = self.vocab_to_ints[token] = len(self.vocab_to_ints) + 1
                    self.ints_to_vocab[x] = token
                ints.append(self.vocab_to_ints[token])

            first = 1
            for i in range(first, len(ints) - 1):
                X_test.append(ints[:i])
                Y_test.append(ints[i])

        test_loss = []
        end = int(float(len(X_test) / self.batch_size))
        progbar = Progbar(end)
        for i in range(0, len(X_test), self.batch_size):
            inp = sequence.pad_sequences(X_test[i:i+self.batch_size],
                                         maxlen=self.maxlen)
            out = Y_test[i:i+self.batch_size]
            output = self.model.test_on_batch(inp, out)
            test_loss.append(output)
            j = int(float(i) / self.batch_size)
            if j % 16 == 0:
                progbar.update(j)
        progbar.update(end)

        return np.mean(test_loss) 
Developer: TalLinzen, Project: rnn_agreement, Lines: 41, Source: language_model.py

Example 10: collect_agreement

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def collect_agreement(self):
        n_deps = 0
        self.deps = []
        random.seed(1)

        if self.verbose and self.stop_after:
            from keras.utils.generic_utils import Progbar
            progbar = Progbar(self.stop_after)

        for i, sent in enumerate(tokenize_blanks(zread(self.infile)), 1):
            if self.stop_after is not None and n_deps >= self.stop_after:
                break
            if i % (self.skip + 1) != 0:
                continue

            # only one dependency per sentence
            deps = self.find_nsubj_agreement(sent)
            if len(deps) == 0:
                continue
            dep = random.choice(deps)
            if dep['subj_index'] > dep['verb_index']:
                continue
            if (dep['subj_pos'] == 'NN' and dep['verb_pos'] == 'VBP' or 
                dep['subj_pos'] == 'NNS' and dep['verb_pos'] == 'VBZ'):
                # ungrammatical dependency (parse error)
                continue

            n_deps += 1
            dep['sentence'] = self.represent_sentence(sent)
            dep['pos_sentence'] = ' '.join(x[POS] for x in sent)
            dep['orig_sentence'] = ' '.join(x[WORD] for x in sent)
            dep['all_nouns'] = self.only_nouns(sent, len(sent))
            dep['nouns_up_to_verb'] = self.only_nouns(sent, 
                                                      int(dep['verb_index']))
            self.deps.append(dep)

            if self.verbose and self.stop_after and n_deps % 10 == 0:
                progbar.update(n_deps) 
Developer: TalLinzen, Project: rnn_agreement, Lines: 40, Source: collect_agreement.py

Example 11: reset

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def reset(self):
        self.interval_start = timeit.default_timer()
        self.progbar = Progbar(target=self.interval)
        self.metrics = []
        self.infos = []
        self.info_names = None
        self.episode_rewards = [] 
Developer: Kjell-K, Project: AirGym, Lines: 9, Source: callbacks.py

Example 12: epoch_end_callback

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def epoch_end_callback(self, sess, sv, epoch_num):
        # Evaluate val loss
        validation_iou = 0
        print("\nComputing Validation IoU")
        progbar = Progbar(target=self.val_steps_per_epoch)

        for i in range(self.val_steps_per_epoch):
            loss_iou = sess.run(self.val_iou,
                             feed_dict={self.is_training: False})
            validation_iou+= loss_iou
            progbar.update(i)
        validation_iou /= self.val_steps_per_epoch*self.config.batch_size

        # Log to Tensorflow board
        val_sum = sess.run(self.val_sum, feed_dict ={
                           self.val_iou_ph: validation_iou})

        sv.summary_writer.add_summary(val_sum, epoch_num)

        print("Epoch [{}] Validation IoU: {}".format(
            epoch_num, validation_iou))
        # Model Saving
        if validation_iou > self.min_val_iou:
            self.save(sess, self.config.checkpoint_dir, 'best')
            self.min_val_iou = validation_iou
        if epoch_num % self.config.save_freq == 0:
            self.save(sess, self.config.checkpoint_dir, epoch_num) 
Developer: antonilo, Project: unsupervised_detection, Lines: 29, Source: adversarial_learner.py

Example 13: encode_texts

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def encode_texts(self, texts, include_oov=False, verbose=1, **kwargs):
        """Encodes the given texts using internal vocabulary with optionally applied encoding options. See
        `apply_encoding_options` to set various options.

        Args:
            texts: The list of text items to encode.
            include_oov: True to map unknown (out of vocab) tokens to 0. False to exclude the token.
            verbose: The verbosity level for progress. Can be 0, 1, 2. (Default value = 1)
            **kwargs: The kwargs for `token_generator`.

        Returns:
            The encoded texts.
        """
        if not self.has_vocab:
            raise ValueError("You need to build the vocabulary using `build_vocab` before using `encode_texts`")

        progbar = Progbar(len(texts), verbose=verbose, interval=0.25)
        encoded_texts = []
        for token_data in self.token_generator(texts, **kwargs):
            indices, token = token_data[:-1], token_data[-1]

            token_idx = self._token2idx.get(token)
            if token_idx is None and include_oov:
                token_idx = 0

            if token_idx is not None:
                _append(encoded_texts, indices, token_idx)

            # Update progressbar per document level.
            progbar.update(indices[0])

        # All done. Finalize progressbar.
        progbar.update(len(texts), force=True)
        return encoded_texts 
Developer: raghakot, Project: keras-text, Lines: 36, Source: processing.py

Example 14: build_vocab

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def build_vocab(self, texts, verbose=1, **kwargs):
        """Builds the internal vocabulary and computes various statistics.

        Args:
            texts: The list of text items to encode.
            verbose: The verbosity level for progress. Can be 0, 1, 2. (Default value = 1)
            **kwargs: The kwargs for `token_generator`.
        """
        if self.has_vocab:
            logger.warn("Tokenizer already has existing vocabulary. Overriding and building new vocabulary.")

        progbar = Progbar(len(texts), verbose=verbose, interval=0.25)
        count_tracker = _CountTracker()

        self._token_counts.clear()
        self._num_texts = len(texts)

        for token_data in self.token_generator(texts, **kwargs):
            indices, token = token_data[:-1], token_data[-1]
            count_tracker.update(indices)
            self._token_counts[token] += 1

            # Update progressbar per document level.
            progbar.update(indices[0])

        # Generate token2idx and idx2token.
        self.create_token_indices(self._token_counts.keys())

        # All done. Finalize progressbar update and count tracker.
        count_tracker.finalize()
        self._counts = count_tracker.counts
        progbar.update(len(texts), force=True) 
Developer: raghakot, Project: keras-text, Lines: 34, Source: processing.py

Example 15: tensorise_smiles_mp

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def tensorise_smiles_mp(smiles, max_degree=5, max_atoms=None, workers=cpu_count()-1, chunksize=3000, verbose=True):
    ''' Multiprocess implementation of `tensorise_smiles`

    # Arguments:
        See `tensorise_smiles` documentation

    # Additional arguments:
        workers: int, num parallel processes
        chunksize: int, num molecules tensorised per worker, bigger chunksize is
            preferred as each process will preallocate np.arrays

    # Returns:
        See `tensorise_smiles` documentation

    # TODO:
        - fix python keyboardinterrupt bug:
          https://noswap.com/blog/python-multiprocessing-keyboardinterrupt
        - replace progbar with proper logging
    '''

    pool = Pool(processes=workers)

    # Create an iterator
    #http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
    def chunks(l, n):
        """Yield successive n-sized chunks from l."""
        for i in range(0, len(l), n):
            yield l[i:i + n]
    smiles_chunks = chunks(smiles, chunksize)

    # MAP: Tensorise in parallel
    map_function = partial(tensorise_smiles, max_degree=max_degree, max_atoms=max_atoms)
    if verbose:
        print('Tensorising molecules in batches...')
        pbar = Progbar(len(smiles), width=50)
        tensor_list = []
        for tensors in pool.imap(map_function, smiles_chunks):
            pbar.add(tensors[0].shape[0])
            tensor_list.append(tensors)
        print('Merging batch tensors...    ', end='')
    else:
        tensor_list = pool.map(map_function, smiles_chunks)
    if verbose:
        print('[DONE]')

    # REDUCE: Concatenate the obtained tensors
    pool.close()
    pool.join()
    return concat_mol_tensors(tensor_list, match_degree=max_degree!=None, match_max_atoms=max_atoms!=None) 
Developer: keiserlab, Project: keras-neural-graph-fingerprint, Lines: 51, Source: preprocessing.py


Note: The keras.utils.generic_utils.Progbar examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For redistribution and use, please follow the License of the corresponding project. Do not reproduce without permission.