

Python generic_utils.Progbar Method Code Examples

This article collects typical usage examples of the keras.utils.generic_utils.Progbar method in Python. If you are wondering how to call generic_utils.Progbar, what it does in practice, or what real-world code that uses it looks like, the curated examples below should help. You can also browse further usage examples for the containing module, keras.utils.generic_utils.


The following shows 15 code examples of the generic_utils.Progbar method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
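All of the examples below rely on the same small Progbar API: construct the bar with a target count, then advance it with update(current, values=...) or add(n, values=...), where values is a list of (name, value) pairs whose running averages are displayed next to the bar. Here is a minimal sketch of that pattern; the batch count and the loss value are placeholders for illustration and are not taken from any of the examples below.

import numpy as np
from keras.utils.generic_utils import Progbar

n_batches = 100                        # hypothetical number of steps
progbar = Progbar(n_batches)           # target = total number of steps
for i in range(n_batches):
    loss = float(np.random.random())   # placeholder for a real per-batch metric
    # add() advances the bar by a step count and averages the named values;
    # update() (used in several examples below) jumps to an absolute position instead.
    progbar.add(1, values=[("loss", loss)])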

Example 1: results

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def results(self):
        recs = []
        columns = ['gram_loss', 'ungram_loss', 'correct'] + dependency_fields
        self.model.model._make_test_function()
        progbar = Progbar(len(self.deps_test))
        for i, dep in enumerate(self.deps_test):
            inp = np.zeros((1, self.maxlen))
            v = int(dep['verb_index']) - 1
            tokens = dep[self.field].split()[:v+1]
            ints = [self.vocab_to_ints[x] for x in tokens]
            try:
                ungram = self.vocab_to_ints[self.inflect_verb[tokens[v]]]
            except KeyError:   # reinflected form not in vocabulary: ignore
                continue
            n = len(ints) - 1
            inp[0, -n:] = ints[:-1]
            gram_loss = self.model.test_on_batch(inp, np.array([ints[v]]))
            ungram_loss = self.model.test_on_batch(inp, np.array([ungram]))
            recs.append((gram_loss, ungram_loss, gram_loss < ungram_loss) +
                        tuple(dep[x] for x in dependency_fields))
            if i % 16 == 0:
                progbar.update(i)

        self.test_results = pd.DataFrame(recs, columns=columns) 
Developer: TalLinzen, Project: rnn_agreement, Lines of code: 26, Source file: language_model.py

Example 2: play

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def play(self, env, epoch=1, batch_size=1, visualize=None, verbose=1):
        print("Free play started!")
        frames = np.zeros((0, ) + env.observe_image().shape[1:])
        frames = frames.transpose(0, 2, 3, 1)
        progbar = Progbar(epoch)

        for e in xrange(epoch):
            # reset environment on each epoch
            env.reset()
            game_over = False
            loss = 0
            rewards = 0

            # get initial observation, start game
            obs_t = env.observe()
            while not game_over:
                obs_tm1 = obs_t

                # get next action
                action = self.policy(obs_tm1, train=False)

                # apply action, get reward and new state
                obs_t, reward, game_over = env.update(action)
                rewards += reward

                frame_t = env.observe_image().transpose(0, 2, 3, 1)
                frames = np.concatenate([frames, frame_t], axis=0)

            if verbose == 1:
                progbar.add(1, values=[("loss", loss), ("rewards", rewards)])


        if visualize:
            from agnez.video import make_gif
            print("Making gif!")
            frames = np.repeat(frames, 3, axis=-1)
            make_gif(frames[:visualize['n_frames']],
                     filepath=visualize['filepath'], gray=visualize['gray'], interpolation='none')
            print("See your gif at {}".format(visualize['filepath'])) 
Developer: EderSantana, Project: X, Lines of code: 41, Source file: agent.py

Example 3: test_progbar

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def test_progbar():
    values_s = [None,
                [['key1', 1], ['key2', 1e-4]],
                [['key3', 1], ['key2', 1e-4]]]

    for target in (len(values_s) - 1, None):
        for verbose in (0, 1, 2):
            bar = Progbar(target, width=30, verbose=verbose, interval=0.05)
            for current, values in enumerate(values_s):
                bar.update(current, values=values) 
Developer: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines of code: 12, Source file: generic_utils_test.py

Example 4: new_generate_dataset

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def new_generate_dataset(dataset, samples, gen_test, beam_size, hypo_len, noise_size, cmodel):

    vgen = val_generator(dataset, gen_test, beam_size, hypo_len, noise_size)
    p = Progbar(samples)
    batchez = []
    while p.seen_so_far < samples:
        batch = next(vgen)
        probs = cmodel.predict([batch[0], batch[1]], verbose = 0)
        batch += (probs,)

        p.add(len(batch[0]))
        batchez.append(batch)
    return merge_result_batches(batchez) 
Developer: jstarc, Project: nli_generation, Lines of code: 15, Source file: augment.py

Example 5: validate

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def validate(dev, gen_test, beam_size, hypo_len, samples, noise_size, glove, cmodel = None, adverse = False, 
                 diverse = False):
    vgen = val_generator(dev, gen_test, beam_size, hypo_len, noise_size)
    p = Progbar(samples)
    batchez = []
    while p.seen_so_far < samples:
        batch = next(vgen)
        perplexity = np.mean(np.power(2, batch[2]))
        loss = np.mean(batch[2])
        losses = [('hypo_loss', loss), ('perplexity', perplexity)]
        if cmodel is not None:
            ceval = cmodel.evaluate([batch[0], batch[1]], batch[4], verbose = 0)
            losses += [('class_loss', ceval[0]), ('class_acc', ceval[1])]
            probs = cmodel.predict([batch[0], batch[1]], verbose = 0)
            losses += [('class_entropy', np.mean(-np.sum(probs * np.log(probs), axis=1)))]
        
        p.add(len(batch[0]), losses)
        batchez.append(batch)
    batchez = merge_result_batches(batchez)
    
    res = {}
    if adverse:
        val_loss = adverse_validation(dev, batchez, glove)
        print 'adverse_loss:', val_loss
        res['adverse_loss'] = val_loss
    if diverse:
        div, _, _, _ = diversity(dev, gen_test, beam_size, hypo_len, noise_size, 64, 32)
        res['diversity'] = div
    print
    for val in p.unique_values:
        arr = p.sum_values[val]
        res[val] = arr[0] / arr[1]
    return res 
Developer: jstarc, Project: nli_generation, Lines of code: 35, Source file: generative_alg.py

Example 6: diversity

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def diversity(dev, gen_test, beam_size, hypo_len, noise_size, per_premise, samples):
    step = len(dev[0]) / samples
    sind = [i * step for i in range(samples)]
    p = Progbar(per_premise * samples)
    for i in sind:
        hypos = []
        unique_words = []
        hypo_list = []
        premise = dev[0][i]
        prem_list = set(cut_zeros(list(premise)))        
        while len(hypos) < per_premise:
            label = np.argmax(dev[2][i])
            words = single_generate(premise, label, gen_test, beam_size, hypo_len, noise_size)
            hypos += [str(ex) for ex in words]
            unique_words += [int(w) for ex in words for w in ex if w > 0]
            hypo_list += [set(cut_zeros(list(ex))) for ex in words]
        
        jacks = []  
        prem_jacks = []
        for u in range(len(hypo_list)):
            sim_prem = len(hypo_list[u] & prem_list)/float(len(hypo_list[u] | prem_list))
            prem_jacks.append(sim_prem)
            for v in range(u+1, len(hypo_list)):
                sim = len(hypo_list[u] & hypo_list[v])/float(len(hypo_list[u] | hypo_list[v]))
                jacks.append(sim)
        avg_dist_hypo = 1 -  np.mean(jacks)
        avg_dist_prem = 1 -  np.mean(prem_jacks)
        d = entropy(Counter(hypos).values()) 
        w = entropy(Counter(unique_words).values())
        p.add(len(hypos), [('diversity', d),('word_entropy', w),('avg_dist_hypo', avg_dist_hypo), ('avg_dist_prem', avg_dist_prem)])
    arrd = p.sum_values['diversity']
    arrw = p.sum_values['word_entropy']
    arrj = p.sum_values['avg_dist_hypo']
    arrp = p.sum_values['avg_dist_prem']
    
    return arrd[0] / arrd[1], arrw[0] / arrw[1], arrj[0] / arrj[1],  arrp[0] / arrp[1] 
Developer: jstarc, Project: nli_generation, Lines of code: 38, Source file: generative_alg.py

Example 7: test_points

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def test_points(premises, labels, noises, gtest, cmodel, hypo_len):
    p = Progbar(len(premises))
    hypos = []
    bs = 64 
    for i in range(len(labels) / bs):
        words, _  = generative_predict_beam(gtest, premises[i * bs: (i+1)*bs], 
                          noises[i * bs: (i+1)*bs,None,:], labels[i * bs: (i+1)*bs], True, hypo_len)
        hypos.append(words)
        p.add(len(words))
    hypos = np.vstack(hypos)
    cpreds = cmodel.evaluate([premises[:len(hypos)], hypos], labels[:len(hypos)])
    print cpreds 
Developer: jstarc, Project: nli_generation, Lines of code: 14, Source file: visualize.py

Example 8: reset

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def reset(self):
        """ Reset statistics """
        self.interval_start = timeit.default_timer()
        self.progbar = Progbar(target=self.interval)
        self.metrics = []
        self.infos = []
        self.info_names = None
        self.episode_rewards = [] 
Developer: keras-rl, Project: keras-rl, Lines of code: 10, Source file: callbacks.py

Example 9: evaluate

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def evaluate(self, howmany=1000):
        self.model.model._make_test_function()
        random.seed(0)
        shuffled = self.deps_test[:]
        random.shuffle(shuffled)
        shuffled = shuffled[:howmany]
        X_test = []
        Y_test = []

        for dep in shuffled:
            tokens = self.process_single_dependency(dep)
            ints = []
            for token in tokens:
                if token not in self.vocab_to_ints:
                    # zero is for pad
                    x = self.vocab_to_ints[token] = len(self.vocab_to_ints) + 1
                    self.ints_to_vocab[x] = token
                ints.append(self.vocab_to_ints[token])

            first = 1
            for i in range(first, len(ints) - 1):
                X_test.append(ints[:i])
                Y_test.append(ints[i])

        test_loss = []
        end = int(float(len(X_test) / self.batch_size))
        progbar = Progbar(end)
        for i in range(0, len(X_test), self.batch_size):
            inp = sequence.pad_sequences(X_test[i:i+self.batch_size],
                                         maxlen=self.maxlen)
            out = Y_test[i:i+self.batch_size]
            output = self.model.test_on_batch(inp, out)
            test_loss.append(output)
            j = int(float(i) / self.batch_size)
            if j % 16 == 0:
                progbar.update(j)
        progbar.update(end)

        return np.mean(test_loss) 
Developer: TalLinzen, Project: rnn_agreement, Lines of code: 41, Source file: language_model.py

Example 10: collect_agreement

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def collect_agreement(self):
        n_deps = 0
        self.deps = []
        random.seed(1)

        if self.verbose and self.stop_after:
            from keras.utils.generic_utils import Progbar
            progbar = Progbar(self.stop_after)

        for i, sent in enumerate(tokenize_blanks(zread(self.infile)), 1):
            if self.stop_after is not None and n_deps >= self.stop_after:
                break
            if i % (self.skip + 1) != 0:
                continue

            # only one dependency per sentence
            deps = self.find_nsubj_agreement(sent)
            if len(deps) == 0:
                continue
            dep = random.choice(deps)
            if dep['subj_index'] > dep['verb_index']:
                continue
            if (dep['subj_pos'] == 'NN' and dep['verb_pos'] == 'VBP' or 
                dep['subj_pos'] == 'NNS' and dep['verb_pos'] == 'VBZ'):
                # ungrammatical dependency (parse error)
                continue

            n_deps += 1
            dep['sentence'] = self.represent_sentence(sent)
            dep['pos_sentence'] = ' '.join(x[POS] for x in sent)
            dep['orig_sentence'] = ' '.join(x[WORD] for x in sent)
            dep['all_nouns'] = self.only_nouns(sent, len(sent))
            dep['nouns_up_to_verb'] = self.only_nouns(sent, 
                                                      int(dep['verb_index']))
            self.deps.append(dep)

            if self.verbose and self.stop_after and n_deps % 10 == 0:
                progbar.update(n_deps) 
Developer: TalLinzen, Project: rnn_agreement, Lines of code: 40, Source file: collect_agreement.py

Example 11: reset

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def reset(self):
        self.interval_start = timeit.default_timer()
        self.progbar = Progbar(target=self.interval)
        self.metrics = []
        self.infos = []
        self.info_names = None
        self.episode_rewards = [] 
Developer: Kjell-K, Project: AirGym, Lines of code: 9, Source file: callbacks.py

Example 12: epoch_end_callback

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def epoch_end_callback(self, sess, sv, epoch_num):
        # Evaluate val loss
        validation_iou = 0
        print("\nComputing Validation IoU")
        progbar = Progbar(target=self.val_steps_per_epoch)

        for i in range(self.val_steps_per_epoch):
            loss_iou = sess.run(self.val_iou,
                             feed_dict={self.is_training: False})
            validation_iou+= loss_iou
            progbar.update(i)
        validation_iou /= self.val_steps_per_epoch*self.config.batch_size

        # Log to Tensorflow board
        val_sum = sess.run(self.val_sum, feed_dict ={
                           self.val_iou_ph: validation_iou})

        sv.summary_writer.add_summary(val_sum, epoch_num)

        print("Epoch [{}] Validation IoU: {}".format(
            epoch_num, validation_iou))
        # Model Saving
        if validation_iou > self.min_val_iou:
            self.save(sess, self.config.checkpoint_dir, 'best')
            self.min_val_iou = validation_iou
        if epoch_num % self.config.save_freq == 0:
            self.save(sess, self.config.checkpoint_dir, epoch_num) 
Developer: antonilo, Project: unsupervised_detection, Lines of code: 29, Source file: adversarial_learner.py

Example 13: encode_texts

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def encode_texts(self, texts, include_oov=False, verbose=1, **kwargs):
        """Encodes the given texts using internal vocabulary with optionally applied encoding options. See
        `apply_encoding_options` to set various options.

        Args:
            texts: The list of text items to encode.
            include_oov: True to map unknown (out of vocab) tokens to 0. False to exclude the token.
            verbose: The verbosity level for progress. Can be 0, 1, 2. (Default value = 1)
            **kwargs: The kwargs for `token_generator`.

        Returns:
            The encoded texts.
        """
        if not self.has_vocab:
            raise ValueError("You need to build the vocabulary using `build_vocab` before using `encode_texts`")

        progbar = Progbar(len(texts), verbose=verbose, interval=0.25)
        encoded_texts = []
        for token_data in self.token_generator(texts, **kwargs):
            indices, token = token_data[:-1], token_data[-1]

            token_idx = self._token2idx.get(token)
            if token_idx is None and include_oov:
                token_idx = 0

            if token_idx is not None:
                _append(encoded_texts, indices, token_idx)

            # Update progressbar per document level.
            progbar.update(indices[0])

        # All done. Finalize progressbar.
        progbar.update(len(texts), force=True)
        return encoded_texts 
Developer: raghakot, Project: keras-text, Lines of code: 36, Source file: processing.py

Example 14: build_vocab

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def build_vocab(self, texts, verbose=1, **kwargs):
        """Builds the internal vocabulary and computes various statistics.

        Args:
            texts: The list of text items to encode.
            verbose: The verbosity level for progress. Can be 0, 1, 2. (Default value = 1)
            **kwargs: The kwargs for `token_generator`.
        """
        if self.has_vocab:
            logger.warn("Tokenizer already has existing vocabulary. Overriding and building new vocabulary.")

        progbar = Progbar(len(texts), verbose=verbose, interval=0.25)
        count_tracker = _CountTracker()

        self._token_counts.clear()
        self._num_texts = len(texts)

        for token_data in self.token_generator(texts, **kwargs):
            indices, token = token_data[:-1], token_data[-1]
            count_tracker.update(indices)
            self._token_counts[token] += 1

            # Update progressbar per document level.
            progbar.update(indices[0])

        # Generate token2idx and idx2token.
        self.create_token_indices(self._token_counts.keys())

        # All done. Finalize progressbar update and count tracker.
        count_tracker.finalize()
        self._counts = count_tracker.counts
        progbar.update(len(texts), force=True) 
Developer: raghakot, Project: keras-text, Lines of code: 34, Source file: processing.py

Example 15: tensorise_smiles_mp

# Required import: from keras.utils import generic_utils [as alias]
# Or: from keras.utils.generic_utils import Progbar [as alias]
def tensorise_smiles_mp(smiles, max_degree=5, max_atoms=None, workers=cpu_count()-1, chunksize=3000, verbose=True):
    ''' Multiprocess implementation of `tensorise_smiles`

    # Arguments:
        See `tensorise_smiles` documentation

    # Additional arguments:
        workers: int, num parallel processes
        chunksize: int, num molecules tensorised per worker, bigger chunksize is
            preferred as each process will preallocate np.arrays

    # Returns:
        See `tensorise_smiles` documentation

    # TODO:
        - fix python keyboardinterrupt bug:
          https://noswap.com/blog/python-multiprocessing-keyboardinterrupt
        - replace progbar with proper logging
    '''

    pool = Pool(processes=workers)

    # Create an iterator
    #http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
    def chunks(l, n):
        """Yield successive n-sized chunks from l."""
        for i in range(0, len(l), n):
            yield l[i:i + n]
    smiles_chunks = chunks(smiles, chunksize)

    # MAP: Tensorise in parallel
    map_function = partial(tensorise_smiles, max_degree=max_degree, max_atoms=max_atoms)
    if verbose:
        print('Tensorising molecules in batches...')
        pbar = Progbar(len(smiles), width=50)
        tensor_list = []
        for tensors in pool.imap(map_function, smiles_chunks):
            pbar.add(tensors[0].shape[0])
            tensor_list.append(tensors)
        print('Merging batch tensors...    ', end='')
    else:
        tensor_list = pool.map(map_function, smiles_chunks)
    if verbose:
        print('[DONE]')

    # REDUCE: Concatenate the obtained tensors
    pool.close()
    pool.join()
    return concat_mol_tensors(tensor_list, match_degree=max_degree!=None, match_max_atoms=max_atoms!=None) 
Developer: keiserlab, Project: keras-neural-graph-fingerprint, Lines of code: 51, Source file: preprocessing.py


Note: The keras.utils.generic_utils.Progbar examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers, and the source code copyright belongs to the original authors; please refer to the corresponding project's license before distributing or using it. Do not reproduce without permission.