

Python logger.display Function Code Examples

This article collects typical usage examples of the neon.logger.display function in Python. If you have been wondering how the display function works, how to call it, or what it looks like in real code, the curated examples below should help.


Fifteen code examples of the display function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
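
Before diving into the examples, here is a minimal usage sketch. It assumes neon is installed and uses the same import alias (neon_logger) that appears in every example below; the messages themselves are purely illustrative.

from neon import logger as neon_logger

# display() sends a user-facing message through neon's logging machinery,
# so it appears on the console much like a plain print() call would.
neon_logger.display("Hello from neon_logger.display")
neon_logger.display("Formatted values work too: {}".format(42))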

Example 1: fetch_dataset

    def fetch_dataset(url, sourcefile, destfile, totalsz):
        """
        Download the file specified by the given URL.

        Args:
            url (str): Base URL of the file to be downloaded.
            sourcefile (str): Name of the source file.
            destfile (str): Path to the destination.
            totalsz (int): Size of the file to be downloaded.
        """
        req = Request(os.path.join(url, sourcefile), headers={'User-Agent': 'neon'})
        # backport https limitation and workaround per http://python-future.org/imports.html
        cloudfile = urlopen(req)
        neon_logger.display("Downloading file: {}".format(destfile))
        blockchar = u'\u2588'  # character to display in progress bar
        with open(destfile, 'wb') as f:
            data_read = 0
            chunksz = 1024**2
            while 1:
                data = cloudfile.read(chunksz)
                if not data:
                    break
                data_read = min(totalsz, data_read + chunksz)
                progress_string = u'Download Progress |{:<50}| '.format(
                    blockchar * int(float(data_read) / totalsz * 50))
                sys.stdout.write('\r')
                if PY3:
                    sys.stdout.write(progress_string)
                else:
                    sys.stdout.write(progress_string.encode("utf-8"))
                sys.stdout.flush()

                f.write(data)
            neon_logger.display("Download Complete")
Developer: NervanaSystems, Project: neon, Lines: 34, Source file: datasets.py
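
A hypothetical invocation (every argument value below is made up, not taken from the neon sources) shows how the arguments fit together: the base URL and the source file name are joined to form the download address, and progress is reported against totalsz bytes. The indentation above suggests the helper is defined inside a class in the neon sources; it is called as a plain function here only to keep the sketch short.

# Hypothetical call; URL, file names and size are illustrative only.
fetch_dataset('https://example.com/datasets',  # base URL hosting the file
              'mnist.pkl.gz',                  # file name appended to the URL
              '/tmp/mnist.pkl.gz',             # local destination path
              1000000)                         # expected size in bytes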

Example 2: display_model_params

def display_model_params(neon_args, neon_root_yaml):
    """
    Display model parameters
    :param      neon_args: contains command line arguments
    :param neon_root_yaml: contains YAML elements
    """
    output_string = '\n-- INFORMATION: HYPER PARAMETERS ------\n'
    try:
        output_string = add_param_to_output(output_string,
                                            'backend',
                                            neon_args.backend)
        output_string = add_param_to_output(output_string,
                                            'batch size',
                                            neon_args.batch_size)
        output_string = add_param_to_output(output_string,
                                            'epochs',
                                            neon_args.epochs)
        output_string = add_param_to_output(output_string,
                                            'optimizer type',
                                            neon_root_yaml['optimizer']['type'])
        output_string = add_param_to_output(output_string,
                                            'learning rate',
                                            neon_root_yaml['optimizer']['config']['learning_rate'])
        output_string = add_param_to_output(output_string,
                                            'momentum coef',
                                            neon_root_yaml['optimizer']['config']['momentum_coef'])
    except Exception:
        output_string += 'Some parameters cannot be displayed\n'
    output_string += '----------------------------------------'
    neon_logger.display(output_string)
Developer: StevenLOL, Project: neon, Lines: 30, Source file: display_information.py

Example 3: train_model

def train_model(lrmodel, opt, cost, X, Y, devX, devY, devscores):
    """
    Train model, using pearsonr on dev for early stopping
    """
    done = False
    best = -1.0
    r = np.arange(1, 6)

    train_set = ArrayIterator(X=X, y=Y, make_onehot=False)
    valid_set = ArrayIterator(X=devX, y=devY, make_onehot=False)

    eval_epoch = 10

    while not done:
        callbacks = Callbacks(lrmodel, eval_set=valid_set)

        lrmodel.fit(train_set, optimizer=opt, num_epochs=eval_epoch,
                    cost=cost, callbacks=callbacks)

        # Every 10 epochs, check Pearson on development set
        yhat = np.dot(lrmodel.get_outputs(valid_set), r)
        score = pearsonr(yhat, devscores)[0]
        if score > best:
            neon_logger.display('Dev Pearson: {}'.format(score))
            best = score
            bestlrmodel = copy.copy(lrmodel)
        else:
            done = True

        eval_epoch += 10

    yhat = np.dot(bestlrmodel.get_outputs(valid_set), r)
    score = pearsonr(yhat, devscores)[0]
    neon_logger.display('Dev Pearson: {}'.format(score))
    return bestlrmodel
Developer: NervanaSystems, Project: neon, Lines: 35, Source file: eval_sick.py

Example 4: train_or_val_pairs

    def train_or_val_pairs(self, setn):
        """
        untar imagenet tar files into directories that indicate their label.

        returns [(filename, label), ...] for train or val set partitions
        """
        img_dir = os.path.join(self.out_dir, setn)

        neon_logger.display("Extracting %s files" % (setn))
        root_tf_path = self.tars[setn]
        if not os.path.exists(root_tf_path):
            raise IOError(("tar file {} not found. Ensure you have ImageNet downloaded"
                           ).format(root_tf_path))

        try:
            root_tf = tarfile.open(root_tf_path)
        except tarfile.ReadError as e:
            raise ValueError('ReadError opening {}: {}'.format(root_tf_path, e))

        label_dict = self.extract_labels(setn)
        subpaths = root_tf.getmembers()
        arg_iterator = zip(repeat(self.target_size), repeat(root_tf_path), repeat(img_dir),
                           repeat(setn), repeat(label_dict), subpaths)
        pool = multiprocessing.Pool()

        pairs = []
        for pair_list in tqdm.tqdm(pool.imap_unordered(process_i1k_tar_subpath, arg_iterator),
                                   total=len(subpaths)):
            pairs.extend(pair_list)
        pool.close()
        pool.join()
        root_tf.close()

        return pairs
Developer: StevenLOL, Project: neon, Lines: 34, Source file: ingest.py

Example 5: extract_images

    def extract_images(self, overwrite=False):
        for setn in ('train', 'val'):
            img_dir = os.path.join(self.out_dir, setn)

            neon_logger.display("Extracting %s files" % (setn))
            toptar = getattr(self, setn + '_tar')
            label_dict = getattr(self, setn + '_labels')
            name_slice = slice(None, 9) if setn == 'train' else slice(15, -5)
            with tarfile.open(toptar) as tf:
                for s in tf.getmembers():
                    label = label_dict[s.name[name_slice]]
                    subpath = os.path.join(img_dir, str(label))
                    if not os.path.exists(subpath):
                        os.makedirs(subpath)
                    if setn == 'train':
                        tarfp = tarfile.open(fileobj=tf.extractfile(s))
                        file_list = tarfp.getmembers()
                    else:
                        tarfp = tf
                        file_list = [s]

                    for fobj in file_list:
                        fname = os.path.join(subpath, fobj.name)
                        if not os.path.exists(fname) or overwrite:
                            with open(fname, 'wb') as jf:
                                jf.write(tarfp.extractfile(fobj).read())
Developer: Jokeren, Project: neon, Lines: 26, Source file: batch_writer.py

Example 6: write_csv_files

    def write_csv_files(self, overwrite=False):
        self.extract_images()
        for setn in ('train', 'val'):
            img_dir = os.path.join(self.out_dir, setn)
            csvfile = getattr(self, setn + '_file')
            neon_logger.display("Getting %s file list" % (setn))
            if os.path.exists(csvfile) and not overwrite:
                neon_logger.display("File %s exists, not overwriting" % (csvfile))
                continue
            flines = []

            subdirs = glob(os.path.join(img_dir, '*'))
            for subdir in subdirs:
                subdir_label = os.path.basename(subdir)  # This is the int label
                files = glob(os.path.join(subdir, self.file_pattern))
                flines += [(filename, subdir_label) for filename in files]

            if setn == 'train':
                np.random.seed(0)
                np.random.shuffle(flines)

            # text mode so plain str lines can be written under Python 3
            with gzip.open(csvfile, 'wt') as f:
                f.write('filename,l_id\n')
                for tup in flines:
                    f.write('{},{}\n'.format(*tup))
Developer: Jokeren, Project: neon, Lines: 25, Source file: batch_writer.py

Example 7: checkSequentialMatchesBatch

def checkSequentialMatchesBatch():
    """ check LSTM I/O forward/backward interactions """

    n, b, d = (5, 3, 4)  # sequence length, batch size, hidden size
    input_size = 10
    WLSTM = LSTM.init(input_size, d)  # input size, hidden size
    X = np.random.randn(n, b, input_size)
    h0 = np.random.randn(b, d)
    c0 = np.random.randn(b, d)

    # sequential forward
    cprev = c0
    hprev = h0
    caches = [{} for t in range(n)]
    Hcat = np.zeros((n, b, d))
    for t in range(n):
        xt = X[t:t + 1]
        _, cprev, hprev, cache = LSTM.forward(xt, WLSTM, cprev, hprev)
        caches[t] = cache
        Hcat[t] = hprev

    # sanity check: perform batch forward to check that we get the same thing
    H, _, _, batch_cache = LSTM.forward(X, WLSTM, c0, h0)
    assert np.allclose(H, Hcat), "Sequential and Batch forward don't match!"

    # eval loss
    wrand = np.random.randn(*Hcat.shape)
    # loss = np.sum(Hcat * wrand)
    dH = wrand

    # get the batched version gradients
    BdX, BdWLSTM, Bdc0, Bdh0 = LSTM.backward(dH, batch_cache)

    # now perform sequential backward
    dX = np.zeros_like(X)
    dWLSTM = np.zeros_like(WLSTM)
    dc0 = np.zeros_like(c0)
    dh0 = np.zeros_like(h0)
    dcnext = None
    dhnext = None
    for t in reversed(range(n)):
        dht = dH[t].reshape(1, b, d)
        dx, dWLSTMt, dcprev, dhprev = LSTM.backward(
            dht, caches[t], dcnext, dhnext)
        dhnext = dhprev
        dcnext = dcprev

        dWLSTM += dWLSTMt  # accumulate LSTM gradient
        dX[t] = dx[0]
        if t == 0:
            dc0 = dcprev
            dh0 = dhprev

    # and make sure the gradients match
    neon_logger.display('Making sure batched version agrees with sequential version: '
                        '(should all be True)')
    neon_logger.display(np.allclose(BdX, dX))
    neon_logger.display(np.allclose(BdWLSTM, dWLSTM))
    neon_logger.display(np.allclose(Bdc0, dc0))
    neon_logger.display(np.allclose(Bdh0, dh0))
Developer: JediKoder, Project: neon, Lines: 60, Source file: lstm_ref.py

Example 8: display_platform_information

def display_platform_information():
    """
    Display platform information.
    """
    import platform
    output_string = '\n-- INFORMATION: PLATFORM & OS ---------\n'

    try:

        output_string = add_param_to_output(output_string,
                                            'OS',
                                            platform.platform())
        output_string = add_param_to_output(output_string,
                                            'OS release version',
                                            platform.version())
        output_string = add_param_to_output(output_string,
                                            'machine',
                                            platform.machine())
        output_string = add_param_to_output(output_string,
                                            'node',
                                            platform.node())
        output_string = add_param_to_output(output_string,
                                            'python version',
                                            platform.python_version())
        output_string = add_param_to_output(output_string,
                                            'python build',
                                            platform.python_build())
        output_string = add_param_to_output(output_string,
                                            'python compiler',
                                            platform.python_compiler())

    except Exception:
        output_string += 'Some platform information cannot be displayed\n'
    output_string += '----------------------------------------'
    neon_logger.display(output_string)
Developer: StevenLOL, Project: neon, Lines: 35, Source file: display_information.py

Example 9: run

    def run(self):
        if not os.path.exists(self.out_dir):
            os.makedirs(self.out_dir)
        neon_logger.display("Writing train macrobatches")
        self.write_batches(self.train_start, self.labels['train'], self.imgs['train'])
        neon_logger.display("Writing validation macrobatches")
        self.write_batches(self.val_start, self.labels['val'], self.imgs['val'])
        self.save_meta()
Developer: Jokeren, Project: neon, Lines: 8, Source file: batch_writer.py

Example 10: __init__

    def __init__(self, path='.', task='qa1_single-supporting-fact', subset='en'):
        """
        Load bAbI dataset and extract text and read the stories
        For a particular task, the class will read both train and test files
        and combine the vocabulary.

        Arguments:
            path (str): Directory to store the dataset
            task (str): a particular task to solve (all bAbI tasks are trained
                        and tested separately)
            subset (str): subset of the dataset to use:
                          {en, en-10k, shuffled, hn, hn-10k, shuffled-10k}
        """
        url = 'http://www.thespermwhale.com/jaseweston/babi'
        size = 11745123
        filename = 'tasks_1-20_v1-2.tar.gz'
        super(BABI, self).__init__(filename,
                                   url,
                                   size,
                                   path=path)
        self.task = task
        self.subset = subset

        neon_logger.display('Preparing bAbI dataset or extracting from %s' % path)
        neon_logger.display('Task is %s/%s' % (subset, task))
        self.tasks = [
            'qa1_single-supporting-fact',
            'qa2_two-supporting-facts',
            'qa3_three-supporting-facts',
            'qa4_two-arg-relations',
            'qa5_three-arg-relations',
            'qa6_yes-no-questions',
            'qa7_counting',
            'qa8_lists-sets',
            'qa9_simple-negation',
            'qa10_indefinite-knowledge',
            'qa11_basic-coreference',
            'qa12_conjunction',
            'qa13_compound-coreference',
            'qa14_time-reasoning',
            'qa15_basic-deduction',
            'qa16_basic-induction',
            'qa17_positional-reasoning',
            'qa18_size-reasoning',
            'qa19_path-finding',
            'qa20_agents-motivations'
        ]
        assert task in self.tasks, "given task is not in the bAbI dataset"

        self.train_file, self.test_file = self.load_data(path, task)
        self.train_parsed = BABI.parse_babi(self.train_file)
        self.test_parsed = BABI.parse_babi(self.test_file)

        self.compute_statistics()
        self.train = self.vectorize_stories(self.train_parsed)
        self.test = self.vectorize_stories(self.test_parsed)
Developer: JediKoder, Project: neon, Lines: 56, Source file: questionanswer.py

Example 11: test_dataset

def test_dataset(backend_default, data):
    dataset = MNIST(path=data)
    dataset.gen_iterators()
    train_set = dataset.data_dict['train']
    train_set.be = NervanaObject.be

    for i in range(2):
        for X_batch, y_batch in train_set:
            neon_logger.display("Xshape: {}, yshape: {}".format(X_batch.shape, y_batch.shape))
        train_set.index = 0
Developer: NervanaSystems, Project: neon, Lines: 10, Source file: test_dataset.py

Example 12: test_dataset

def test_dataset(backend_default, data):
    (X_train, y_train), (X_test, y_test), nclass = load_mnist(path=data)

    train_set = ArrayIterator(X_train, y_train, nclass=nclass)
    train_set.be = NervanaObject.be

    for i in range(2):
        for X_batch, y_batch in train_set:
            neon_logger.display("Xshape: {}, yshape: {}".format(X_batch.shape, y_batch.shape))
        train_set.index = 0
Developer: JediKoder, Project: neon, Lines: 10, Source file: test_dataset.py

Example 13: load_vocab

    def load_vocab(self):
        """
        Load vocab and initialize buffers
        Input sentence batch is of dimension (vocab_size, max_sentence_length * batch_size)
        where each column is the 1-hot representation of a word and the first batch_size columns
        are the first words of each sentence.
        """

        sentences = [sent['tokens'] for sent in self.iterSentences()]
        # Flatten list of list of words to one list of words
        words = [word for sentence in sentences for word in sentence]
        # Count words and keep words greater than threshold
        word_counts = Counter(words)

        vocab = [self.end_token] + \
                [word for word in list(word_counts.keys()) if word_counts[word] >= 5]
        self.vocab_size = len(vocab)
        self.vocab_to_index = dict((c, i) for i, c in enumerate(vocab))
        self.index_to_vocab = dict((i, c) for i, c in enumerate(vocab))

        # Compute optional bias vector for initializing final linear layer bias
        word_counts[self.end_token] = len(sentences)
        self.bias_init = np.array([1.0 * word_counts[self.index_to_vocab[i]]
                                   for i in self.index_to_vocab]).reshape((self.vocab_size, 1))
        self.bias_init /= np.sum(self.bias_init)
        self.bias_init = np.log(self.bias_init)
        self.bias_init -= np.max(self.bias_init)

        self.max_sentence_length = max(len(sent) for sent in sentences) + 1

        self.dev_image = self.be.iobuf(self.image_size)
        self.dev_imageT = self.be.empty(self.dev_image.shape[::-1])
        self.dev_X = self.be.iobuf((self.vocab_size, self.max_sentence_length))
        self.dev_y = self.be.iobuf((self.vocab_size, self.max_sentence_length + 1))
        # Create mask to deal with variable length sentences
        self.dev_y_mask = self.be.iobuf((self.vocab_size, self.max_sentence_length + 1))
        self.y_mask = np.zeros(self.dev_y_mask.shape,
                               dtype=np.uint8).reshape(self.vocab_size,
                                                       self.max_sentence_length + 1, -1)
        self.y_mask_reshape = self.y_mask.reshape(self.dev_y_mask.shape)

        self.dev_lbl = self.be.iobuf(self.max_sentence_length, dtype=np.int32)
        self.dev_lblT = self.be.empty(self.dev_lbl.shape[::-1])
        self.dev_lblflat = self.dev_lbl.reshape((1, self.dev_lbl.size))

        self.dev_y_lbl = self.be.iobuf(self.max_sentence_length + 1, dtype=np.int32)
        self.dev_y_lblT = self.be.empty(self.dev_y_lbl.shape[::-1])
        self.dev_y_lblflat = self.dev_y_lbl.reshape((1, self.dev_y_lbl.size))

        self.shape = [self.image_size, (self.vocab_size, self.max_sentence_length)]
        neon_logger.display("Vocab size: %d, Max sentence length: %d" % (self.vocab_size,
                                                                         self.max_sentence_length))
Developer: NervanaSystems, Project: neon, Lines: 52, Source file: imagecaption.py

Example 14: _print_tree

def _print_tree(node, level=0):
    """
    print tree with indentation
    """

    if type(node) is list:
        neon_logger.display(("    " * level) + ", ".join(native_str(s) for s in node[0:3]))
        if len(node) > 3:
            _print_tree(node[3], level + 1)
        if len(node) > 4:
            _print_tree(node[4], level + 1)
    else:
        neon_logger.display(("    " * level) + native_str(node))
Developer: NervanaSystems, Project: neon, Lines: 13, Source file: float_ew.py
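
For a rough feel of the output, the sketch below feeds _print_tree a made-up nested list (not a real optree from float_ew): the first three items of each list node are printed on one line, and positions 3 and 4, when present, are recursed into with one extra level of indentation.

# Made-up input, only meant to show the indentation behaviour.
tree = ['add', 'out', None,
        ['mul', 'a', 'b'],
        ['sqrt', 'c', None]]
_print_tree(tree)
# Expected output (roughly):
# add, out, None
#     mul, a, b
#     sqrt, c, None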

Example 15: train_mlp_classifier

def train_mlp_classifier(dataset, model_file_path, num_epochs, callback_args):
    """
    Train the np_semantic_segmentation mlp classifier
    Args:
        model_file_path (str): model path
        num_epochs (int): number of epochs
        callback_args (dict): callback_arg
        dataset: NpSemanticSegData object containing the dataset

    Returns:
        print error_rate, test_accuracy_rate and precision_recall_rate evaluation from the model

    """
    model = NpSemanticSegClassifier(num_epochs, callback_args)
    model.build()
    # run fit
    model.fit(dataset.test_set, dataset.train_set)
    # save model params
    model.save(model_file_path)
    # set evaluation error rates
    error_rate, test_accuracy_rate, precision_recall_rate = model.eval(dataset.test_set)
    neon_logger.display('Misclassification error = %.1f%%' %
                        (error_rate * 100))
    neon_logger.display('Test accuracy rate = %.1f%%' %
                        (test_accuracy_rate * 100))
    neon_logger.display('precision rate = %s!!' %
                        (str(precision_recall_rate[0])))
    neon_logger.display('recall rate = %s!!' %
                        (str(precision_recall_rate[1])))
Developer: cdj0311, Project: nlp-architect, Lines: 29, Source file: train.py


Note: The neon.logger.display examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with its original authors. Please consult the corresponding project's license before distributing or using the code, and do not repost without permission.