当前位置: 首页>>代码示例>>Python>>正文


Python download.cache_or_load_file方法代码示例

本文整理汇总了Python中chainer.dataset.download.cache_or_load_file方法的典型用法代码示例。如果您正苦于以下问题:Python download.cache_or_load_file方法的具体用法?Python download.cache_or_load_file怎么用?Python download.cache_or_load_file使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在chainer.dataset.download的用法示例。


在下文中一共展示了download.cache_or_load_file方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: _retrieve

# 需要导入模块: from chainer.dataset import download [as 别名]
# 或者: from chainer.dataset.download import cache_or_load_file [as 别名]
def _retrieve(n_layers, name_npz, name_caffemodel, model):
    """Load cached npz weights for *model*, converting from Caffe on a miss.

    The converted file is cached under the shared Chainer model directory,
    so the expensive caffemodel conversion runs at most once per machine.
    """
    cache_root = download.get_dataset_directory('pfnet/chainer/models/')
    npz_path = os.path.join(cache_root, name_npz)
    caffemodel_path = os.path.join(cache_root, name_caffemodel)

    def creator(target):
        # Cache miss: convert the caffemodel into an npz file at *target*.
        return _make_npz(target, caffemodel_path, model, n_layers)

    def loader(target):
        # Cache hit: populate *model* from the previously saved npz.
        return npz.load_npz(target, model)

    return download.cache_or_load_file(npz_path, creator, loader)
开发者ID:pfnet-research,项目名称:nips17-adversarial-attack,代码行数:9,代码来源:resnet_layer.py

示例2: _retrieve

# 需要导入模块: from chainer.dataset import download [as 别名]
# 或者: from chainer.dataset.download import cache_or_load_file [as 别名]
def _retrieve(name, url, model):
    """Load pretrained weights into *model* from the npz cache named *name*.

    On a cache miss the original weights are fetched from *url* and
    converted to npz once; later calls reuse the cached file.
    """
    cache_dir = download.get_dataset_directory('pfnet/chainer/models/')
    cache_path = os.path.join(cache_dir, name)

    def creator(target):
        # Build the npz cache from the remote weight file.
        return _make_npz(target, url, model)

    def loader(target):
        # Fill *model* directly from the cached npz.
        return npz.load_npz(target, model)

    return download.cache_or_load_file(cache_path, creator, loader)
开发者ID:chainer,项目名称:chainer,代码行数:8,代码来源:vgg.py

示例3: _retrieve

# 需要导入模块: from chainer.dataset import download [as 别名]
# 或者: from chainer.dataset.download import cache_or_load_file [as 别名]
def _retrieve(name_npz, url, model):
    """Load pretrained weights into *model*, caching them as *name_npz*.

    Downloads and converts from *url* only when no cached npz exists yet.
    """
    model_dir = download.get_dataset_directory('pfnet/chainer/models/')
    npz_path = os.path.join(model_dir, name_npz)

    def build(target):
        # First use: fetch the remote weights and store them as npz.
        return _make_npz(target, url, model)

    def load(target):
        # Subsequent uses: read the cached npz into *model*.
        return npz.load_npz(target, model)

    return download.cache_or_load_file(npz_path, build, load)
开发者ID:chainer,项目名称:chainer,代码行数:8,代码来源:googlenet.py

示例4: _retrieve_ptb_words

# 需要导入模块: from chainer.dataset import download [as 别名]
# 或者: from chainer.dataset.download import cache_or_load_file [as 别名]
def _retrieve_ptb_words(name, url):
    """Return the PTB split named *name* as an int32 word-id array.

    On first use the raw text at *url* is downloaded, each word is mapped
    to its vocabulary index, and the result is cached as a compressed npz.
    """

    def build_cache(target):
        # Cache miss: encode every word via the shared vocabulary and
        # persist the id array so later calls skip the download entirely.
        vocab = _retrieve_word_vocabulary()
        words = _load_words(url)
        ids = numpy.fromiter((vocab[w] for w in words),
                             dtype=numpy.int32, count=len(words))
        numpy.savez_compressed(target, x=ids)
        return {'x': ids}

    cache_dir = download.get_dataset_directory('pfnet/chainer/ptb')
    cache_path = os.path.join(cache_dir, name)
    return download.cache_or_load_file(cache_path, build_cache, numpy.load)['x']
开发者ID:chainer,项目名称:chainer,代码行数:17,代码来源:ptb.py

示例5: _retrieve_fashion_mnist

# 需要导入模块: from chainer.dataset import download [as 别名]
# 或者: from chainer.dataset.download import cache_or_load_file [as 别名]
def _retrieve_fashion_mnist(name, urls):
    """Return the cached Fashion-MNIST npz named *name*, building it on a miss."""
    dataset_dir = download.get_dataset_directory('pfnet/chainer/fashion-mnist')
    npz_file = os.path.join(dataset_dir, name)

    def creator(target):
        # Download the raw IDX files from *urls* and pack them into one npz.
        return make_npz(target, urls)

    return download.cache_or_load_file(npz_file, creator, numpy.load)
开发者ID:chainer,项目名称:chainer,代码行数:7,代码来源:fashion_mnist.py

示例6: _retrieve_kuzushiji_mnist

# 需要导入模块: from chainer.dataset import download [as 别名]
# 或者: from chainer.dataset.download import cache_or_load_file [as 别名]
def _retrieve_kuzushiji_mnist(name, urls):
    """Return the cached Kuzushiji-MNIST npz named *name*, building it on a miss."""
    dataset_dir = download.get_dataset_directory('pfnet/chainer/kuzushiji_mnist')
    npz_file = os.path.join(dataset_dir, name)

    def creator(target):
        # Download the raw files from *urls* and repack them as an npz cache.
        return make_npz(target, urls)

    return download.cache_or_load_file(npz_file, creator, numpy.load)
开发者ID:chainer,项目名称:chainer,代码行数:7,代码来源:kuzushiji_mnist.py

示例7: _retrieve_svhn

# 需要导入模块: from chainer.dataset import download [as 别名]
# 或者: from chainer.dataset.download import cache_or_load_file [as 别名]
def _retrieve_svhn(name, url):
    """Return the cached SVHN npz named *name*, creating it from *url* on a miss."""
    dataset_dir = download.get_dataset_directory('pfnet/chainer/svhn')
    npz_file = os.path.join(dataset_dir, name)

    def creator(target):
        # Fetch the raw SVHN data and convert it into an npz cache file.
        return _make_npz(target, url)

    return download.cache_or_load_file(npz_file, creator, numpy.load)
开发者ID:chainer,项目名称:chainer,代码行数:7,代码来源:svhn.py

示例8: _retrieve_mnist

# 需要导入模块: from chainer.dataset import download [as 别名]
# 或者: from chainer.dataset.download import cache_or_load_file [as 别名]
def _retrieve_mnist(name, urls):
    """Return the cached MNIST npz named *name*, building it from *urls* on a miss."""
    dataset_dir = download.get_dataset_directory('pfnet/chainer/mnist')
    npz_file = os.path.join(dataset_dir, name)

    def creator(target):
        # Download the raw IDX files and bundle them into a single npz.
        return make_npz(target, urls)

    return download.cache_or_load_file(npz_file, creator, numpy.load)
开发者ID:chainer,项目名称:chainer,代码行数:7,代码来源:mnist.py

示例9: _retrieve

# 需要导入模块: from chainer.dataset import download [as 别名]
# 或者: from chainer.dataset.download import cache_or_load_file [as 别名]
def _retrieve(name, url, model):
    """Load pretrained weights into *model* from the cached npz named *name*.

    Converts the weights fetched from *url* exactly once; afterwards the
    cached npz under the shared model directory is reused.
    """
    models_root = download.get_dataset_directory('pfnet/chainer/models/')
    weights_path = os.path.join(models_root, name)

    def make_cache(target):
        # Cache miss: download and convert the weights to npz format.
        return _make_npz(target, url, model)

    def read_cache(target):
        # Cache hit: load the saved parameters into *model*.
        return npz.load_npz(target, model)

    return download.cache_or_load_file(weights_path, make_cache, read_cache)
开发者ID:alokwhitewolf,项目名称:Guided-Attention-Inference-Network,代码行数:8,代码来源:utils.py

示例10: _get_cifar

# 需要导入模块: from chainer.dataset import download [as 别名]
# 或者: from chainer.dataset.download import cache_or_load_file [as 别名]
def _get_cifar(name, withlabel, ndim, scale, dtype):
    """Download (if needed), cache, and load a CIFAR dataset.

    Args:
        name: Either ``'cifar-10'`` or ``'cifar-100'``; selects the archive
            URL and the layout of the pickled batches inside the tarball.
        withlabel, ndim, scale, dtype: Forwarded unchanged to
            ``_preprocess_cifar`` for both the train and test splits.

    Returns:
        A ``(train, test)`` tuple of preprocessed datasets.
    """
    root = download.get_dataset_directory(os.path.join('pfnet', 'chainer',
                                                       'cifar'))
    npz_path = os.path.join(root, '{}.npz'.format(name))
    url = 'https://www.cs.toronto.edu/~kriz/{}-python.tar.gz'.format(name)

    def creator(path):
        # Cache miss: download the pickled batches and repack them into a
        # single compressed npz at *path*.
        archive_path = download.cached_download(url)

        if name == 'cifar-10':
            # CIFAR-10 ships as five training batches of 10000 images each;
            # every image is a flat 3072-byte (3x32x32) row.
            train_x = numpy.empty((5, 10000, 3072), dtype=numpy.uint8)
            train_y = numpy.empty((5, 10000), dtype=numpy.uint8)
            test_y = numpy.empty(10000, dtype=numpy.uint8)

            dir_name = '{}-batches-py'.format(name)

            with tarfile.open(archive_path, 'r:gz') as archive:
                # training set
                for i in range(5):
                    file_name = '{}/data_batch_{}'.format(dir_name, i + 1)
                    d = _pickle_load(archive.extractfile(file_name))
                    train_x[i] = d['data']
                    train_y[i] = d['labels']

                # test set
                file_name = '{}/test_batch'.format(dir_name)
                d = _pickle_load(archive.extractfile(file_name))
                test_x = d['data']
                test_y[...] = d['labels']  # copy to array

            # Collapse the per-batch axis: (5, 10000, ...) -> (50000, ...).
            train_x = train_x.reshape(50000, 3072)
            train_y = train_y.reshape(50000)
        else:
            # name == 'cifar-100'
            # CIFAR-100 uses a single train/test file each; we keep the
            # fine-grained (100-class) labels.
            def load(archive, file_name):
                d = _pickle_load(archive.extractfile(file_name))
                x = d['data'].reshape((-1, 3072))
                y = numpy.array(d['fine_labels'], dtype=numpy.uint8)
                return x, y

            with tarfile.open(archive_path, 'r:gz') as archive:
                train_x, train_y = load(archive, 'cifar-100-python/train')
                test_x, test_y = load(archive, 'cifar-100-python/test')

        numpy.savez_compressed(path, train_x=train_x, train_y=train_y,
                               test_x=test_x, test_y=test_y)
        return {'train_x': train_x, 'train_y': train_y,
                'test_x': test_x, 'test_y': test_y}

    raw = download.cache_or_load_file(npz_path, creator, numpy.load)
    train = _preprocess_cifar(raw['train_x'], raw['train_y'], withlabel,
                              ndim, scale, dtype)
    test = _preprocess_cifar(raw['test_x'], raw['test_y'], withlabel, ndim,
                             scale, dtype)
    return train, test
开发者ID:chainer,项目名称:chainer,代码行数:57,代码来源:cifar.py


注:本文中的chainer.dataset.download.cache_or_load_file方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。