

Python download.get_dataset_directory Method Code Examples

This article compiles typical usage examples of the Python method chainer.dataset.download.get_dataset_directory. If you are wondering what download.get_dataset_directory does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the containing module, chainer.dataset.download.


The following presents 15 code examples of download.get_dataset_directory, collected from open-source projects.
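Before diving into the examples, here is a minimal sketch of the pattern most of them share: get_dataset_directory resolves (and creates, if necessary) a per-dataset cache directory under the Chainer dataset root (controlled by the CHAINER_DATASET_ROOT environment variable), and cache_or_load_file builds a file there on the first call and loads the cached copy afterwards. The dataset name 'example/dataset' and the JSON payload are placeholders, not taken from any of the projects listed below.

import json
import os

from chainer.dataset import download


def load_cached_config():
    # Resolve (and create if missing) a cache directory under the dataset root,
    # typically ~/.chainer/dataset unless CHAINER_DATASET_ROOT overrides it.
    root = download.get_dataset_directory('example/dataset')  # placeholder name
    path = os.path.join(root, 'config.json')

    def creator(path):
        data = {'version': 1}  # placeholder payload
        with open(path, 'w') as f:
            json.dump(data, f)
        return data

    def loader(path):
        with open(path) as f:
            return json.load(f)

    # creator runs only when the file does not exist yet; loader runs otherwise.
    return download.cache_or_load_file(path, creator, loader)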

Example 1: _retrieve_word_vocabulary

# Required import: from chainer.dataset import download [as alias]
# Or: from chainer.dataset.download import get_dataset_directory [as alias]
def _retrieve_word_vocabulary():
    def creator(path):
        words = _load_words(_train_url)
        vocab = {}
        index = 0
        with open(path, 'w') as f:
            for word in words:
                if word not in vocab:
                    vocab[word] = index
                    index += 1
                    f.write(word + '\n')

        return vocab

    def loader(path):
        vocab = {}
        with open(path) as f:
            for i, word in enumerate(f):
                vocab[word.strip()] = i
        return vocab

    root = download.get_dataset_directory('pfnet/chainer/ptb')
    path = os.path.join(root, 'vocab.txt')
    return download.cache_or_load_file(path, creator, loader) 
Developer: chainer, Project: chainer, Lines: 26, Source: ptb.py

Example 2: __init__

# Required import: from chainer.dataset import download [as alias]
# Or: from chainer.dataset.download import get_dataset_directory [as alias]
def __init__(self, data_dir='auto'):
        super(CityscapesTestImageDataset, self).__init__()

        if data_dir == 'auto':
            data_dir = download.get_dataset_directory(
                'pfnet/chainercv/cityscapes')

        img_dir = os.path.join(data_dir, 'leftImg8bit', 'test')
        if not os.path.exists(img_dir):
            raise ValueError(
                'Cityscapes dataset does not exist at the expected location. '
                'Please download it from https://www.cityscapes-dataset.com/. '
                'Then place directory leftImg8bit at {}.'.format(
                    os.path.join(data_dir, 'leftImg8bit')))

        self.img_paths = []
        for city_dname in sorted(glob.glob(os.path.join(img_dir, '*'))):
            for img_path in sorted(glob.glob(
                    os.path.join(city_dname, '*_leftImg8bit.png'))):
                self.img_paths.append(img_path)

        self.add_getter('img', self._get_image)
        self.keys = 'img'  # do not return tuple 
Developer: chainer, Project: chainercv, Lines: 25, Source: cityscapes_test_image_dataset.py

Example 3: get_sbd

# Required import: from chainer.dataset import download [as alias]
# Or: from chainer.dataset.download import get_dataset_directory [as alias]
def get_sbd():
    # To support ChainerMN, the target directory should be locked.
    with filelock.FileLock(os.path.join(download.get_dataset_directory(
            'pfnet/chainercv/.lock'), 'sbd.lock')):
        data_root = download.get_dataset_directory(root)
        base_path = os.path.join(data_root, 'benchmark_RELEASE/dataset')

        train_voc2012_file = os.path.join(base_path, 'train_voc2012.txt')
        if os.path.exists(train_voc2012_file):
            # skip downloading
            return base_path

        download_file_path = utils.cached_download(url)
        ext = os.path.splitext(url)[1]
        utils.extractall(download_file_path, data_root, ext)

        six.moves.urllib.request.urlretrieve(
            train_voc2012_url, train_voc2012_file)
        _generate_voc2012_txt(base_path)

    return base_path 
Developer: chainer, Project: chainercv, Lines: 23, Source: sbd_utils.py

Example 4: get_atom_init_json_filepath

# Required import: from chainer.dataset import download [as alias]
# Or: from chainer.dataset.download import get_dataset_directory [as alias]
def get_atom_init_json_filepath(download_if_not_exist=True):
    """Construct a filepath which stores atom_init_json

    This method checks whether the file exists, and downloads it if
    necessary.

    Args:
        download_if_not_exist (bool): If `True` download dataset
            if it is not downloaded yet.

    Returns (str): file path for atom_init_json
    """
    cache_root = download.get_dataset_directory(_root)
    cache_path = os.path.join(cache_root, file_name_atom_init_json)
    if not os.path.exists(cache_path) and download_if_not_exist:
        logger = getLogger(__name__)
        logger.info('Downloading atom_init.json...')
        download_file_path = download.cached_download(download_url)
        shutil.copy(download_file_path, cache_path)
    return cache_path 
Developer: chainer, Project: chainer-chemistry, Lines: 22, Source: cgcnn_preprocessor.py
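A quick usage note for the helper above, sketched only from the signature shown: the first call downloads atom_init.json into the cache directory and returns its path, while passing download_if_not_exist=False merely resolves the path without downloading.

# First call: downloads atom_init.json into the cache and returns its path.
json_path = get_atom_init_json_filepath()

# Resolve the cache path without triggering a download.
json_path = get_atom_init_json_filepath(download_if_not_exist=False)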

Example 5: _get_tox21_filepath

# Required import: from chainer.dataset import download [as alias]
# Or: from chainer.dataset.download import get_dataset_directory [as alias]
def _get_tox21_filepath(dataset_type):
    """Returns a file path in which the tox21 dataset is cached.

    This function returns a file path in which `dataset_type`
    of the tox21 dataset is cached.
    Note that this function does not check if the dataset has actually
    been downloaded or not.

    Args:
        dataset_type(str): Name of the target dataset type.
            Either 'train', 'val', or 'test'.

    Returns (str): file path for the tox21 dataset

    """
    if dataset_type not in _config.keys():
        raise ValueError("Invalid dataset type '{}'. Accepted values are "
                         "'train', 'val' or 'test'.".format(dataset_type))

    c = _config[dataset_type]
    sdffile = c['filename']

    cache_root = download.get_dataset_directory(_root)
    cache_path = os.path.join(cache_root, sdffile)
    return cache_path 
Developer: chainer, Project: chainer-chemistry, Lines: 27, Source: tox21.py
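As the docstring notes, this helper only builds a cache path and does not check that the file has been downloaded. A hypothetical call for the training split:

# Returns the cache path for the 'train' split; the file may not exist yet.
train_sdf_path = _get_tox21_filepath('train')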

Example 6: download_and_store_model

# Required import: from chainer.dataset import download [as alias]
# Or: from chainer.dataset.download import get_dataset_directory [as alias]
def download_and_store_model(alg, url, env, model_type):
    """Downloads a model file and puts it under model directory.

    It downloads a file from the URL and puts it under model directory.
    If there is already a file at the destination path,
    it just returns the path without downloading the same file.

    Args:
        alg (string): String representation of algorithm used in MODELS dict.
        url (string): URL to download from.
        env (string): Environment in which pretrained model was trained.
        model_type (string): Either `best` or `final`.

    Returns:
        string: Path to the downloaded file.
        bool: whether the model was already cached.
    """
    with filelock.FileLock(os.path.join(
            get_dataset_directory(os.path.join('pfnet', 'chainerrl', '.lock')),
            'models.lock')):
        root = get_dataset_directory(
            os.path.join('pfnet', 'chainerrl', 'models', alg, env))
        url_basepath = os.path.join(url, alg, env)
        file = model_type + ".zip"
        path = os.path.join(root, file)
        is_cached = os.path.exists(path)
        if not is_cached:
            cache_path = cached_download(os.path.join(url_basepath,
                                                      file))
            os.rename(cache_path, path)
            with zipfile.ZipFile(path, 'r') as zip_ref:
                zip_ref.extractall(root)
        return os.path.join(root, model_type), is_cached 
Developer: chainer, Project: chainerrl, Lines: 33, Source: pretrained_models.py
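A hypothetical call might look like the following; the algorithm name, base URL, and environment ID are placeholders, not values taken from the project above.

# All argument values below are placeholders for illustration.
model_dir, was_cached = download_and_store_model(
    alg='DQN',
    url='https://example.com/models',
    env='BreakoutNoFrameskip-v4',
    model_type='best')
# model_dir ends with .../pfnet/chainerrl/models/DQN/BreakoutNoFrameskip-v4/best
# was_cached is False on the first call and True once the zip has been cached.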

Example 7: _retrieve

# Required import: from chainer.dataset import download [as alias]
# Or: from chainer.dataset.download import get_dataset_directory [as alias]
def _retrieve(n_layers, name_npz, name_caffemodel, model):
    root = download.get_dataset_directory('pfnet/chainer/models/')
    path = os.path.join(root, name_npz)
    path_caffemodel = os.path.join(root, name_caffemodel)
    return download.cache_or_load_file(
        path, lambda path: _make_npz(path, path_caffemodel, model, n_layers),
        lambda path: npz.load_npz(path, model)) 
Developer: pfnet-research, Project: nips17-adversarial-attack, Lines: 9, Source: resnet_layer.py

Example 8: download_model

# Required import: from chainer.dataset import download [as alias]
# Or: from chainer.dataset.download import get_dataset_directory [as alias]
def download_model(url):
    """Downloads a model file and puts it under model directory.

    It downloads a file from the URL and puts it under model directory.
    For example, if :obj:`url` is `http://example.com/subdir/model.npz`,
    the pretrained weights file will be saved to
    `$CHAINER_DATASET_ROOT/pfnet/chainercv/models/model.npz`.
    If there is already a file at the destination path,
    it just returns the path without downloading the same file.

    Args:
        url (string): URL to download from.

    Returns:
        string: Path to the downloaded file.

    """
    # To support ChainerMN, the target directory should be locked.
    with filelock.FileLock(os.path.join(
            get_dataset_directory(os.path.join('pfnet', 'chainercv', '.lock')),
            'models.lock')):
        root = get_dataset_directory(
            os.path.join('pfnet', 'chainercv', 'models'))
        basename = os.path.basename(url)
        path = os.path.join(root, basename)
        if not os.path.exists(path):
            cache_path = cached_download(url)
            os.rename(cache_path, path)
        return path 
Developer: pfnet-research, Project: chainer-compiler, Lines: 31, Source: download.py
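Using the placeholder URL from the docstring above, the call caches the file under the Chainer dataset root and returns the local path:

# The URL is the placeholder from the docstring, not a real model file.
path = download_model('http://example.com/subdir/model.npz')
# path == $CHAINER_DATASET_ROOT/pfnet/chainercv/models/model.npz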

Example 9: setUp

# Required import: from chainer.dataset import download [as alias]
# Or: from chainer.dataset.download import get_dataset_directory [as alias]
def setUp(self):
        self.mnist_root = download.get_dataset_directory(
            os.path.join('pfnet', 'chainer', 'mnist'))
        self.kuzushiji_mnist_root = download.get_dataset_directory(
            os.path.join('pfnet', 'chainer', 'kuzushiji_mnist'))
        self.fashion_mnist_root = download.get_dataset_directory(
            os.path.join('pfnet', 'chainer', 'fashion-mnist')) 
Developer: chainer, Project: chainer, Lines: 9, Source: test_mnist.py

Example 10: setUp

# Required import: from chainer.dataset import download [as alias]
# Or: from chainer.dataset.download import get_dataset_directory [as alias]
def setUp(self):
        self.root = download.get_dataset_directory(
            os.path.join('pfnet', 'chainer', 'cifar')) 
Developer: chainer, Project: chainer, Lines: 5, Source: test_cifar.py

Example 11: setUp

# Required import: from chainer.dataset import download [as alias]
# Or: from chainer.dataset.download import get_dataset_directory [as alias]
def setUp(self):
        self.root = download.get_dataset_directory(
            os.path.join('pfnet', 'chainer', 'svhn')) 
Developer: chainer, Project: chainer, Lines: 5, Source: test_svhn.py

Example 12: _retrieve

# Required import: from chainer.dataset import download [as alias]
# Or: from chainer.dataset.download import get_dataset_directory [as alias]
def _retrieve(name, url, model):
    root = download.get_dataset_directory('pfnet/chainer/models/')
    path = os.path.join(root, name)
    return download.cache_or_load_file(
        path, lambda path: _make_npz(path, url, model),
        lambda path: npz.load_npz(path, model)) 
Developer: chainer, Project: chainer, Lines: 8, Source: vgg.py

Example 13: _retrieve_ptb_words

# Required import: from chainer.dataset import download [as alias]
# Or: from chainer.dataset.download import get_dataset_directory [as alias]
def _retrieve_ptb_words(name, url):
    def creator(path):
        vocab = _retrieve_word_vocabulary()
        words = _load_words(url)
        x = numpy.empty(len(words), dtype=numpy.int32)
        for i, word in enumerate(words):
            x[i] = vocab[word]

        numpy.savez_compressed(path, x=x)
        return {'x': x}

    root = download.get_dataset_directory('pfnet/chainer/ptb')
    path = os.path.join(root, name)
    loaded = download.cache_or_load_file(path, creator, numpy.load)
    return loaded['x'] 
Developer: chainer, Project: chainer, Lines: 17, Source: ptb.py

Example 14: _retrieve_fashion_mnist

# Required import: from chainer.dataset import download [as alias]
# Or: from chainer.dataset.download import get_dataset_directory [as alias]
def _retrieve_fashion_mnist(name, urls):
    root = download.get_dataset_directory('pfnet/chainer/fashion-mnist')
    path = os.path.join(root, name)
    return download.cache_or_load_file(
        path, lambda path: make_npz(path, urls), numpy.load) 
Developer: chainer, Project: chainer, Lines: 7, Source: fashion_mnist.py

Example 15: _retrieve_kuzushiji_mnist

# Required import: from chainer.dataset import download [as alias]
# Or: from chainer.dataset.download import get_dataset_directory [as alias]
def _retrieve_kuzushiji_mnist(name, urls):
    root = download.get_dataset_directory('pfnet/chainer/kuzushiji_mnist')
    path = os.path.join(root, name)
    return download.cache_or_load_file(
        path, lambda path: make_npz(path, urls), numpy.load) 
Developer: chainer, Project: chainer, Lines: 7, Source: kuzushiji_mnist.py


Note: The chainer.dataset.download.get_dataset_directory examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors; for distribution and use, please follow the license of the corresponding project. Do not reproduce without permission.