

Python data_utils.get_file Method Code Examples

This article collects and summarizes typical usage examples of the Python method keras.utils.data_utils.get_file. If you are wondering how exactly data_utils.get_file is used, or are looking for real-world calls to study, the curated examples below should help. You can also explore further usage examples from the keras.utils.data_utils module.


The following presents 15 code examples of data_utils.get_file, listed in order of popularity.
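
Before the individual examples, here is a minimal sketch of a typical get_file call. It is illustrative only: the file name, URL, and hash below are hypothetical placeholders, and by default downloads are cached under ~/.keras/ in the given cache_subdir.

# A minimal, hypothetical sketch of a get_file call (names and URL are
# placeholders, not taken from the examples that follow).
from keras.utils.data_utils import get_file

weights_path = get_file(
    'example_weights.h5',                      # name of the cached file
    'https://example.com/example_weights.h5',  # download origin (placeholder)
    cache_subdir='models',                     # cached under ~/.keras/models/
    file_hash=None)                            # optionally verify a sha256/md5 hash
print(weights_path)                            # absolute path to the cached copy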

Example 1: load_data

# Required import: from keras.utils import data_utils [as alias]
# Or: from keras.utils.data_utils import get_file [as alias]
# This snippet also uses: from zipfile import ZipFile, from collections import Counter
def load_data(path='conll2000.zip', min_freq=2):
    path = get_file(path,
                    origin='https://raw.githubusercontent.com/nltk'
                           '/nltk_data/gh-pages/packages/corpora/conll2000.zip')
    print(path)
    archive = ZipFile(path, 'r')
    train = _parse_data(archive.open('conll2000/train.txt'))
    test = _parse_data(archive.open('conll2000/test.txt'))
    archive.close()

    word_counts = Counter(row[0].lower() for sample in train for row in sample)
    vocab = ['<pad>', '<unk>']
    vocab += [w for w, f in word_counts.items() if f >= min_freq]
    # in alphabetical order
    pos_tags = sorted(set(row[1] for sample in train + test for row in sample))
    # in alphabetical order
    chunk_tags = sorted(set(row[2] for sample in train + test for row in sample))

    train = _process_data(train, vocab, pos_tags, chunk_tags)
    test = _process_data(test, vocab, pos_tags, chunk_tags)
    return train, test, (vocab, pos_tags, chunk_tags) 
Author: keras-team, Project: keras-contrib, Lines: 23, Source: conll2000.py
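
A possible call site for the loader above, as a sketch only: the private helpers _parse_data and _process_data live in the same module and are not shown here, so the exact shape of train/test depends on them.

# Hypothetical usage of load_data; relies on the module's unshown helpers.
train, test, (vocab, pos_tags, chunk_tags) = load_data(min_freq=2)
print(len(vocab), 'words,', len(pos_tags), 'POS tags,', len(chunk_tags), 'chunk tags')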

Example 2: decode_predictions

# Required import: from keras.utils import data_utils [as alias]
# Or: from keras.utils.data_utils import get_file [as alias]
# This snippet also uses: import json, plus module-level CLASS_INDEX = None and CLASS_INDEX_PATH
def decode_predictions(preds, top=5):
    global CLASS_INDEX
    if len(preds.shape) != 2 or preds.shape[1] != 1000:
        raise ValueError('`decode_predictions` expects '
                         'a batch of predictions '
                         '(i.e. a 2D array of shape (samples, 1000)). '
                         'Found array with shape: ' + str(preds.shape))
    if CLASS_INDEX is None:
        fpath = get_file('imagenet_class_index.json',
                         CLASS_INDEX_PATH,
                         cache_subdir='models')
        with open(fpath) as f:
            CLASS_INDEX = json.load(f)
    results = []
    for pred in preds:
        top_indices = pred.argsort()[-top:][::-1]
        result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
        results.append(result)
    return results 
Author: fchollet, Project: deep-learning-models, Lines: 20, Source: imagenet_utils.py
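
A hedged usage sketch for the function above, assuming some 1000-class ImageNet classifier; model and the preprocessed batch x are placeholders, not part of the snippet.

# Hypothetical usage: any Keras model that outputs (samples, 1000) scores.
preds = model.predict(x)            # x: a preprocessed image batch
for entry in decode_predictions(preds, top=3)[0]:
    print(entry)                    # (wordnet_id, class_name, probability)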

Example 3: check_mpii_dataset

# Required import: from keras.utils import data_utils [as alias]
# Or: from keras.utils.data_utils import get_file [as alias]
# This snippet also uses: import os, import sys, and a module-level ORIGIN URL constant
def check_mpii_dataset():
    version = 'v0.1'
    try:
        mpii_path = os.path.join(os.getcwd(), 'datasets/MPII/')
        # Download (or verify against its MD5 hash) the annotation file.
        annot_path = get_file(mpii_path + 'annotations.mat',
                ORIGIN + version + '/mpii_annotations.mat',
                md5_hash='cc62b1bb855bf4866d19bc0637526930')

        if not os.path.isdir(mpii_path + 'images'):
            raise Exception('MPII dataset (images) not found! '
                    'You must download it by yourself from '
                    'http://human-pose.mpi-inf.mpg.de')

    except Exception:
        sys.stderr.write('Error checking MPII dataset!\n')
        raise 
Author: dluvizon, Project: deephar, Lines: 18, Source: annothelper.py

Example 4: check_h36m_dataset

# Required import: from keras.utils import data_utils [as alias]
# Or: from keras.utils.data_utils import get_file [as alias]
# This snippet also uses: import os, import sys, and a module-level ORIGIN URL constant
def check_h36m_dataset():
    version = 'v0.2'
    try:
        h36m_path = os.path.join(os.getcwd(), 'datasets/Human3.6M/')
        # Download (or verify against its MD5 hash) the annotation file.
        annot_path = get_file(h36m_path + 'annotations.mat',
                ORIGIN + version + '/h36m_annotations.mat',
                md5_hash='4067d52db61737fbebdec850238d87dd')

        if not os.path.isdir(h36m_path + 'images'):
            raise Exception('Human3.6M dataset (images) not found! '
                    'You must download it by yourself from '
                    'http://vision.imar.ro/human3.6m '
                    'and extract the video files!')

    except Exception:
        sys.stderr.write('Error checking Human3.6M dataset!\n')
        raise 
Author: dluvizon, Project: deephar, Lines: 19, Source: annothelper.py

Example 5: check_pennaction_dataset

# Required import: from keras.utils import data_utils [as alias]
# Or: from keras.utils.data_utils import get_file [as alias]
# This snippet also uses: import os, import sys, and a module-level ORIGIN URL constant
def check_pennaction_dataset():
    version = 'v0.3'
    try:
        penn_path = os.path.join(os.getcwd(), 'datasets/PennAction/')
        # Download (or verify against their MD5 hashes) annotations and bounding boxes.
        annot_path = get_file(penn_path + 'annotations.mat',
                ORIGIN + version + '/penn_annotations.mat',
                md5_hash='b37a2e72c0ba308bd7ad476bc2aa4d33')
        bbox_path = get_file(penn_path + 'penn_pred_bboxes_16f.json',
                ORIGIN + version + '/penn_pred_bboxes_16f.json',
                md5_hash='30b124a919185cb031b928bc6154fa9b')

        if not os.path.isdir(penn_path + 'frames'):
            raise Exception('PennAction dataset (frames) not found! '
                    'You must download it by yourself from '
                    'http://dreamdragon.github.io/PennAction')

    except Exception:
        sys.stderr.write('Error checking PennAction dataset!\n')
        raise 
Author: dluvizon, Project: deephar, Lines: 21, Source: annothelper.py
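
Since these three helpers share one pattern (fetch annotations via get_file, then verify the image/frame folders exist), a typical call site, sketched as an assumption rather than taken from the project, simply runs them up front:

# Hypothetical usage: verify all annotations and image folders before training.
check_mpii_dataset()
check_h36m_dataset()
check_pennaction_dataset()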

Example 6: decode_imagenet_predictions

# Required import: from keras.utils import data_utils [as alias]
# Or: from keras.utils.data_utils import get_file [as alias]
# This snippet also uses: import json, plus module-level CLASS_INDEX = None and CLASS_INDEX_PATH
# (functionally identical to Example 2)
def decode_imagenet_predictions(preds, top=5):
    global CLASS_INDEX
    if len(preds.shape) != 2 or preds.shape[1] != 1000:
        raise ValueError('`decode_predictions` expects '
                         'a batch of predictions '
                         '(i.e. a 2D array of shape (samples, 1000)). '
                         'Found array with shape: ' + str(preds.shape))
    if CLASS_INDEX is None:
        fpath = get_file('imagenet_class_index.json',
                         CLASS_INDEX_PATH,
                         cache_subdir='models')
        with open(fpath) as f:
            CLASS_INDEX = json.load(f)
    results = []

    for pred in preds:
        top_indices = pred.argsort()[-top:][::-1]
        result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
        results.append(result)
    return results 
Author: Shobhit20, Project: Image-Captioning, Lines: 21, Source: imagenet_utils.py

Example 7: get_imagenet_weights

# Required import: from keras.utils import data_utils [as alias]
# Or: from keras.utils.data_utils import get_file [as alias]
# The snippet below is a class method; it imports get_file inside the function body.
def get_imagenet_weights(self):
    """Downloads ImageNet trained weights from Keras.
    Returns path to weights file.
    """
    from keras.utils.data_utils import get_file
    TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\
                             'releases/download/v0.2/'\
                             'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
    weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
                            TF_WEIGHTS_PATH_NO_TOP,
                            cache_subdir='models',
                            md5_hash='a268eb855778b3df3c7506639542a6af')
    return weights_path 
Author: dataiku, Project: dataiku-contrib, Lines: 15, Source: model.py
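
A hedged usage sketch for the method above. How the returned path is consumed depends on the surrounding model class, so the wrapper attribute and by_name loading below are assumptions, not part of the snippet.

# Hypothetical usage: load the no-top backbone weights by layer name.
weights_path = model.get_imagenet_weights()                 # model: instance of the class above
model.keras_model.load_weights(weights_path, by_name=True)  # keras_model attribute is assumed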

Example 8: decode_predictions

# Required import: from keras.utils import data_utils [as alias]
# Or: from keras.utils.data_utils import get_file [as alias]
# This snippet also uses: import json, import numpy as np, plus module-level CLASS_INDEX and CLASS_INDEX_PATH
def decode_predictions(preds):
    global CLASS_INDEX
    assert len(preds.shape) == 2 and preds.shape[1] == 1000
    if CLASS_INDEX is None:
        fpath = get_file('imagenet_class_index.json',
                         CLASS_INDEX_PATH,
                         cache_subdir='models')
        with open(fpath) as f:
            CLASS_INDEX = json.load(f)
    indices = np.argmax(preds, axis=-1)
    results = []
    for i in indices:
        results.append(CLASS_INDEX[str(i)])
    return results 
Author: ChunML, Project: DeepLearning, Lines: 15, Source: imagenet_utils.py

Example 9: get_imagenet_weights

# Required import: from keras.utils import data_utils [as alias]
# Or: from keras.utils.data_utils import get_file [as alias]
# The snippet below is a class method (identical to Example 7).
def get_imagenet_weights(self):
    """Downloads ImageNet trained weights from Keras.
    Returns path to weights file.
    """
    from keras.utils.data_utils import get_file
    TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/' \
                             'releases/download/v0.2/' \
                             'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
    weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
                            TF_WEIGHTS_PATH_NO_TOP,
                            cache_subdir='models',
                            md5_hash='a268eb855778b3df3c7506639542a6af')
    return weights_path 
Author: SunskyF, Project: EasyPR-python, Lines: 15, Source: model.py

Example 10: get_file

# Required import: from keras.utils import data_utils [as alias]
# Or: from keras.utils.data_utils import get_file [as alias]
# Here get_file is imported under the alias gf; RECURRENT_GAZE_DIR is a module-level cache-directory constant.
def get_file(model_weights: ModelWeights):
    return gf(model_weights.name, model_weights.path, cache_subdir=RECURRENT_GAZE_DIR) 
Author: crisie, Project: RecurrentGaze, Lines: 4, Source: experiment_utils.py
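
The ModelWeights type is defined elsewhere in the project; a plausible minimal stand-in (an assumption, not the project's actual class) would be:

# Hypothetical container with the two attributes the wrapper accesses.
from typing import NamedTuple

class ModelWeights(NamedTuple):
    name: str  # local file name handed to get_file
    path: str  # download origin (URL)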

Example 11: __init__

# Required import: from keras.utils import data_utils [as alias]
# Or: from keras.utils.data_utils import get_file [as alias]
# The snippet below is the __init__ of an Encoder model subclass; it also uses
# keras.backend as K, keras.utils.layer_utils, and the project's load_weights helper.
def __init__(self, inputs, blocks, weights=None,
             trainable=True, name='encoder'):
    inverse_pyramid = []

    # convolutional blocks
    conv_blocks = blocks[:-1]
    for i, block in enumerate(conv_blocks):
        if i == 0:
            x = block(inputs)
            inverse_pyramid.append(x)
        elif i < len(conv_blocks) - 1:
            x = block(x)
            inverse_pyramid.append(x)
        else:
            x = block(x)

    # fully convolutional block
    fc_block = blocks[-1]
    y = fc_block(x)
    inverse_pyramid.append(y)

    outputs = list(reversed(inverse_pyramid))

    super(Encoder, self).__init__(
        inputs=inputs, outputs=outputs)

    # load pre-trained weights
    layer_names = []  # defined up front; otherwise the freeze step fails when weights is None
    if weights is not None:
        weights_path = get_file(
            '{}_weights_tf_dim_ordering_tf_kernels.h5'.format(name),
            weights,
            cache_subdir='models')
        layer_names = load_weights(self, weights_path)
        if K.image_data_format() == 'channels_first':
            layer_utils.convert_all_kernels_in_model(self)

    # freeze basenet weights
    if not trainable:
        for layer in self.layers:
            if layer.name in layer_names:
                layer.trainable = False 
Author: JihongJu, Project: keras-fcn, Lines: 43, Source: encoders.py

Example 12: get_file

# Required import: from keras.utils import data_utils [as alias]
# Or: from keras.utils.data_utils import get_file [as alias]
# Here keras' get_file is imported as keras_get_file to avoid shadowing the wrapper below.
def get_file(uri, extract=False):
    # Local paths (no URL scheme) are returned unchanged; only URLs are fetched.
    if '://' not in uri:
        return uri
        # uri = 'file://' + uri

    fname = uri.split('/')[-1]
    local_path = keras_get_file(
        fname, uri,
        extract=extract,
        cache_subdir='models')

    return local_path 
Author: lunardog, Project: kelner, Lines: 15, Source: utils.py
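
A short, hypothetical usage of the wrapper: plain local paths pass straight through, while URLs are downloaded into the Keras cache (the example URL is a placeholder).

# Hypothetical usage of the wrapper above.
print(get_file('/tmp/model.h5'))                 # no scheme: returned unchanged
print(get_file('https://example.com/model.h5'))  # URL: downloaded and cached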

Example 13: load_data

# Required import: from keras.utils import data_utils [as alias]
# Or: from keras.utils.data_utils import get_file [as alias]
# This snippet also uses: import gzip, plus the project's load_pickle and make_numpy_array helpers.
def load_data(data_file, url):
    """Loads the data from the gzip-pickled files and converts it to numpy arrays."""
    print('loading data ...')
    path = get_file(data_file, origin=url)
    with gzip.open(path, 'rb') as f:
        train_set, valid_set, test_set = load_pickle(f)

    train_set_x, train_set_y = make_numpy_array(train_set)
    valid_set_x, valid_set_y = make_numpy_array(valid_set)
    test_set_x, test_set_y = make_numpy_array(test_set)

    return [(train_set_x, train_set_y), (valid_set_x, valid_set_y), (test_set_x, test_set_y)] 
Author: VahidooX, Project: DeepCCA, Lines: 15, Source: utils.py

Example 14: get_densenet_weights_path

# Required import: from keras.utils import data_utils [as alias]
# Or: from keras.utils.data_utils import get_file [as alias]
# TF_WEIGHTS_PATH and TF_WEIGHTS_PATH_NO_TOP are module-level URL constants.
def get_densenet_weights_path(dataset_name="CIFAR-10", include_top=True):
    assert dataset_name == "CIFAR-10"
    if include_top:
        weights_path = get_file('densenet_40_12_tf_dim_ordering_tf_kernels.h5',
                                TF_WEIGHTS_PATH,
                                cache_subdir='models')
    else:
        weights_path = get_file('densenet_40_12_tf_dim_ordering_tf_kernels_no_top.h5',
                                TF_WEIGHTS_PATH_NO_TOP,
                                cache_subdir='models')
    return weights_path 
Author: mzweilin, Project: EvadeML-Zoo, Lines: 13, Source: densenet_models.py

Example 15: test_data_utils

# Required import: from keras.utils import data_utils [as alias]
# Or: from keras.utils.data_utils import get_file [as alias]
# This test also uses: os, tarfile, zipfile, urljoin/pathname2url, and keras'
# _hash_file and validate_file helpers.
def test_data_utils(in_tmpdir):
    """Tests get_file from a url, plus extraction and validation.
    """
    dirname = 'data_utils'

    with open('test.txt', 'w') as text_file:
        text_file.write('Float like a butterfly, sting like a bee.')

    with tarfile.open('test.tar.gz', 'w:gz') as tar_file:
        tar_file.add('test.txt')

    with zipfile.ZipFile('test.zip', 'w') as zip_file:
        zip_file.write('test.txt')

    origin = urljoin('file://', pathname2url(os.path.abspath('test.tar.gz')))

    path = get_file(dirname, origin, untar=True)
    filepath = path + '.tar.gz'
    hashval_sha256 = _hash_file(filepath)
    hashval_md5 = _hash_file(filepath, algorithm='md5')
    path = get_file(dirname, origin, md5_hash=hashval_md5, untar=True)
    path = get_file(filepath, origin, file_hash=hashval_sha256, extract=True)
    assert os.path.exists(filepath)
    assert validate_file(filepath, hashval_sha256)
    assert validate_file(filepath, hashval_md5)
    os.remove(filepath)
    os.remove('test.tar.gz')

    origin = urljoin('file://', pathname2url(os.path.abspath('test.zip')))

    hashval_sha256 = _hash_file('test.zip')
    hashval_md5 = _hash_file('test.zip', algorithm='md5')
    path = get_file(dirname, origin, md5_hash=hashval_md5, extract=True)
    path = get_file(dirname, origin, file_hash=hashval_sha256, extract=True)
    assert os.path.exists(path)
    assert validate_file(path, hashval_sha256)
    assert validate_file(path, hashval_md5)

    os.remove(path)
    os.remove('test.txt')
    os.remove('test.zip') 
Author: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 43, Source: data_utils_test.py


Note: The keras.utils.data_utils.get_file examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their developers; copyright of the source code belongs to the original authors, and any redistribution or use should follow the corresponding project's license. Do not reproduce without permission.