本文整理汇总了Python中gluoncv.utils.download方法的典型用法代码示例。如果您正苦于以下问题:Python utils.download方法的具体用法?Python utils.download怎么用?Python utils.download使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类gluoncv.utils
的用法示例。
在下文中一共展示了utils.download方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: download_vg
# 需要导入模块: from gluoncv import utils [as 别名]
# 或者: from gluoncv.utils import download [as 别名]
def download_vg(path, overwrite=False):
    """Download the Visual Genome image archives into ``path`` and merge them.

    Both zip files are fetched (with SHA-1 verification), extracted in
    place, and the contents of ``VG_100K_2`` are then moved into
    ``VG_100K`` so all images live in a single folder.

    Parameters
    ----------
    path : str
        Destination directory; created if it does not exist.
    overwrite : bool
        Re-download archives even if a verified copy is already present.
    """
    archives = (
        ('https://cs.stanford.edu/people/rak248/VG_100K_2/images.zip',
         'a055367f675dd5476220e9b93e4ca9957b024b94'),
        ('https://cs.stanford.edu/people/rak248/VG_100K_2/images2.zip',
         '2add3aab77623549e92b7f15cda0308f50b64ecf'),
    )
    makedirs(path)
    for url, sha1 in archives:
        saved = download(url, path=path, overwrite=overwrite, sha1_hash=sha1)
        if saved.endswith('zip'):
            with zipfile.ZipFile(saved) as zf:
                zf.extractall(path=path)
    # Consolidate: move every file from VG_100K_2 into VG_100K.
    dst_dir = os.path.join(path, 'VG_100K')
    src_dir = os.path.join(path, 'VG_100K_2')
    for entry in os.listdir(src_dir):
        shutil.move(os.path.join(src_dir, entry),
                    os.path.join(dst_dir, entry))
示例2: read_data
# 需要导入模块: from gluoncv import utils [as 别名]
# 或者: from gluoncv.utils import download [as 别名]
def read_data(opt):
    """Collect the sequence of video frames to process.

    When ``opt.video_loader`` is true, the video at ``opt.video_path`` is
    downloaded and decoded frame by frame with OpenCV. Otherwise, the image
    files under ``opt.data_dir`` are read in sorted filename order.

    The first-frame object coordinates are expected separately via the
    ``gt-bbox`` option as bbox(center_x, center_y, weight, height).

    Returns
    -------
    list
        Decoded frames (OpenCV images).
    """
    if opt.video_loader:
        frames = []
        local_video = utils.download(opt.video_path)
        capture = cv2.VideoCapture(local_video)
        while True:
            ok, frame = capture.read()
            if not ok:  # end of stream or decode failure
                break
            frames.append(frame)
    else:
        frames = [cv2.imread(os.path.join(opt.data_dir, name))
                  for name in sorted(os.listdir(opt.data_dir))]
    return frames
示例3: main
# 需要导入模块: from gluoncv import utils [as 别名]
# 或者: from gluoncv.utils import download [as 别名]
def main():
    """Fetch and extract the Market-1501 dataset, then build image lists.

    Honors ``--no-download``: if set and neither the archive nor the
    extracted directory exists, raises ValueError instead of downloading.
    """
    args = parse_args()
    name = "Market-1501-v15.09.15"
    url = "http://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/{name}.zip".format(name=name)
    root = osp.expanduser(args.download_dir)
    makedirs(root)
    fpath = osp.join(root, name + '.zip')
    exdir = osp.join(root, name)
    nothing_local = not osp.exists(fpath) and not osp.isdir(exdir)
    if nothing_local and args.no_download:
        raise ValueError(('{} dataset archive not found, make sure it is present.'
                          ' Or you should not disable "--no-download" to grab it'.format(fpath)))
    # Download by default unless explicitly disabled.
    if not args.no_download:
        print('Downloading dataset')
        download(url, fpath, overwrite=False)
        print('Dataset downloaded')
    # Extract when a fresh copy was downloaded, or when an existing
    # archive has not been unpacked yet.
    if not args.no_download or not osp.isdir(exdir):
        extract(fpath, root)
        make_list(exdir)
示例4: download_city
# 需要导入模块: from gluoncv import utils [as 别名]
# 或者: from gluoncv.utils import download [as 别名]
def download_city(path, overwrite=False):
    """Verify and extract the Cityscapes archives into ``path``.

    NOTE(review): despite the name, this function performs no download —
    the two zip files must already exist (their names are resolved
    relative to the current working directory). Only SHA-1 verification
    and extraction happen here; presumably Cityscapes requires manual
    registration to obtain the archives — confirm against the caller.
    """
    _CITY_DOWNLOAD_URLS = [
        ('gtFine_trainvaltest.zip', '99f532cb1af174f5fcc4c5bc8feea8c66246ddbc'),
        ('leftImg8bit_trainvaltest.zip', '2c0b77ce9933cc635adda307fbba5566f5d9d404')]
    download_dir = os.path.join(path, 'downloads')
    makedirs(download_dir)
    for archive, sha1 in _CITY_DOWNLOAD_URLS:
        if not check_sha1(archive, sha1):
            raise UserWarning('File {} is downloaded but the content hash does not match. ' \
                              'The repo may be outdated or download may be incomplete. ' \
                              'If the "repo_url" is overridden, consider switching to ' \
                              'the default repo.'.format(archive))
        with zipfile.ZipFile(archive, "r") as zip_ref:
            zip_ref.extractall(path=path)
        print("Extracted", archive)
示例5: download_voc
# 需要导入模块: from gluoncv import utils [as 别名]
# 或者: from gluoncv.utils import download [as 别名]
def download_voc(path, overwrite=False):
    """Download the Pascal VOC 2007/2012 tarballs into ``path`` and extract.

    Parameters
    ----------
    path : str
        Destination directory (created if missing).
    overwrite : bool
        Re-download even when a verified copy already exists.
    """
    voc_archives = (
        ('http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar',
         '34ed68851bce2a36e2a223fa52c661d592c66b3c'),
        ('http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar',
         '41a8d6e12baa5ab18ee7f8f8029b9e11805b4ef1'),
        ('http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar',
         '4e443f8a2eca6b1dac8a6c57641b67dd40621a49'),
    )
    makedirs(path)
    for url, sha1 in voc_archives:
        tar_path = download(url, path=path, overwrite=overwrite, sha1_hash=sha1)
        with tarfile.open(tar_path) as tar:
            tar.extractall(path=path)
#####################################################################################
# Download and extract the VOC augmented segmentation dataset into ``path``
示例6: download_aug
# 需要导入模块: from gluoncv import utils [as 别名]
# 或者: from gluoncv.utils import download [as 别名]
def download_aug(path, overwrite=False):
    """Download and extract the VOC augmented segmentation dataset.

    The benchmark tarball is fetched into ``path``, extracted, and the
    resulting ``benchmark_RELEASE`` directory is renamed to ``VOCaug``.
    A combined ``trainval.txt`` split file is then generated by
    concatenating ``train.txt`` and ``val.txt``.
    """
    _AUG_DOWNLOAD_URLS = [
        ('http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz', '7129e0a480c2d6afb02b517bb18ac54283bfaa35')]
    makedirs(path)
    for url, sha1 in _AUG_DOWNLOAD_URLS:
        archive = download(url, path=path, overwrite=overwrite, sha1_hash=sha1)
        with tarfile.open(archive) as tar:
            tar.extractall(path=path)
        shutil.move(os.path.join(path, 'benchmark_RELEASE'),
                    os.path.join(path, 'VOCaug'))
        split_files = ('VOCaug/dataset/train.txt', 'VOCaug/dataset/val.txt')
        # Build trainval.txt as the concatenation of train + val splits.
        with open(os.path.join(path, 'VOCaug/dataset/trainval.txt'), 'w') as merged:
            for rel_name in split_files:
                with open(os.path.join(path, rel_name)) as part:
                    merged.writelines(part)
示例7: download_coco
# 需要导入模块: from gluoncv import utils [as 别名]
# 或者: from gluoncv.utils import download [as 别名]
def download_coco(args, overwrite=False):
    """Download the COCO 2017 archives and unzip them into ``args.download_dir``.

    Fetches train2017, val2017 and the trainval annotations with SHA-1
    verification, creating the download directory when necessary.
    """
    coco_archives = (
        ('http://images.cocodataset.org/zips/train2017.zip',
         '10ad623668ab00c62c096f0ed636d6aff41faca5'),
        ('http://images.cocodataset.org/annotations/annotations_trainval2017.zip',
         '8551ee4bb5860311e79dace7e79cb91e432e78b3'),
        ('http://images.cocodataset.org/zips/val2017.zip',
         '4950dc9d00dbe1c933ee0170f5797584351d2a41'),
    )
    if not os.path.isdir(args.download_dir):
        makedirs(args.download_dir)
    for url, sha1 in coco_archives:
        saved = download(url, path=args.download_dir, overwrite=overwrite, sha1_hash=sha1)
        with zipfile.ZipFile(saved) as zf:
            zf.extractall(path=args.download_dir)
示例8: parse_args
# 需要导入模块: from gluoncv import utils [as 别名]
# 或者: from gluoncv.utils import download [as 别名]
def parse_args():
    """Parse command-line options for ImageNet dataset setup.

    Returns
    -------
    argparse.Namespace
        download_dir (required), target_dir, checksum, with_rec, num_thread.
        ``_TARGET_DIR`` is a module-level default defined elsewhere in
        this file.
    """
    parser = argparse.ArgumentParser(
        description='Setup the ImageNet dataset.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--download-dir', required=True,
                        help="The directory that contains downloaded tar files")
    parser.add_argument('--target-dir', default=_TARGET_DIR,
                        help="The directory to store extracted images")
    parser.add_argument('--checksum', action='store_true',
                        help="If check integrity before extracting.")
    parser.add_argument('--with-rec', action='store_true',
                        help="If build image record files.")
    parser.add_argument('--num-thread', type=int, default=1,
                        help="Number of threads to use when building image record file.")
    return parser.parse_args()
示例9: main
# 需要导入模块: from gluoncv import utils [as 别名]
# 或者: from gluoncv.utils import download [as 别名]
def main(args):
    """Prepare the ImageNet VID dataset for tracking.

    Pipeline: download the VID archives, create symlinks, convert the
    per-frame XML annotations to JSON, crop the dataset for tracking,
    and finally generate the tracking JSON index.

    Parameters
    ----------
    args : argparse.Namespace
        Must provide ``download_dir`` plus whatever the helper functions
        (``download_VID``, ``symlink``, ``parse_vid``, ``par_crop``,
        ``gen_json``) consume.
    """
    # download VID dataset
    download_VID(args)
    # Fixed garbled status messages ("has already download completed" etc.)
    print('VID dataset download completed')
    VID_base_path = os.path.join(args.download_dir, 'ILSVRC2015')
    ann_base_path = os.path.join(VID_base_path, 'Annotations/VID/train/')
    symlink(args)
    # Format XML annotations and save them as JSON.
    parse_vid(ann_base_path, args)
    print('VID dataset JSON generation completed')
    # Crop the VID dataset in preparation for tracking.
    par_crop(args, ann_base_path)
    print('VID dataset cropping completed')
    # Generate the VID JSON index used by the tracker.
    gen_json(args)
    print('VID dataset JSON generation completed')
示例10: download_coco
# 需要导入模块: from gluoncv import utils [as 别名]
# 或者: from gluoncv.utils import download [as 别名]
def download_coco(path, overwrite=False):
    """Download and extract the COCO 2017 dataset into ``path``.

    train2017, val2017 and the trainval annotations are fetched with
    SHA-1 verification and unzipped in place. Optional archives (stuff
    annotations, test2017) are kept below, commented out, for users who
    need them.
    """
    coco_sources = [
        ('http://images.cocodataset.org/zips/train2017.zip',
         '10ad623668ab00c62c096f0ed636d6aff41faca5'),
        ('http://images.cocodataset.org/annotations/annotations_trainval2017.zip',
         '8551ee4bb5860311e79dace7e79cb91e432e78b3'),
        ('http://images.cocodataset.org/zips/val2017.zip',
         '4950dc9d00dbe1c933ee0170f5797584351d2a41'),
        # ('http://images.cocodataset.org/annotations/stuff_annotations_trainval2017.zip',
        # '46cdcf715b6b4f67e980b529534e79c2edffe084'),
        # test2017.zip, for those who want to attend the competition.
        # ('http://images.cocodataset.org/zips/test2017.zip',
        # '4e443f8a2eca6b1dac8a6c57641b67dd40621a49'),
    ]
    makedirs(path)
    for url, sha1 in coco_sources:
        zip_path = download(url, path=path, overwrite=overwrite, sha1_hash=sha1)
        with zipfile.ZipFile(zip_path) as zf:
            zf.extractall(path=path)
示例11: download_voc
# 需要导入模块: from gluoncv import utils [as 别名]
# 或者: from gluoncv.utils import download [as 别名]
def download_voc(path, overwrite=False):
    """Fetch the Pascal VOC 2007/2012 archives into ``path`` and untar them.

    Each tarball is verified against its SHA-1 checksum before extraction.
    """
    sources = [
        ('http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar',
         '34ed68851bce2a36e2a223fa52c661d592c66b3c'),
        ('http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar',
         '41a8d6e12baa5ab18ee7f8f8029b9e11805b4ef1'),
        ('http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar',
         '4e443f8a2eca6b1dac8a6c57641b67dd40621a49'),
    ]
    makedirs(path)
    for url, expected_sha1 in sources:
        local_tar = download(url, path=path, overwrite=overwrite,
                             sha1_hash=expected_sha1)
        with tarfile.open(local_tar) as tar:
            tar.extractall(path=path)
#####################################################################################
# Download and extract the VOC augmented segmentation dataset into ``path``
示例12: download_coco
# 需要导入模块: from gluoncv import utils [as 别名]
# 或者: from gluoncv.utils import download [as 别名]
def download_coco(path, overwrite=False):
    """Download and extract COCO 2017 (including stuff annotations) into ``path``.

    train2017, val2017, the trainval annotations, and the stuff
    annotations are fetched with SHA-1 verification and unzipped in
    place. The competition-only test2017 archive stays commented out.
    """
    archive_list = [
        ('http://images.cocodataset.org/zips/train2017.zip',
         '10ad623668ab00c62c096f0ed636d6aff41faca5'),
        ('http://images.cocodataset.org/annotations/annotations_trainval2017.zip',
         '8551ee4bb5860311e79dace7e79cb91e432e78b3'),
        ('http://images.cocodataset.org/zips/val2017.zip',
         '4950dc9d00dbe1c933ee0170f5797584351d2a41'),
        ('http://images.cocodataset.org/annotations/stuff_annotations_trainval2017.zip',
         'e7aa0f7515c07e23873a9f71d9095b06bcea3e12'),
        # test2017.zip, for those who want to attend the competition.
        # ('http://images.cocodataset.org/zips/test2017.zip',
        # '4e443f8a2eca6b1dac8a6c57641b67dd40621a49'),
    ]
    makedirs(path)
    for url, sha1 in archive_list:
        fetched = download(url, path=path, overwrite=overwrite, sha1_hash=sha1)
        with zipfile.ZipFile(fetched) as zf:
            zf.extractall(path=path)
示例13: parse_args
# 需要导入模块: from gluoncv import utils [as 别名]
# 或者: from gluoncv.utils import download [as 别名]
def parse_args():
    """Parse command-line options for Visual Genome dataset setup.

    Returns
    -------
    argparse.Namespace
        download_dir (str), no_download (bool), overwrite (bool).
    """
    parser = argparse.ArgumentParser(
        description='Initialize Visual Genome dataset.',
        epilog='Example: python visualgenome.py --download-dir ~/visualgenome',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--download-dir', type=str, default='~/visualgenome/',
                        help='dataset directory on disk')
    parser.add_argument('--no-download', action='store_true',
                        help='disable automatic download if set')
    parser.add_argument('--overwrite', action='store_true',
                        help='overwrite downloaded files if set, in case they are corrupted')
    return parser.parse_args()
示例14: download_json
# 需要导入模块: from gluoncv import utils [as 别名]
# 或者: from gluoncv.utils import download [as 别名]
def download_json(path, overwrite=False):
    """Download and unpack the Visual Genome metadata JSON files into ``path``.

    Fetches ``vg.zip`` from data.dgl.ai, extracts it inside ``path``, then
    flattens the extracted ``vg/`` directory so the JSON files sit directly
    under ``path``, and removes the now-empty ``vg/`` directory.

    Parameters
    ----------
    path : str
        Destination directory for the JSON files.
    overwrite : bool
        Forwarded to ``download``; re-fetch even if the file exists.
    """
    url = 'https://data.dgl.ai/dataset/vg.zip'
    # Bug fix: the archive was previously opened via the bare name
    # 'vg.zip' (relative to the current working directory) while
    # download() saves it under ``path``; also ``overwrite`` was ignored.
    # Use the saved-file path returned by download() and forward the flag.
    archive = download(url, path=path, overwrite=overwrite)
    with zipfile.ZipFile(archive) as zf:
        zf.extractall(path=path)
    json_path = os.path.join(path, 'vg')
    for fl in os.listdir(json_path):
        shutil.move(os.path.join(json_path, fl),
                    os.path.join(path, fl))
    os.rmdir(json_path)
示例15: parse_args
# 需要导入模块: from gluoncv import utils [as 别名]
# 或者: from gluoncv.utils import download [as 别名]
def parse_args():
    """Parse command-line options for ADE20K dataset setup.

    Returns
    -------
    argparse.Namespace
        download_dir (str or None).
    """
    parser = argparse.ArgumentParser(
        description='Initialize ADE20K dataset.',
        epilog='Example: python setup_ade20k.py',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--download-dir', default=None,
                        help='dataset directory on disk')
    return parser.parse_args()