本文整理汇总了Python中urllib.urlretrieve方法的典型用法代码示例。如果您正苦于以下问题:Python urllib.urlretrieve方法的具体用法?Python urllib.urlretrieve怎么用?Python urllib.urlretrieve使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类urllib
的用法示例。
在下文中一共展示了urllib.urlretrieve方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_mnist
# 需要导入模块: import urllib [as 别名]
# 或者: from urllib import urlretrieve [as 别名]
def get_mnist(data_dir):
    """Download and extract the MNIST dataset into *data_dir* if missing.

    Downloads mnist.zip from the mxnet data server only when one of the four
    idx files is absent, and always restores the original working directory.

    :param data_dir: directory to hold the extracted idx files (created if needed)
    """
    if not os.path.isdir(data_dir):
        # os.makedirs is portable; os.system("mkdir " + ...) breaks on paths
        # with spaces and is shell-injection-prone.
        os.makedirs(data_dir)
    cwd = os.path.abspath(os.getcwd())
    os.chdir(data_dir)
    try:
        if (not os.path.exists('train-images-idx3-ubyte')) or \
           (not os.path.exists('train-labels-idx1-ubyte')) or \
           (not os.path.exists('t10k-images-idx3-ubyte')) or \
           (not os.path.exists('t10k-labels-idx1-ubyte')):
            import urllib, zipfile
            zippath = os.path.join(os.getcwd(), "mnist.zip")
            urllib.urlretrieve("http://data.mxnet.io/mxnet/data/mnist.zip", zippath)
            with zipfile.ZipFile(zippath, "r") as zf:
                zf.extractall()
            os.remove(zippath)
    finally:
        # Restore the saved directory; the original os.chdir("..") is wrong
        # when data_dir is a nested path such as "a/b".
        os.chdir(cwd)
示例2: get_cifar10
# 需要导入模块: import urllib [as 别名]
# 或者: from urllib import urlretrieve [as 别名]
def get_cifar10(data_dir):
    """Download and extract the CIFAR-10 RecordIO files into *data_dir* if missing.

    Fetches cifar10.zip from the mxnet data server only when train.rec/test.rec
    are absent, flattens the extracted "cifar/" subdirectory into data_dir, and
    always restores the original working directory.

    :param data_dir: directory to hold train.rec/test.rec (created if needed)
    """
    if not os.path.isdir(data_dir):
        # os.makedirs is portable; os.system("mkdir " + ...) breaks on paths
        # with spaces and is shell-injection-prone.
        os.makedirs(data_dir)
    cwd = os.path.abspath(os.getcwd())
    os.chdir(data_dir)
    try:
        if (not os.path.exists('train.rec')) or \
           (not os.path.exists('test.rec')):
            import urllib, zipfile, glob
            dirname = os.getcwd()
            zippath = os.path.join(dirname, "cifar10.zip")
            urllib.urlretrieve("http://data.mxnet.io/mxnet/data/cifar10.zip", zippath)
            with zipfile.ZipFile(zippath, "r") as zf:
                zf.extractall()
            os.remove(zippath)
            # Move the contents of the extracted "cifar/" directory up one level.
            for f in glob.glob(os.path.join(dirname, "cifar", "*")):
                name = os.path.basename(f)
                os.rename(f, os.path.join(dirname, name))
            os.rmdir(os.path.join(dirname, "cifar"))
    finally:
        os.chdir(cwd)
# data
示例3: _checkout
# 需要导入模块: import urllib [as 别名]
# 或者: from urllib import urlretrieve [as 别名]
def _checkout(self, local_dir):
    """Download a github tarball described by self.config and unpack it.

    Reads "user", "repo" and optional "version" (default "master") from
    self.config, downloads the tarball, extracts it next to *local_dir*,
    and renames the archive's top-level directory to *local_dir*'s basename.

    :param local_dir: target cache directory for the extracted tree
    :raises RuntimeError: when the download fails
    """
    user = self.config.get("user")
    repo = self.config.get("repo")
    version = self.config.get("version", "master")
    # TODO : Sanitize URL
    url = URL.format(user=user, repo=repo, version=version)
    logger.info("Downloading {}/{} from github".format(user, repo))
    try:
        (filename, headers) = urllib.urlretrieve(url)
    except URLError as e:
        raise RuntimeError("Failed to download '{}'. '{}'".format(url, e.reason))
    (cache_root, core) = os.path.split(local_dir)
    # Context manager fixes the leaked tarfile handle in the original.
    with tarfile.open(filename) as t:
        # Ugly hack to get the first part of the directory name of the
        # extracted files (github tarballs have one top-level directory).
        tmp = t.getnames()[0]
        t.extractall(cache_root)
    os.rename(os.path.join(cache_root, tmp), os.path.join(cache_root, core))
示例4: download
# 需要导入模块: import urllib [as 别名]
# 或者: from urllib import urlretrieve [as 别名]
def download(self, tarDir=None, imgIds=None):
    '''
    Download COCO images from mscoco.org server.
    :param tarDir (str): COCO results directory name
           imgIds (list): images to be downloaded (None/empty = all images)
    :return: -1 when no target directory is given, otherwise None
    '''
    if tarDir is None:
        print('Please specify target directory')
        return -1
    # None replaces the mutable default argument []; both mean "all images".
    if not imgIds:
        imgs = self.imgs.values()
    else:
        imgs = self.loadImgs(imgIds)
    N = len(imgs)
    if not os.path.exists(tarDir):
        os.makedirs(tarDir)
    for i, img in enumerate(imgs):
        tic = time.time()
        fname = os.path.join(tarDir, img['file_name'])
        if not os.path.exists(fname):
            urllib.urlretrieve(img['coco_url'], fname)
        # i + 1: 1-based progress (the original printed "0/N" for the first image)
        print('downloaded %d/%d images (t=%.1fs)' % (i + 1, N, time.time() - tic))
示例5: downloadUpdate
# 需要导入模块: import urllib [as 别名]
# 或者: from urllib import urlretrieve [as 别名]
def downloadUpdate(self, url, ext, *args):
    """Download the file at *url* to a user-chosen location.

    Opens a Maya file dialog filtered to *ext*, shows a progress window while
    downloading, and reports success or failure through self.info. On success
    it also closes the dpUpdateWindow if it exists.

    :param url: address of the file to download
    :param ext: file extension used as the dialog filter (without the dot)
    """
    extFilter = "*." + ext
    downloadFolder = cmds.fileDialog2(fileFilter=extFilter, dialogStyle=2)
    if downloadFolder:
        cmds.progressWindow(title='Download Update', progress=50, status='Downloading...', isInterruptable=False)
        try:
            urllib.urlretrieve(url, downloadFolder[0])
            self.info('i094_downloadUpdate', 'i096_downloaded', downloadFolder[0]+'\n\n'+self.langDic[self.langName]['i018_thanks'], 'center', 205, 270)
            # closes dpUpdateWindow:
            if cmds.window('dpUpdateWindow', query=True, exists=True):
                cmds.deleteUI('dpUpdateWindow', window=True)
        except Exception:
            # was a bare except, which also swallowed KeyboardInterrupt/SystemExit
            self.info('i094_downloadUpdate', 'e009_failDownloadUpdate', downloadFolder[0]+'\n\n'+self.langDic[self.langName]['i097_sorry'], 'center', 205, 270)
        finally:
            # always dismiss the progress window, even if self.info raises
            cmds.progressWindow(endProgress=True)
示例6: _fetch_easylist
# 需要导入模块: import urllib [as 别名]
# 或者: from urllib import urlretrieve [as 别名]
def _fetch_easylist(self):
    '''
    Downloads the latest version of easylist, and if newer replaces any
    existing one.
    '''
    candidate = "tmp_" + self.EASYLIST
    installed_version = self._easylist_version()
    # grab the current upstream list into a temporary file
    urllib.urlretrieve(self.EASYLIST_URL, candidate)
    fetched_version = self._easylist_version(path=candidate)
    if installed_version == -1:
        # no local copy yet: install the fetched one as-is
        shutil.move(candidate, self.EASYLIST)
        print("New easylist {}".format(fetched_version))
    elif fetched_version > installed_version:
        # upstream is newer: swap the old list out for the fetched one
        os.remove(self.EASYLIST)
        shutil.move(candidate, self.EASYLIST)
        print ("Updated easylist from {} to {}".format(installed_version, fetched_version))
    else:
        # nothing to do: discard the temporary download
        os.remove(candidate)
        print("Easylist already up to date at: {}".format(fetched_version))
示例7: download_and_extract
# 需要导入模块: import urllib [as 别名]
# 或者: from urllib import urlretrieve [as 别名]
def download_and_extract():
    """
    Download and extract the WOS datasets.

    Downloads DATA_URL into DATA_DIR (skipped when the archive already
    exists), extracts it there, and returns the absolute path of DATA_DIR.
    :return: absolute path of the data directory
    """
    dest_directory = DATA_DIR
    if not os.path.exists(dest_directory):
        os.makedirs(dest_directory)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    path = os.path.abspath(dest_directory)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            # Guard against total_size <= 0 (server omitted Content-Length),
            # which would raise ZeroDivisionError in the original.
            if total_size > 0:
                pct = float(count * block_size) / float(total_size) * 100.0
                sys.stdout.write('\rDownloading %s %.2f%%' % (filename, pct))
                sys.stdout.flush()
        filepath, _ = urllib.urlretrieve(DATA_URL, filepath, reporthook=_progress)
        print('Downloaded', filename)
    # Context manager fixes the leaked tarfile handle in the original.
    with tarfile.open(filepath, 'r') as tar:
        tar.extractall(dest_directory)
    return path
示例8: load
# 需要导入模块: import urllib [as 别名]
# 或者: from urllib import urlretrieve [as 别名]
def load(batch_size, test_batch_size, n_labelled=None):
    """Return (train, dev, test) mnist_generator iterators over MNIST.

    Downloads the pickled dataset into /tmp on first use.

    :param batch_size: batch size for the training generator
    :param test_batch_size: batch size for the dev and test generators
    :param n_labelled: passed through to mnist_generator
    """
    filepath = '/tmp/mnist.pkl.gz'
    url = 'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
    if not os.path.isfile(filepath):
        print("Couldn't find MNIST dataset in /tmp, downloading...")
        urllib.urlretrieve(url, filepath)
    # Open via filepath instead of repeating the literal path.
    with gzip.open(filepath, 'rb') as f:
        train_data, dev_data, test_data = pickle.load(f)
    return (
        mnist_generator(train_data, batch_size, n_labelled),
        mnist_generator(dev_data, test_batch_size, n_labelled),
        mnist_generator(test_data, test_batch_size, n_labelled)
    )
示例9: get_model_file
# 需要导入模块: import urllib [as 别名]
# 或者: from urllib import urlretrieve [as 别名]
def get_model_file(file_path, checksum, output_dir, push_list):
    """Make the model file available locally, verify it, and queue it for push.

    When *file_path* is an http(s) URL the file is downloaded into
    *output_dir* (skipped when a cached copy with a matching checksum exists);
    otherwise it is used in place. The md5 checksum is always verified before
    the local path is appended to *push_list*.

    :param file_path: URL or local path of the model file
    :param checksum: expected md5 checksum
    :param output_dir: directory to hold downloaded files
    :param push_list: list that receives the verified local path (mutated)
    """
    filename = file_path.split('/')[-1]
    if file_path.startswith("http"):
        local_file_path = output_dir + '/' + filename
        # (Re-)download when missing or when the cached copy is corrupt.
        if not os.path.exists(local_file_path) \
                or bench_utils.file_checksum(local_file_path) != checksum:
            print("downloading %s..." % filename)
            urllib.urlretrieve(file_path, local_file_path)
    else:
        local_file_path = file_path
    # Single verification shared by both branches (was duplicated).
    aibench_check(bench_utils.file_checksum(local_file_path) == checksum,
                  "file %s md5 checksum not match" % filename)
    push_list.append(local_file_path)
示例10: test_copy
# 需要导入模块: import urllib [as 别名]
# 或者: from urllib import urlretrieve [as 别名]
def test_copy(self):
    # Test that setting the filename argument works.
    second_temp = "%s.2" % test_support.TESTFN
    self.registerFileForCleanUp(second_temp)
    result = urllib.urlretrieve(self.constructLocalFileUrl(
        test_support.TESTFN), second_temp)
    self.assertEqual(second_temp, result[0])
    self.assertTrue(os.path.exists(second_temp), "copy of the file was not "
                    "made")
    # 'with' guarantees the handle is closed exactly once; the original used
    # the legacy file() builtin and closed the handle twice.
    with open(second_temp, 'rb') as fp:
        text = fp.read()
    self.assertEqual(self.text, text)
示例11: test_short_content_raises_ContentTooShortError
# 需要导入模块: import urllib [as 别名]
# 或者: from urllib import urlretrieve [as 别名]
def test_short_content_raises_ContentTooShortError(self):
    # urlretrieve must raise ContentTooShortError when the server delivers
    # fewer bytes than the advertised Content-Length (100 promised, only
    # "FF" sent by the fake HTTP response below).
    self.fakehttp('''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1

FF
''')
    # A no-op hook: the error must be raised even with a reporthook installed.
    def _reporthook(par1, par2, par3):
        pass
    try:
        self.assertRaises(urllib.ContentTooShortError, urllib.urlretrieve,
                          'http://example.com', reporthook=_reporthook)
    finally:
        # always undo the fake HTTP monkey-patch
        self.unfakehttp()
示例12: downloadImages
# 需要导入模块: import urllib [as 别名]
# 或者: from urllib import urlretrieve [as 别名]
def downloadImages(url):
    """Download every <img> referenced by the page at *url*.

    Images are saved under the module-level path_download_images destination,
    named after the last URL path component (with %20 turned into spaces).
    Failed downloads are skipped and excluded from the final count.
    """
    page = BeautifulSoup(urllib2.urlopen(url))
    # de-duplicate image URLs before downloading
    images = set([img['src'] for img in page.findAll('img')])
    print('[i] Downloading {} images...'.format(len(images)))
    count = 0
    for image in images:
        name = image.split('/')[-1].replace('%20', ' ')
        try:
            count += 1
            urllib.urlretrieve(image, path_download_images + name)
        except Exception:
            # was a bare except (also swallowed KeyboardInterrupt/SystemExit);
            # undo the optimistic increment for the failed download
            count -= 1
            continue
        print(' {}. Downloaded `{}`'.format(count, name))
    print('[i] Success downloaded {} images to path `{}`'.format(count, path_download_images))
示例13: load
# 需要导入模块: import urllib [as 别名]
# 或者: from urllib import urlretrieve [as 别名]
def load(batch_size, test_batch_size):
    """Return (train, dev, test) mnist_generator iterators over MNIST.

    Downloads the pickled dataset into /tmp on first use.

    :param batch_size: batch size for the training generator
    :param test_batch_size: batch size for the dev and test generators
    """
    filepath = '/tmp/mnist.pkl.gz'
    url = 'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
    if not os.path.isfile(filepath):
        print("Couldn't find MNIST dataset in /tmp, downloading...")
        urllib.urlretrieve(url, filepath)
    # Open via filepath instead of repeating the literal path.
    with gzip.open(filepath, 'rb') as f:
        train_data, dev_data, test_data = pickle.load(f)
    return (
        mnist_generator(train_data, batch_size),
        mnist_generator(dev_data, test_batch_size),
        mnist_generator(test_data, test_batch_size)
    )
示例14: loadData
# 需要导入模块: import urllib [as 别名]
# 或者: from urllib import urlretrieve [as 别名]
def loadData(src):
    """Download the CIFAR-10 python tarball from *src* and build data arrays.

    Extracts the archive into the CWD, stacks the five training batches via
    readBatch, and reads the test batch. The temporary download is always
    removed, even on failure.

    :param src: URL of the cifar-10-python tarball
    :return: (train, test) tuple of integer arrays of width numFeature + 1
    """
    print('Downloading ' + src)
    fname, h = urlretrieve(src, './delete.me')
    print('Done.')
    try:
        print('Extracting files...')
        # NOTE(review): extractall() on an untrusted archive can write outside
        # the CWD on older Pythons; consider filter="data" on Python 3.12+.
        with tarfile.open(fname) as tar:
            tar.extractall()
        print('Done.')
        print('Preparing train set...')
        # np.int was removed in NumPy 1.24; the builtin int is its documented
        # replacement (platform default integer dtype).
        trn = np.empty((0, numFeature + 1), dtype=int)
        for i in range(5):
            batchName = './cifar-10-batches-py/data_batch_{0}'.format(i + 1)
            trn = np.vstack((trn, readBatch(batchName)))
        print('Done.')
        print('Preparing test set...')
        tst = readBatch('./cifar-10-batches-py/test_batch')
        print('Done.')
    finally:
        os.remove(fname)
    return (trn, tst)
示例15: loadData
# 需要导入模块: import urllib [as 别名]
# 或者: from urllib import urlretrieve [as 别名]
def loadData(src, cimg):
    """Download a gzipped MNIST idx3 image file and return its pixels.

    Validates the header (magic number, entry count, 28x28 image size) and
    returns the raw pixels. The temporary download is always removed.

    :param src: URL of the images-idx3-ubyte.gz file
    :param cimg: expected number of images in the file
    :return: uint8 array of shape (cimg, 784)
    :raises Exception: on a bad magic number, entry count, or image size
    """
    gzfname, h = urlretrieve(src, './delete.me')
    try:
        with gzip.open(gzfname) as gz:
            # The idx magic 0x00000803 is stored big-endian; read with native
            # ('I') order it compares equal to 0x3080000 on little-endian hosts.
            n = struct.unpack('I', gz.read(4))
            if n[0] != 0x3080000:
                raise Exception('Invalid file: unexpected magic number.')
            n = struct.unpack('>I', gz.read(4))[0]
            if n != cimg:
                raise Exception('Invalid file: expected {0} entries.'.format(cimg))
            crow = struct.unpack('>I', gz.read(4))[0]
            ccol = struct.unpack('>I', gz.read(4))[0]
            if crow != 28 or ccol != 28:
                raise Exception('Invalid file: expected 28 rows/cols per image.')
            # frombuffer replaces np.fromstring, which was deprecated and then
            # removed for binary input; behavior here is identical.
            res = np.frombuffer(gz.read(cimg * crow * ccol), dtype=np.uint8)
    finally:
        os.remove(gzfname)
    return res.reshape((cimg, crow * ccol))