This article collects typical usage examples of Python's urllib.request.urlretrieve method. If you are wondering what request.urlretrieve does, how to call it, or want to see it used in real code, the hand-picked examples below may help. You can also explore other usages of the module it belongs to, urllib.request.
Below are 15 code examples of request.urlretrieve, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
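Before the examples, here is a minimal, self-contained sketch of the basic call, assuming only the standard library; the URL and file name are placeholders, not taken from any of the examples below:

from urllib.request import urlretrieve

# urlretrieve downloads the resource at the given URL to a local file and
# returns the local path together with the response headers.
local_path, headers = urlretrieve('https://example.com/data.csv',  # hypothetical URL
                                  'data.csv')                      # local destination
print(local_path, headers.get('Content-Type'))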
Example 1: maybe_download
# Required import: from urllib import request [as alias]
# Or: from urllib.request import urlretrieve [as alias]
def maybe_download(filename, url, destination_dir, expected_bytes=None, force=False):
    filepath = os.path.join(destination_dir, filename)
    if force or not os.path.exists(filepath):
        if not os.path.exists(destination_dir):
            os.makedirs(destination_dir)
        print('Attempting to download: ' + filename)
        filepath, _ = urlretrieve(url, filepath, reporthook=progress_bar)
        print('Download complete!')
    statinfo = os.stat(filepath)
    if expected_bytes is not None:
        if statinfo.st_size == expected_bytes:
            print('Found and verified: ' + filename)
        else:
            raise Exception('Failed to verify: ' + filename + '. Can you get to it with a browser?')
    else:
        print('Found: ' + filename)
        print('The size of the file: ' + str(statinfo.st_size))
    return filepath
Example 2: _checkout
# Required import: from urllib import request [as alias]
# Or: from urllib.request import urlretrieve [as alias]
def _checkout(self, local_dir):
    user = self.config.get("user")
    repo = self.config.get("repo")
    version = self.config.get("version", "master")
    # TODO : Sanitize URL
    url = URL.format(user=user, repo=repo, version=version)
    logger.info("Downloading {}/{} from github".format(user, repo))
    try:
        (filename, headers) = urlretrieve(url)
    except URLError as e:
        raise RuntimeError("Failed to download '{}'. '{}'".format(url, e.reason))
    t = tarfile.open(filename)
    (cache_root, core) = os.path.split(local_dir)
    # Ugly hack to get the first part of the directory name of the extracted files
    tmp = t.getnames()[0]
    t.extractall(cache_root)
    os.rename(os.path.join(cache_root, tmp), os.path.join(cache_root, core))
Example 3: fetch_url
# Required import: from urllib import request [as alias]
# Or: from urllib.request import urlretrieve [as alias]
def fetch_url(url, file_path, progress_bar=_report_hook, remote_checksum=None):
    """Download a remote file.

    Fetch a dataset pointed to by ``url``, check its SHA-256 checksum for
    integrity, and save it to ``file_path``.

    Parameters
    ----------
    url : string
        URL of file to download

    file_path : string
        Path to the local file that will be created

    progress_bar : func callback, optional, default: built-in
        A callback to a function ``func(count, block_size, total_size)`` that
        will display a progress bar.

    remote_checksum : str, optional, default: None
        The expected SHA-256 checksum of the file.
    """
    urlretrieve(url, file_path, progress_bar)
    checksum = _sha256(file_path)
    if remote_checksum is not None and remote_checksum != checksum:
        raise IOError("{} has an SHA256 checksum ({}) "
                      "differing from expected ({}), "
                      "file may be corrupted.".format(file_path, checksum,
                                                      remote_checksum))
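As the docstring above notes, the progress_bar argument is urlretrieve's reporthook: it is called as func(count, block_size, total_size) after each downloaded block. A minimal stand-alone hook in the spirit of the default _report_hook might look like the sketch below; the URL and file name are placeholders, not part of the original example:

from urllib.request import urlretrieve

def _report_hook(count, block_size, total_size):
    # total_size can be -1 when the server does not send a Content-Length header.
    if total_size > 0:
        percent = min(count * block_size / total_size, 1.0)
        print('\rDownloaded {:.1%}'.format(percent), end='')

# Hypothetical URL and destination, for illustration only.
urlretrieve('https://example.com/dataset.tar.gz', 'dataset.tar.gz',
            reporthook=_report_hook)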
Example 4: fetch_data
# Required import: from urllib import request [as alias]
# Or: from urllib.request import urlretrieve [as alias]
def fetch_data():
    """Download the data."""
    train_file = tempfile.NamedTemporaryFile()
    test_file = tempfile.NamedTemporaryFile()
    req.urlretrieve("http://mlr.cs.umass.edu/ml/machine-learning-databases"
                    "/adult/adult.data", train_file.name)
    req.urlretrieve("http://mlr.cs.umass.edu/ml/machine-learning-databases/"
                    "adult/adult.test", test_file.name)
    df_train = pd.read_csv(train_file, names=COLUMNS, skipinitialspace=True)
    df_test = pd.read_csv(test_file, names=COLUMNS, skipinitialspace=True,
                          skiprows=1)
    df_train[LABEL_COLUMN] = (df_train["income_bracket"]
                              .apply(lambda x: ">50K" in x)).astype(int)
    df_test[LABEL_COLUMN] = (df_test["income_bracket"]
                             .apply(lambda x: ">50K" in x)).astype(int)
    return df_train, df_test
Example 5: loadData
# Required import: from urllib import request [as alias]
# Or: from urllib.request import urlretrieve [as alias]
def loadData(src):
    print('Downloading ' + src)
    fname, h = urlretrieve(src, './delete.me')
    print('Done.')
    try:
        print('Extracting files...')
        with tarfile.open(fname) as tar:
            tar.extractall()
        print('Done.')
        print('Preparing train set...')
        trn = np.empty((0, numFeature + 1), dtype=int)
        for i in range(5):
            batchName = './cifar-10-batches-py/data_batch_{0}'.format(i + 1)
            trn = np.vstack((trn, readBatch(batchName)))
        print('Done.')
        print('Preparing test set...')
        tst = readBatch('./cifar-10-batches-py/test_batch')
        print('Done.')
    finally:
        os.remove(fname)
    return (trn, tst)
Example 6: loadData
# Required import: from urllib import request [as alias]
# Or: from urllib.request import urlretrieve [as alias]
def loadData(src, cimg):
    gzfname, h = urlretrieve(src, './delete.me')
    try:
        with gzip.open(gzfname) as gz:
            # Check the magic number of the MNIST image file.
            n = struct.unpack('I', gz.read(4))
            if n[0] != 0x3080000:
                raise Exception('Invalid file: unexpected magic number.')
            # Read the number of entries.
            n = struct.unpack('>I', gz.read(4))[0]
            if n != cimg:
                raise Exception('Invalid file: expected {0} entries.'.format(cimg))
            crow = struct.unpack('>I', gz.read(4))[0]
            ccol = struct.unpack('>I', gz.read(4))[0]
            if crow != 28 or ccol != 28:
                raise Exception('Invalid file: expected 28 rows/cols per image.')
            # Read the raw pixel data.
            res = np.frombuffer(gz.read(cimg * crow * ccol), dtype=np.uint8)
    finally:
        os.remove(gzfname)
    return res.reshape((cimg, crow * ccol))
Example 7: load_or_download_mnist_files
# Required import: from urllib import request [as alias]
# Or: from urllib.request import urlretrieve [as alias]
def load_or_download_mnist_files(filename, num_samples, local_data_dir):
    if local_data_dir:
        local_path = os.path.join(local_data_dir, filename)
    else:
        local_path = os.path.join(os.getcwd(), filename)
    if os.path.exists(local_path):
        gzfname = local_path
    else:
        local_data_dir = os.path.dirname(local_path)
        if not os.path.exists(local_data_dir):
            os.makedirs(local_data_dir)
        filename = "http://yann.lecun.com/exdb/mnist/" + filename
        print("Downloading from " + filename, end=" ")
        gzfname, h = urlretrieve(filename, local_path)
        print("[Done]")
    return gzfname
Example 8: _install_module
# Required import: from urllib import request [as alias]
# Or: from urllib.request import urlretrieve [as alias]
def _install_module(self, fullname):
    top, username, repo, modname = self._parse_fullname(fullname)
    url = 'https://raw.githubusercontent.com/%s/%s/master/%s' % (username, repo, modname + '.py')
    print('Downloading: ', url)
    try:
        tmp_file, resp = urlretrieve(url)
        with open(tmp_file, 'r') as f:
            new_content = f.read()
        if new_content == 'Not Found':
            raise InstallError('remote file does not exist')
    except IOError:
        raise InstallError('error downloading file')
    new = tmp_file
    old = self._install_path(fullname)
    updated = self._update_if_changed(old, new)
    if updated == 'updated':
        print('Updating module: ', fullname)
    elif updated == 'installed':
        print('Installing module: ', fullname)
    elif updated == 'noaction':
        print('Using existing version: ', fullname)
Example 9: thread_updater
# Required import: from urllib import request [as alias]
# Or: from urllib.request import urlretrieve [as alias]
def thread_updater():
    while True:
        time.sleep(21600)
        if __version__ < latest_ver:
            logger.info(
                "Your version (%s%s) is out of date, the latest is %s, which has now been downloaded for you into the 'updates' subdirectory." % (
                    type, __version__, latest_ver))
            newfilename = ntpath.basename(latestfile)
            if not os.path.isdir('updates'):
                os.mkdir('updates')
            # 'requests' is presumably urllib.request imported under an alias (see the import note above).
            requests.urlretrieve(latestfile, os.path.join('updates', newfilename))
############################################################
# playlist tools
############################################################
Example 10: load_logos
# Required import: from urllib import request [as alias]
# Or: from urllib.request import urlretrieve [as alias]
def load_logos(filename):
    '''
    Load logos from a geologos archive from <filename>

    <filename> can be either a local path or a remote URL.
    '''
    if filename.startswith('http'):
        log.info('Downloading GeoLogos bundle: %s', filename)
        filename, _ = urlretrieve(filename, tmp.path('geologos.tar.xz'))

    log.info('Extracting GeoLogos bundle')
    with contextlib.closing(lzma.LZMAFile(filename)) as xz:
        with tarfile.open(fileobj=xz, encoding='utf8') as tar:
            tar.extractall(tmp.root, members=tar.getmembers())

    log.info('Moving to the final location and cleaning up')
    if os.path.exists(logos.root):
        shutil.rmtree(logos.root)
    shutil.move(tmp.path('logos'), logos.root)
    log.info('Done')
Example 11: import_shapers
# Required import: from urllib import request [as alias]
# Or: from urllib.request import urlretrieve [as alias]
def import_shapers(logger):
    (_, zip_path) = tempfile.mkstemp()
    (_, http_message) = request.urlretrieve(url, zip_path)
    zip_file = ZipFile(zip_path)
    ex_dir = tempfile.mkdtemp()
    zip_file.extractall(ex_dir)
    shapefiles = glob.glob1(ex_dir, "*.shp")

    lm = LayerMapping(Parcel, "/data/shapefiles/M274TaxPar.shp", {
        "shape_leng": "SHAPE_Leng",
        "shape_area": "SHAPE_Area",
        "map_par_id": "MAP_PAR_ID",
        "loc_id": "LOC_ID",
        "poly_type": "POLY_TYPE",
        "map_no": "MAP_NO",
        "source": "SOURCE",
        "plan_id": "PLAN_ID",
        "last_edit": "LAST_EDIT",
        "town_id": "TOWN_ID",
        "shape": "POLYGON"
    })
Example 12: _load_cifar10
# Required import: from urllib import request [as alias]
# Or: from urllib.request import urlretrieve [as alias]
def _load_cifar10():
    def unflatten(images):
        return np.transpose(images.reshape((images.shape[0], 3, 32, 32)),
                            [0, 2, 3, 1])

    with tempfile.NamedTemporaryFile() as f:
        request.urlretrieve(URLS['cifar10'], f.name)
        tar = tarfile.open(fileobj=f)
        train_data_batches, train_data_labels = [], []
        for batch in range(1, 6):
            data_dict = scipy.io.loadmat(tar.extractfile(
                'cifar-10-batches-mat/data_batch_{}.mat'.format(batch)))
            train_data_batches.append(data_dict['data'])
            train_data_labels.append(data_dict['labels'].flatten())
        train_set = {'images': np.concatenate(train_data_batches, axis=0),
                     'labels': np.concatenate(train_data_labels, axis=0)}
        data_dict = scipy.io.loadmat(tar.extractfile(
            'cifar-10-batches-mat/test_batch.mat'))
        test_set = {'images': data_dict['data'],
                    'labels': data_dict['labels'].flatten()}
        train_set['images'] = _encode_png(unflatten(train_set['images']))
        test_set['images'] = _encode_png(unflatten(test_set['images']))
        return dict(train=train_set, test=test_set)
Example 13: _load_cifar100
# Required import: from urllib import request [as alias]
# Or: from urllib.request import urlretrieve [as alias]
def _load_cifar100():
    def unflatten(images):
        return np.transpose(images.reshape((images.shape[0], 3, 32, 32)),
                            [0, 2, 3, 1])

    with tempfile.NamedTemporaryFile() as f:
        request.urlretrieve(URLS['cifar100'], f.name)
        tar = tarfile.open(fileobj=f)
        data_dict = scipy.io.loadmat(tar.extractfile('cifar-100-matlab/train.mat'))
        train_set = {'images': data_dict['data'],
                     'labels': data_dict['fine_labels'].flatten()}
        data_dict = scipy.io.loadmat(tar.extractfile('cifar-100-matlab/test.mat'))
        test_set = {'images': data_dict['data'],
                    'labels': data_dict['fine_labels'].flatten()}
        train_set['images'] = _encode_png(unflatten(train_set['images']))
        test_set['images'] = _encode_png(unflatten(test_set['images']))
        return dict(train=train_set, test=test_set)
# Load a custom dataset from a local directory.
Example 14: check_or_download_inception
# Required import: from urllib import request [as alias]
# Or: from urllib.request import urlretrieve [as alias]
def check_or_download_inception(inception_path):
    """ Checks if the path to the inception file is valid, or downloads
    the file if it is not present. """
    INCEPTION_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
    if inception_path is None:
        inception_path = '/tmp'
    inception_path = pathlib.Path(inception_path)
    model_file = inception_path / 'classify_image_graph_def.pb'
    if not model_file.exists():
        print("Downloading Inception model")
        from urllib import request
        import tarfile
        fn, _ = request.urlretrieve(INCEPTION_URL)
        with tarfile.open(fn, mode='r') as f:
            f.extract('classify_image_graph_def.pb', str(model_file.parent))
    return str(model_file)
Example 15: download_url
# Required import: from urllib import request [as alias]
# Or: from urllib.request import urlretrieve [as alias]
def download_url(url, destination=None, progress_bar=True):
    def my_hook(t):
        last_b = [0]

        def inner(b=1, bsize=1, tsize=None):
            if tsize is not None:
                t.total = tsize
            if b > 0:
                t.update((b - last_b[0]) * bsize)
            last_b[0] = b
        return inner

    if progress_bar:
        with tqdm(unit='B', unit_scale=True, miniters=1, desc=url.split('/')[-1]) as t:
            filename, _ = urlretrieve(url, filename=destination, reporthook=my_hook(t))
    else:
        filename, _ = urlretrieve(url, filename=destination)
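A hypothetical call to this helper, with placeholder URL and destination, might look like the line below; tqdm then renders the progress bar driven by the inner hook:

download_url('https://example.com/model.tar.gz', destination='model.tar.gz')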