This article collects typical usage examples of Python's urllib.URLopener. If you are unsure what urllib.URLopener does or how to use it, the curated code samples below should help; you can also look further into the urllib module that the class belongs to.
Fifteen code examples of urllib.URLopener are shown below, ordered by popularity by default.
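Before the individual examples, here is a minimal sketch of the basic pattern most of them follow. This is Python 2 code (in Python 3 the class survives only as the deprecated urllib.request.URLopener), and the URL and filename are placeholders:

import urllib

opener = urllib.URLopener()
# retrieve() downloads the resource at the given URL into a local file.
opener.retrieve('http://example.com/archive.zip', 'archive.zip')
opener.close()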
Example 1: load_model_from_url
# Required import: import urllib [as alias]
# Or: from urllib import URLopener [as alias]
def load_model_from_url(url):
    # TODO: move this into a class..
    global scoring_model
    url_opener = urllib.URLopener()
    temp_model_path = get_temp_model_path()
    url_opener.retrieve(url, temp_model_path)
    # try to load the model:
    try:
        temp_model = ScoringModel.from_file(temp_model_path)
    except Exception as e:
        print "Failed to load downloaded model: %s" % e
        os.remove(temp_model_path)
        raise RuntimeError("Failed to load downloaded model! error: %s" % e)
    # update model:
    scoring_model = temp_model
    # delete existing model
    if path.isfile(model_file_path):
        os.remove(model_file_path)
    os.rename(temp_model_path, model_file_path)
# TODO: move this to an object with an init function...
Example 2: download_file
# Required import: import urllib [as alias]
# Or: from urllib import URLopener [as alias]
def download_file(url, destination=''):
    if not destination:
        fd, destination = tempfile.mkstemp()
        os.remove(destination)
        os.close(fd)
    if not os.path.isfile(destination):
        ctx.logger.info('Downloading {0} to {1}...'.format(
            url, destination))
        try:
            final_url = urllib.urlopen(url).geturl()
            if final_url != url:
                ctx.logger.debug('Redirected to {0}'.format(final_url))
            f = urllib.URLopener()
            # TODO: try except with @retry
            f.retrieve(final_url, destination)
        except Exception:
            curl_download_with_retries(url, destination)
    else:
        ctx.logger.debug('File {0} already exists...'.format(destination))
    return destination
Example 3: get_all_images
# Required import: import urllib [as alias]
# Or: from urllib import URLopener [as alias]
def get_all_images(*arg):
    url = arg[0]
    import urllib
    links = get_all_images_links(url)
    print(links)
    if len(arg) > 1 and arg[1] == "download":
        s = urlparse(url)
        seed_page = s.scheme + '://' + s.netloc
        i = 0
        while i < len(links):
            link, flag = url_parse(links[i], seed_page)
            print("downloading --> " + link)
            try:
                file = urllib.URLopener()
                file.retrieve(link, str("img " + str(i) + ".jpg"))
            except:
                pass
            i = i + 1
    else:
        pass
############## Download Google Images ############
#Finding 'Next Image' from the given raw page for users (image search)
Example 4: try_download
# Required import: import urllib [as alias]
# Or: from urllib import URLopener [as alias]
def try_download(_path, _file, _url, _stale):
    now = time()
    url = URLopener()
    file_exists = isfile(_path + _file) == True
    if file_exists:
        file_old = (getmtime(_path + _file) + _stale) < now
    if not file_exists or (file_exists and file_old):
        try:
            url.retrieve(_url, _path + _file)
            result = 'ID ALIAS MAPPER: \'{}\' successfully downloaded'.format(_file)
        except IOError:
            result = 'ID ALIAS MAPPER: \'{}\' could not be downloaded'.format(_file)
    else:
        result = 'ID ALIAS MAPPER: \'{}\' is current, not downloaded'.format(_file)
    url.close()
    return result
# SHORT VERSION - MAKES A SIMPLE {INTEGER ID: 'CALLSIGN'} DICTIONARY
Example 5: _download_data
# Required import: import urllib [as alias]
# Or: from urllib import URLopener [as alias]
def _download_data(self):
    if osp.exists(self.dataset_dir):
        print("This dataset has been downloaded.")
        return
    mkdir_if_missing(self.dataset_dir)
    fpath = osp.join(self.dataset_dir, osp.basename(self.dataset_url))
    print("Downloading iLIDS-VID dataset")
    url_opener = urllib.URLopener()
    url_opener.retrieve(self.dataset_url, fpath)
    print("Extracting files")
    tar = tarfile.open(fpath)
    tar.extractall(path=self.dataset_dir)
    tar.close()
Example 6: determineCookieHash
# Required import: import urllib [as alias]
# Or: from urllib import URLopener [as alias]
def determineCookieHash(host):
    wclient = urllib.URLopener()
    print "[+] Connecting to retrieve cookie hash"
    try:
        req = wclient.open(host + "/wp-login.php?action=logout")
    except IOError, e:
        if e[1] == 302:
            # Got a 302 redirect, but check for cookies before redirecting.
            # e[3] is a httplib.HTTPMessage instance.
            if e[3].dict.has_key('set-cookie'):
                cookie = e[3].dict['set-cookie']
                chash = cookie[string.find(cookie, "user_") + 5:]
                chash = chash[:string.find(chash, "=")]
                print "[+] Cookie hash found: %s" % chash
                return chash
Example 7: determineIsMbstringInstalled
# Required import: import urllib [as alias]
# Or: from urllib import URLopener [as alias]
def determineIsMbstringInstalled(host, pid):
    wclient = urllib.URLopener()
    print "[+] Connecting to check if mbstring is installed"
    params = {
        'charset': 'UTF-7',
        'title': '+ADA-'
    }
    try:
        req = wclient.open(host + "/wp-trackback.php?p=" + pid, urllib.urlencode(params))
    except IOError, e:
        if e[1] == 302:
            print "[+] ext/mbstring is installed. continue with exploit"
            return 1
Example 8: determineTablePrefix
# Required import: import urllib [as alias]
# Or: from urllib import URLopener [as alias]
def determineTablePrefix(host, pid):
    wclient = urllib.URLopener()
    print "[+] Connecting to determine mysql table prefix"
    params = {
        'charset': 'UTF-7',
        'title': 'None',
        'url': 'None',
        'excerpt': 'None',
        'blog_name': '+ACc-ILLEGAL'
    }
    try:
        req = wclient.open(host + "/wp-trackback.php?p=" + pid, urllib.urlencode(params))
    except IOError, e:
        if e[1] == 302:
            print "[-] Table prefix cannot be determined... exploit not possible"
            sys.exit(-2)
    return ""
Example 9: main
# Required import: import urllib [as alias]
# Or: from urllib import URLopener [as alias]
def main():
    print('Downloading Update from HEAD...')
    # Need the cert to access github
    os.environ["REQUESTS_CA_BUNDLE"] = os.path.join(os.getcwd(), "cacert.pem")
    # Get the Zipfile from Github
    base_url = 'https://github.com/digiholic/universalSmashSystem/archive/master.zip'
    page = urllib.urlopen(base_url)
    # Download the zipfile
    downloader = urllib.URLopener()
    downloader.retrieve(page.geturl(), settingsManager.createPath('update.zip'))
    # Extract it
    updatezip = zipfile.ZipFile(settingsManager.createPath('update.zip'))
    updatezip.extractall('tmp')
    print('Copying files into game directory...')
    # Copy the files upward, then remove the tmp files
    tmp_path = settingsManager.createPath('tmp' + os.sep + 'universalSmashSystem-master' + os.sep)
    recursive_overwrite(tmp_path, settingsManager.createPath(''))
    shutil.rmtree(tmp_path)
    os.remove(settingsManager.createPath('update.zip'))
    print('Done!')
Example 10: __init__
# Required import: import urllib [as alias]
# Or: from urllib import URLopener [as alias]
def __init__(self, wsdlsource, config=Config, **kw):
    reader = wstools.WSDLTools.WSDLReader()
    self.wsdl = None
    # From Mark Pilgrim's "Dive Into Python" toolkit.py--open anything.
    if self.wsdl is None and hasattr(wsdlsource, "read"):
        print 'stream:', wsdlsource
        try:
            self.wsdl = reader.loadFromStream(wsdlsource)
        except xml.parsers.expat.ExpatError, e:
            newstream = urllib.URLopener(key_file=config.SSL.key_file, cert_file=config.SSL.cert_file).open(wsdlsource)
            buf = newstream.readlines()
            raise Error, "Unable to parse WSDL file at %s: \n\t%s" % \
                (wsdlsource, "\t".join(buf))
    # NOT TESTED (as of April 17, 2003)
    #if self.wsdl is None and wsdlsource == '-':
    #    import sys
    #    self.wsdl = reader.loadFromStream(sys.stdin)
    #    print 'stdin'
Example 11: download_snli
# Required import: import urllib [as alias]
# Or: from urllib import URLopener [as alias]
def download_snli():
    '''Creates data and snli paths and downloads SNLI in the home dir'''
    home = os.environ['HOME']
    data_dir = join(home, '.data')
    snli_dir = join(data_dir, 'snli')
    snli_url = 'http://nlp.stanford.edu/projects/snli/snli_1.0.zip'
    if not os.path.exists(data_dir):
        os.mkdir(data_dir)
    if not os.path.exists(snli_dir):
        os.mkdir(snli_dir)
    if not os.path.exists(join(data_dir, 'snli_1.0.zip')):
        print('Downloading SNLI...')
        snlidownload = urllib.URLopener()
        snlidownload.retrieve(snli_url, join(data_dir, "snli_1.0.zip"))
    print('Opening zip file...')
    archive = zipfile.ZipFile(join(data_dir, 'snli_1.0.zip'), 'r')
    return archive, snli_dir
Example 12: _download_data
# Required import: import urllib [as alias]
# Or: from urllib import URLopener [as alias]
def _download_data(self):
    if osp.exists(self.dataset_dir):
        print("This dataset has been downloaded.")
        return
    print("Creating directory {}".format(self.dataset_dir))
    mkdir_if_missing(self.dataset_dir)
    fpath = osp.join(self.dataset_dir, osp.basename(self.dataset_url))
    print("Downloading DukeMTMC-VideoReID dataset")
    url_opener = urllib.URLopener()
    url_opener.retrieve(self.dataset_url, fpath)
    print("Extracting files")
    zip_ref = zipfile.ZipFile(fpath, 'r')
    zip_ref.extractall(self.dataset_dir)
    zip_ref.close()
Example 13: retriever
# Required import: import urllib [as alias]
# Or: from urllib import URLopener [as alias]
def retriever(source, destination, *args):
    class Opener(urllib.URLopener):
        version = randomagent()
    Opener().retrieve(source, destination, *args)
Example 14: retriever
# Required import: import urllib [as alias]
# Or: from urllib import URLopener [as alias]
def retriever(source, destination, *args):
    class Opener(URLopener):
        version = randomagent()
    Opener().retrieve(source, destination, *args)
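Examples 13 and 14 work because URLopener sends its version class attribute as the User-Agent header, so subclassing and overriding version is enough to change the advertised agent (the randomagent() helper comes from the surrounding project and is not shown here). A sketch of the same idea with a hard-coded, made-up agent string and placeholder URL/filename:

from urllib import URLopener

class CustomAgentOpener(URLopener):
    # URLopener sends this value as its User-Agent header.
    version = 'Mozilla/5.0 (X11; Linux x86_64) ExampleAgent/1.0'

CustomAgentOpener().retrieve('http://example.com/file.bin', 'file.bin')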
Example 15: __init__
# Required import: import urllib [as alias]
# Or: from urllib import URLopener [as alias]
def __init__(self, url):
    super(HttpDB, self).__init__()
    self.baseurl = url._replace(fragment="").geturl()
    self.db = urlop = URLopener()
    for hdr, val in (
        tuple(x.split("=", 1)) if "=" in x else (x, "")
        for x in url.fragment.split("&")
        if x
    ):
        urlop.addheader(hdr, val)
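Example 15 packs extra HTTP headers into the URL fragment as "name=value" pairs separated by "&", and registers them on the opener with addheader(). A minimal usage sketch under that assumption (the URL and header values are placeholders; HttpDB is the class from the example's project):

from urlparse import urlparse  # Python 2

parsed = urlparse('http://example.com/db#Authorization=token-abc123&X-Debug')
db = HttpDB(parsed)
# db.baseurl is 'http://example.com/db'; the wrapped URLopener now sends
# "Authorization: token-abc123" and an empty "X-Debug" header on every request.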