本文整理汇总了Python中urllib2.install_opener方法的典型用法代码示例。如果您正苦于以下问题:Python urllib2.install_opener方法的具体用法?Python urllib2.install_opener怎么用?Python urllib2.install_opener使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类urllib2
的用法示例。
在下文中一共展示了urllib2.install_opener方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: retrieve_status_page
# 需要导入模块: import urllib2 [as 别名]
# 或者: from urllib2 import install_opener [as 别名]
def retrieve_status_page(user, password, url):
    """Fetch *url* with HTTP basic auth and return the response body.

    Certificate verification is disabled when the interpreter exposes
    ``ssl._create_unverified_context``.  Raises CrawlError when the
    page cannot be retrieved.
    """
    # Older Pythons lack _create_unverified_context; skip quietly there.
    if hasattr(ssl, '_create_unverified_context'):
        ssl._create_default_https_context = ssl._create_unverified_context

    # Install a global opener that answers basic-auth challenges for url.
    mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    mgr.add_password(None, url, user, password)
    urllib2.install_opener(
        urllib2.build_opener(urllib2.HTTPBasicAuthHandler(mgr)))

    try:
        return urllib2.urlopen(urllib2.Request(url)).read()
    except Exception:
        raise CrawlError("can't access to http://%s", url)
示例2: probe_html5
# 需要导入模块: import urllib2 [as 别名]
# 或者: from urllib2 import install_opener [as 别名]
def probe_html5(self, result):
    """Resolve ``result['url']`` without following HTTP redirects.

    Installs a global opener whose redirect handler hands every 3xx
    response back unmodified; when the server answers 200 directly,
    the response body is taken as the real media URL.
    """
    class _NoRedirect(urllib2.HTTPRedirectHandler):
        # Return the 3xx response itself instead of following it.
        def http_error_302(self, req, fp, code, msg, headers):
            resp = urllib.addinfourl(fp, headers, req.get_full_url())
            resp.status = code
            resp.code = code
            return resp

        http_error_300 = http_error_302
        http_error_301 = http_error_302
        http_error_303 = http_error_302
        http_error_307 = http_error_302

    urllib2.install_opener(urllib2.build_opener(_NoRedirect()))
    resp = urllib2.urlopen(
        urllib2.Request(result['url'], headers=result['headers']))
    if resp.code == 200:
        result['url'] = resp.read()
    return result
示例3: scan
# 需要导入模块: import urllib2 [as 别名]
# 或者: from urllib2 import install_opener [as 别名]
def scan(url, redirect, insecure, useragent, postdata, proxy):
    """Request *url* and report its security-related response headers.

    url       -- parsed URL object (as produced by urlparse)
    redirect  -- follow redirects through RedirectHandler when true
    insecure  -- skip TLS certificate verification when true
    useragent -- User-Agent header value
    postdata  -- optional mapping sent as a form-encoded POST body
    proxy     -- optional proxy endpoint used for both http and https
    """
    request = urllib2.Request(url.geturl())
    for name, value in (('User-Agent', useragent),
                        ('Origin', 'http://hsecscan.com'),
                        ('Accept', '*/*')):
        request.add_header(name, value)
    if postdata:
        # Presence of a body turns the request into a POST.
        request.add_data(urllib.urlencode(postdata))

    handlers = [urllib2.HTTPHandler()]
    if redirect:
        handlers.append(RedirectHandler())
    if proxy:
        handlers.append(urllib2.ProxyHandler({'http': proxy, 'https': proxy}))
    if insecure:
        # Unverified context: accept any certificate.
        handlers.append(
            urllib2.HTTPSHandler(context=ssl._create_unverified_context()))
    urllib2.install_opener(urllib2.build_opener(*handlers))

    response = urllib2.urlopen(request)
    print('>> RESPONSE INFO <<')
    print_response(response.geturl(), response.getcode(), response.info())
    print('>> RESPONSE HEADERS DETAILS <<')
    for header in response.info().items():
        check_header(header)
    print('>> RESPONSE MISSING HEADERS <<')
    missing_headers(response.info().items(), url.scheme)
示例4: totalPage
# 需要导入模块: import urllib2 [as 别名]
# 或者: from urllib2 import install_opener [as 别名]
def totalPage(self):
    """Return the 0-based index of the last wordbook page, or False.

    Loads saved cookies first; returns False when they are missing or
    the server reports a wrong password.  Falls back to 1 when the
    "last page" link cannot be located in the HTML.
    """
    self.loadedCookies = self.loadCookies()
    if not self.loadedCookies:
        return False
    # Page index starts from 0 and ends at max - 1.
    request = urllib2.Request(
        'http://dict.youdao.com/wordbook/wordlist?p=0&tags=')
    urllib2.install_opener(urllib2.build_opener(
        urllib2.HTTPCookieProcessor(self.loadedCookies)))
    source = urllib2.urlopen(request).read()
    if '密码错误' in source:
        # Server-side marker for "wrong password".
        return False
    try:
        match = re.search(
            '<a href="wordlist.p=(.*).tags=" class="next-page">最后一页</a>',
            source, re.M | re.I)
        return int(match.group(1)) - 1
    except Exception:
        # Single-page wordbooks have no "last page" link.
        return 1
示例5: getUrlrh
# 需要导入模块: import urllib2 [as 别名]
# 或者: from urllib2 import install_opener [as 别名]
def getUrlrh(url, data=None, header=None, usecookies=True):
    """Fetch *url* and return a ``(body, headers)`` tuple.

    url        -- address to request
    data       -- optional POST body (GET when None)
    header     -- optional dict of request headers; defaults to
                  ``{'User-Agent': UA}`` when empty or None
    usecookies -- when true, install a cookie-aware global opener

    Returns the response body (empty string on any error) together
    with a dict of the response headers collected so far.
    """
    jar = cookielib.LWPCookieJar()
    if usecookies:
        # Route subsequent urllib2 traffic through a cookie-handling opener.
        urllib2.install_opener(
            urllib2.build_opener(urllib2.HTTPCookieProcessor(jar)))
    if not header:
        # BUG FIX: the default was previously a mutable ``header={}``
        # in the signature, shared across calls; build it here instead.
        header = {'User-Agent': UA}
    rh = {}
    req = urllib2.Request(url, data, headers=header)
    try:
        response = urllib2.urlopen(req, timeout=15)
        for k in response.headers.keys():
            rh[k] = response.headers[k]
        link = response.read()
        response.close()
    except Exception:
        # Best-effort fetch: any failure yields an empty body.
        # (Was a bare ``except:``, which also swallowed SystemExit.)
        link = ''
    return link, rh
示例6: retrieve_status_page
# 需要导入模块: import urllib2 [as 别名]
# 或者: from urllib2 import install_opener [as 别名]
def retrieve_status_page(hostname, port, user, password):
    """Fetch the Tomcat manager XML status page and return its body.

    Builds the manager URL from *hostname* and *port*, authenticates
    with HTTP basic auth, and raises CrawlError on any failure.
    """
    status_url = "http://%s:%s/manager/status?XML=true" % (hostname, port)

    mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    mgr.add_password(None, status_url, user, password)
    urllib2.install_opener(
        urllib2.build_opener(urllib2.HTTPBasicAuthHandler(mgr)))

    try:
        return urllib2.urlopen(urllib2.Request(status_url)).read()
    except Exception:
        raise CrawlError("can't access to http://%s:%s",
                         hostname, port)
示例7: bing_search
# 需要导入模块: import urllib2 [as 别名]
# 或者: from urllib2 import install_opener [as 别名]
def bing_search(query, key, offset, **kwargs):
    """Query the Azure Bing Search API and return the raw JSON text.

    query  -- search terms (URL-quoted here)
    key    -- Azure API key, used as the basic-auth password
    offset -- value for the ``$skip`` paging parameter

    Exits the process with an error message when the request fails.
    """
    # Azure Datamarket uses an empty user name with the key as password.
    username = ''
    baseURL = 'https://api.datamarket.azure.com/Bing/Search/'
    query = urllib.quote(query)
    url = baseURL+'Web?Query=%27'+query+'%27&$top=50&$format=json&$skip='+offset
    print('[*] Fetching '+url)
    # Dead code removed: a hand-built ``Authorization: Basic`` header
    # (user_agent/credentials/auth locals) was computed but never sent;
    # authentication actually happens via the password manager below.
    password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    password_mgr.add_password(None, url, username, key)
    handler = urllib2.HTTPBasicAuthHandler(password_mgr)
    urllib2.install_opener(urllib2.build_opener(handler))
    try:
        readURL = urllib2.urlopen(url, timeout=60).read()
    except Exception as e:
        sys.exit('[-] Failed to fetch bing results. Are you sure you have the right API key?\n Error: '+str(e))
    return readURL
示例8: download_vcpython27
# 需要导入模块: import urllib2 [as 别名]
# 或者: from urllib2 import install_opener [as 别名]
def download_vcpython27(self):
    """
    Download vcpython27 since some Windows 7 boxes have it and some don't.
    :return: None
    """
    self._prepare_for_download()
    logger.info('Beginning download of vcpython27... this may take a few minutes...')
    target = os.path.join(DOWNLOADS_DIR, 'vcpython27.msi')
    with open(target, 'wb') as out:
        if self.PROXY is not None:
            # Route the download through the configured proxy.
            urllib2.install_opener(urllib2.build_opener(
                urllib2.HTTPHandler(),
                urllib2.HTTPSHandler(),
                urllib2.ProxyHandler({'http': self.PROXY, 'https': self.PROXY}),
            ))
        payload = urllib2.urlopen(
            self.VCPYTHON27_DOWNLOAD_URL, timeout=self.DOWNLOAD_TIMEOUT).read()
        out.write(payload)
    logger.debug('Download of vcpython27 complete')
示例9: download_python
# 需要导入模块: import urllib2 [as 别名]
# 或者: from urllib2 import install_opener [as 别名]
def download_python(self):
    """
    Download Python
    :return: None
    """
    self._prepare_for_download()
    logger.info('Beginning download of python')
    destination = os.path.join(DOWNLOADS_DIR, 'python-installer.msi')
    with open(destination, 'wb') as installer:
        if self.PROXY is not None:
            # Route the download through the configured proxy.
            proxies = {'http': self.PROXY, 'https': self.PROXY}
            urllib2.install_opener(urllib2.build_opener(
                urllib2.HTTPHandler(),
                urllib2.HTTPSHandler(),
                urllib2.ProxyHandler(proxies),
            ))
        installer.write(urllib2.urlopen(
            self.PYTHON_DOWNLOAD_URL, timeout=self.DOWNLOAD_TIMEOUT).read())
    logger.debug('Download of python complete')
示例10: getUrlRespHtml
# 需要导入模块: import urllib2 [as 别名]
# 或者: from urllib2 import install_opener [as 别名]
def getUrlRespHtml(url):
    """Fetch *url* with browser-like request headers.

    Returns the response body, or an empty string on any error
    (best-effort helper: failures are deliberately swallowed).
    """
    html = ''
    try:
        browser_headers = {'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Charset':'GB2312,utf-8;q=0.7,*;q=0.7',
            'Accept-Language':'zh-cn,zh;q=0.5',
            'Cache-Control':'max-age=0',
            'Connection':'keep-alive',
            'Keep-Alive':'115',
            'User-Agent':'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.14) Gecko/20110221 Ubuntu/10.10 (maverick) Firefox/3.6.14'}
        # Cookie-aware opener, also installed globally for later calls.
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
        urllib2.install_opener(opener)
        opener.addheaders = browser_headers.items()
        html = opener.open(urllib2.Request(url)).read()
    except Exception:
        pass
    return html
示例11: getUrlRespHtmlByProxy
# 需要导入模块: import urllib2 [as 别名]
# 或者: from urllib2 import install_opener [as 别名]
def getUrlRespHtmlByProxy(url, proxy):
    """Fetch *url* through an https *proxy* with browser-like headers.

    Returns the response body, or an empty string on any error
    (best-effort helper: failures are deliberately swallowed).
    """
    html = ''
    try:
        browser_headers = {'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Charset':'GB2312,utf-8;q=0.7,*;q=0.7',
            'Accept-Language':'zh-cn,zh;q=0.5',
            'Cache-Control':'max-age=0',
            'Connection':'keep-alive',
            'Keep-Alive':'115',
            'User-Agent':'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.14) Gecko/20110221 Ubuntu/10.10 (maverick) Firefox/3.6.14'}
        # Only https traffic is proxied, matching the original setup.
        opener = urllib2.build_opener(urllib2.ProxyHandler({'https': proxy}))
        urllib2.install_opener(opener)
        opener.addheaders = browser_headers.items()
        html = opener.open(urllib2.Request(url)).read()
    except Exception:
        pass
    return html
示例12: getContent_GET
# 需要导入模块: import urllib2 [as 别名]
# 或者: from urllib2 import install_opener [as 别名]
def getContent_GET(url, param, injection):
    """
    Get the content of the url by GET method.

    Appends ``param=<url-encoded injection>`` to *url* (after '?' or
    '&' as appropriate) and returns the open response handle, or None
    on HTTP error.
    """
    global log
    newUrl = url
    ret = None
    if url.find('?') < 0:
        # No query string yet: terminate the path, then start one.
        if url[len(url)-1] != '/' and not allowedExtensions(url):
            url += '/'
        newUrl = url + '?' + param + '=' + single_urlencode(str(injection))
    else:
        newUrl = url + '&' + param + '=' + single_urlencode(str(injection))
    try:
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        urllib2.install_opener(opener)
        log <= (newUrl)
        req = Request(newUrl, None, txheaders)  # create a request object
        # BUG FIX: urlopen(req) was previously called twice in a row,
        # issuing the same HTTP request twice; open it once.
        ret = urlopen(req)  # and open it to return a handle on the url
    except HTTPError as e:
        log <= ('The server couldn\'t fulfill the request.')
        log <= ('Error code: %s' % e.code)
        return None
    # BUG FIX: the success path previously fell off the end and
    # returned None; hand back the response handle instead.
    return ret
示例13: getContentDirectURL_GET
# 需要导入模块: import urllib2 [as 别名]
# 或者: from urllib2 import install_opener [as 别名]
def getContentDirectURL_GET(url, string):
    """
    Get the content of the url by GET method.

    Appends *string* verbatim as the query string (adding a trailing
    '/' to extension-less paths first) and returns the open response
    handle, or None on HTTP error.
    """
    global log
    ret = ""
    try:
        if len(string) > 0:
            if url[len(url)-1] != '/' and url.find('?') < 0 and not allowedExtensions(url):
                url += '/'
            url = url + "?" + (string)
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        urllib2.install_opener(opener)
        log <= (url)
        req = Request(url, None, txheaders)  # create a request object
        ret = urlopen(req)  # and open it to return a handle on the url
    except HTTPError as e:
        log <= ('The server couldn\'t fulfill the request.')
        log <= ('Error code: %s' % e.code)
        return None
    # BUG FIX: the success path previously fell off the end and
    # returned None; hand back the response handle instead.
    return ret
示例14: getContent_POST
# 需要导入模块: import urllib2 [as 别名]
# 或者: from urllib2 import install_opener [as 别名]
def getContent_POST(url, param, injection):
    """
    Get the content of the url by POST method.

    Sends ``param=injection`` as a form-encoded POST body and returns
    the open response handle, or None on HTTP error.
    """
    global log
    txdata = urllib.urlencode({param: injection})
    ret = None
    try:
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        urllib2.install_opener(opener)
        log <= (url)
        log <= (txdata)
        req = Request(url, txdata, txheaders)  # create a request object
        # BUG FIX: urlopen(req) was previously called twice in a row,
        # posting the payload twice; open it once.
        ret = urlopen(req)  # and open it to return a handle on the url
    except HTTPError as e:
        log <= ('The server couldn\'t fulfill the request.')
        log <= ('Error code: %s' % e.code)
        return None
    # BUG FIX: the success path previously fell off the end and
    # returned None; hand back the response handle instead.
    return ret
示例15: getContentDirectURL_POST
# 需要导入模块: import urllib2 [as 别名]
# 或者: from urllib2 import install_opener [as 别名]
def getContentDirectURL_POST(url, allParams):
    """
    Get the content of the url by POST method.

    Sends the *allParams* mapping as a form-encoded POST body and
    returns the open response handle, or None on HTTP error.
    """
    global log
    txdata = urllib.urlencode(allParams)
    ret = None
    try:
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        urllib2.install_opener(opener)
        log <= (url)
        log <= (txdata)
        req = Request(url, txdata, txheaders)  # create a request object
        # BUG FIX: urlopen(req) was previously called twice in a row,
        # posting the payload twice; open it once.
        ret = urlopen(req)  # and open it to return a handle on the url
    except HTTPError as e:
        log <= ('The server couldn\'t fulfill the request.')
        log <= ('Error code: %s' % e.code)
        return None
    # BUG FIX: the success path previously fell off the end and
    # returned None; hand back the response handle instead.
    return ret