This page collects typical usage examples of the urllib2.unquote method in Python. If you are struggling with questions like "what exactly does urllib2.unquote do?" or "how is urllib2.unquote used?", the curated example code below may help. You can also explore further usage of urllib2, the module the method belongs to.
The following shows 15 code examples of urllib2.unquote, sorted by popularity by default. You can vote up the examples you like or find useful; your votes help the system recommend better Python code samples.
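Note that urllib2 does not define unquote itself: Python 2's urllib2 re-imports it from urllib, so urllib2.unquote and urllib.unquote are the same percent-decoding function (Python 3 moved it to urllib.parse.unquote). A minimal sketch of what the method does:

import urllib2

encoded = 'http%3A%2F%2Fexample.com%2Fsearch%3Fq%3Dhello%20world'
print(urllib2.unquote(encoded))
# -> http://example.com/search?q=hello world

# Python 3 equivalent:
#   from urllib.parse import unquote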
Example 1: handle_starttag

# Required module: import urllib2 [as alias]
# Or: from urllib2 import unquote [as alias]
def handle_starttag(self, tag, attrs):
    if tag == 'h3' and attrs == [('class', 'r')]:
        self.h3_flag = True
    if tag == 'a' and self.h3_flag:
        self.a_flag = True
    if tag == 'b' and self.a_flag:
        self.b_flag = True
    if self.a_flag:
        for (key, value) in attrs:
            if key == 'href':
                if value.startswith("/url?"):
                    # 'match' is re.match (imported via 'from re import match')
                    m = match(r'/url\?(url|q)=(.+?)&', value)
                    if m and len(m.groups()) == 2:
                        href = urllib2.unquote(m.group(2))
                        self.link = href
                else:
                    self.link = value
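To see what Example 1 extracts, here is an isolated run of the same regex on a made-up Google redirect href (the input is hypothetical, not from the original code):

import urllib2
from re import match

value = '/url?q=http%3A%2F%2Fexample.com%2Fpage&sa=U'  # hypothetical href
m = match(r'/url\?(url|q)=(.+?)&', value)
if m and len(m.groups()) == 2:
    print(urllib2.unquote(m.group(2)))  # -> http://example.com/page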
Example 2: get_video_url

# Required module: import urllib2 [as alias]
# Or: from urllib2 import unquote [as alias]
def get_video_url(page_url, premium=False, user="", password="", video_password="", page_data=""):
    logger.info("tvalacarta.servers.vuittv get_video_url page_url=" + page_url)
    data = scrapertools.cache_page(page_url)
    url2 = scrapertools.find_single_match(data, '<iframe width="[^"]+" height="[^"]+" scrolling="[^"]+" data-src="(http://www-arucitys-com.filesusr.com[^"]+)"')
    logger.info("url2=" + url2)
    data = scrapertools.cache_page(url2)
    media_url = scrapertools.find_single_match(data, r'"sourceURL":"([^"]+)"')
    logger.info("media_url=" + media_url)
    media_url = urllib2.unquote(media_url)
    logger.info("media_url=" + media_url)
    video_urls = []
    video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:], media_url])
    return video_urls

# Finds videos from this server in the given text
Example 3: cloudspace

# Required module: import urllib2 [as alias]
# Or: from urllib2 import unquote [as alias]
def cloudspace():
    '''
    Show Xunlei cloud space content.
    '''
    dhurl = '%s/%s/?type=all&order=create&t=%s' % (
        cloudurlpre,
        'req_history_play_list/req_num/200/req_offset/0',
        cachetime)
    rsp = xl.urlopen(dhurl)
    vods = json.loads(rsp)['resp']['history_play_list']
    menu = [
        {'label': urllib2.unquote(v['file_name'].encode('utf-8')),
         'path': plugin.url_for('playcloudvideo',
                                vinfo=str((v['src_url'], v['gcid'],
                                           v['cid'], v['file_name'])))}
        for v in vods if 'src_url' in v]
    return menu
Example 4: _cleanstr

# Required module: import urllib2 [as alias]
# Or: from urllib2 import unquote [as alias]
def _cleanstr(self, str):
    # Note: the parameter 'str' shadows the builtin; kept as in the original source.
    str = str.strip()
    if str.find("function") == 0:
        pattern = r"=\"([^\"]+).*}\s*\((\d+)\)"
        args = re.search(pattern, str, re.DOTALL)
        if args:
            a = args.groups()

            def openload_re(match):
                c = match.group(0)
                b = ord(c) + int(a[1])
                return chr(b if (90 if c <= "Z" else 122) >= b else b - 26)

            str = re.sub(r"[a-zA-Z]", openload_re, a[0])
            str = urllib2.unquote(str)
    elif str.find("decodeURIComponent") == 0:
        str = re.sub(r"(^decodeURIComponent\s*\(\s*('|\"))|(('|\")\s*\)$)", "", str)
        str = urllib2.unquote(str)
    elif str.find("\"") == 0:
        str = re.sub(r"(^\")|(\"$)|(\".*?\")", "", str)
    elif str.find("'") == 0:
        str = re.sub(r"(^')|('$)|('.*?')", "", str)
    return str
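The openload_re callback in Example 4 is a Caesar-style shift: each ASCII letter moves forward by the captured offset, wrapping within A-Z or a-z, and the shifted string is then percent-decoded. A standalone sketch of the same shift (the offset and input are made up; with offset 13 it reduces to the familiar ROT13):

import re

def shift_letters(text, offset):
    # works for offsets up to 26, like the single-wrap logic in Example 4
    def shift(m):
        c = m.group(0)
        b = ord(c) + offset
        # wrap within the uppercase ('Z' = 90) or lowercase ('z' = 122) range
        return chr(b if (90 if c <= 'Z' else 122) >= b else b - 26)
    return re.sub(r'[a-zA-Z]', shift, text)

print(shift_letters('URYYB', 13))  # -> HELLO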
Example 5: obfuscation_unescape

# Required module: import urllib2 [as alias]
# Or: from urllib2 import unquote [as alias]
def obfuscation_unescape(page):
    soup = BeautifulSoup(page, "lxml")
    for scr in soup(["script"]):
        if re.search('unescape', str(scr), re.IGNORECASE):
            encoded = re.search("(?:%[0-9A-F][0-9A-F][^\"]+)", str(scr), re.IGNORECASE)
            decoded_content = urllib2.unquote(encoded.group(0))
            scr.replace_with(decoded_content)
    decoded_page = soup.decode(formatter=None)
    tmp_file = "/tmp/tmp.html"
    with open(tmp_file, "wb") as temp_f:
        # the with statement closes the file; no explicit close() needed
        temp_f.write(decoded_page)
    try:
        response = br.open('file://' + tmp_file)
        global using_selenium
        using_selenium = True
        return response
    except Exception:
        return False
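To make Example 5's mechanics concrete: JavaScript's unescape() and Python's unquote() undo the same %XX encoding, so decoding the captured run reveals the hidden markup. The script text below is invented for illustration:

import re
import urllib2

scr = 'document.write(unescape("%3Cb%3Ehidden%20content%3C%2Fb%3E"))'  # hypothetical
encoded = re.search("(?:%[0-9A-F][0-9A-F][^\"]+)", scr, re.IGNORECASE)
print(urllib2.unquote(encoded.group(0)))  # -> <b>hidden content</b>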
Example 6: log_url

# Required module: import urllib2 [as alias]
# Or: from urllib2 import unquote [as alias]
def log_url(log, message, url, level=logging.DEBUG):
    """Nicely logs the given url.

    Prints the url with the first part (protocol, host, port, authority,
    user info, path, ref) and then each query parameter in sequence.

    log: the log into which to write the message
    message: a message to print before the url
    url: the url to log
    level: (optional) the log level to use"""
    urls = url.split('?')
    log.log(level, message + unquote(urls[0]))
    if len(urls) > 1:
        for a in sorted(urls[1].split('&')):
            param = a.split('=')
            if len(param) < 2:
                param.append('')
            log.log(level, ' . %s = %s', unquote(param[0]), unquote(param[1]))
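A hypothetical call (the logger name and URL are invented, and log_url from Example 6 is assumed to be in scope) shows the output shape: the decoded base URL first, then each query parameter, sorted and decoded, on its own line:

import logging

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('example')
log_url(log, 'fetching: ', 'http://host/search?q=hello%20world&page=2')
# Expected log lines, roughly:
#   fetching: http://host/search
#    . page = 2
#    . q = hello world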
Example 7: build_httmock_functions

# Required module: import urllib2 [as alias]
# Or: from urllib2 import unquote [as alias]
def build_httmock_functions(mock_response_dir):
    print 'building mock functions'
    functions = []
    for filename in listdir(mock_response_dir):
        filepath = join(mock_response_dir, filename)
        if isfile(filepath):
            method = None
            for _method in ('GET', 'POST', 'PUT', 'DELETE', 'PATCH'):
                if filename.startswith(_method):
                    filename = filename[len(_method):]
                    method = _method
            url = urllib2.unquote(filename)
            parts = urlparse(url)
            params = {}
            if parts.query:
                print parts.query
                params = dict(parse_qsl(parts.query))
                print params
            with open(filepath) as f:
                content = f.read()
            functions.append(build_httmock_function(
                parts.scheme, parts.netloc, parts.path, content, params=params, method=method))
    return functions
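Example 7's filename-to-request mapping can be exercised in isolation; the mock filename below is invented, but it shows how unquote plus urlparse/parse_qsl recover the URL and query parameters the filename encodes:

import urllib2
from urlparse import urlparse, parse_qsl  # Python 2 module path

filename = 'GEThttp%3A%2F%2Fapi.example%2Fitems%3Fpage%3D1'  # hypothetical mock filename
url = urllib2.unquote(filename[len('GET'):])
parts = urlparse(url)
print(parts.netloc)                  # -> api.example
print(dict(parse_qsl(parts.query)))  # -> {'page': '1'}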
Example 8: __extract_video

# Required module: import urllib2 [as alias]
# Or: from urllib2 import unquote [as alias]
def __extract_video(self, item):
    sources = []
    for e in item:
        if isinstance(e, dict):
            for key in e:
                for item2 in e[key]:
                    if isinstance(item2, list):
                        for item3 in item2:
                            if isinstance(item3, list):
                                for item4 in item3:
                                    if isinstance(item4, unicode):
                                        item4 = item4.encode('utf-8')
                                    if isinstance(item4, basestring):
                                        item4 = urllib2.unquote(item4).decode('unicode_escape')
                                        for match in re.finditer('url=(?P<link>[^&]+).*?&itag=(?P<itag>[^&]+)', item4):
                                            link = match.group('link')
                                            itag = match.group('itag')
                                            quality = self.itag_map.get(itag, 'Unknown Quality [%s]' % itag)
                                            sources.append((quality, link))
                                        if sources:
                                            return sources
    return sources
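For reference, the url=...&itag=... entries that Example 8's inner regex targets look roughly like the invented string below; each numeric itag is translated into a quality label via self.itag_map:

import re
import urllib2

raw = 'url=http%3A%2F%2Fvideo.example%2Fv.mp4&itag=22'  # hypothetical stream entry
item = urllib2.unquote(raw)
for m in re.finditer('url=(?P<link>[^&]+).*?&itag=(?P<itag>[^&]+)', item):
    print('itag %s -> %s' % (m.group('itag'), m.group('link')))
# -> itag 22 -> http://video.example/v.mp4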
Example 9: _parse_gdocs

# Required module: import urllib2 [as alias]
# Or: from urllib2 import unquote [as alias]
def _parse_gdocs(self, html):
    urls = []
    for match in re.finditer(r'\[\s*"([^"]+)"\s*,\s*"([^"]+)"\s*\]', html):
        key, value = match.groups()
        if key == 'fmt_stream_map':
            items = value.split(',')
            for item in items:
                _source_itag, source_url = item.split('|')
                if isinstance(source_url, unicode):
                    source_url = source_url.encode('utf-8')
                source_url = source_url.decode('unicode_escape')
                quality = self.itag_map.get(_source_itag, 'Unknown Quality [%s]' % _source_itag)
                source_url = urllib2.unquote(source_url)
                urls.append((quality, source_url))
            return urls
    return urls
Example 10: myviru

# Required module: import urllib2 [as alias]
# Or: from urllib2 import unquote [as alias]
def myviru(self, url, referer, options):
    COOKIEFILE = ptv.getAddonInfo('path') + os.path.sep + "cookies" + os.path.sep + "myviru.cookie"
    query_data = {'url': url, 'use_host': False, 'use_header': False, 'use_cookie': True,
                  'cookiefile': COOKIEFILE, 'save_cookie': True, 'use_post': False, 'return_data': True}
    link = self.cm.getURLRequestData(query_data)
    linkvideo = ''
    match2 = re.compile("dataUrl:'(.*?)',").findall(link)
    if len(match2) > 0:
        mylink = 'http://myvi.ru' + urllib2.unquote(match2[0])
        query_data = {'url': mylink, 'use_host': False, 'use_header': False, 'use_cookie': True,
                      'cookiefile': COOKIEFILE, 'load_cookie': True, 'use_post': False, 'return_data': True}
        result = self.cm.getURLRequestData(query_data)
        result = urllib.unquote(result).replace('\\/', '/').replace('\n', '').replace('\'', '"').replace(' ', '')
        match3 = re.compile(r'"video":\[{"url":"([^"]+)"}\]').findall(result)
        if len(match3) > 0:
            mycook = self.cm.getCookieItem(COOKIEFILE, 'UniversalUserID')
            mycook = urllib.urlencode({'UniversalUserID': mycook})
            return '%s|Cookie=%s' % (match3[0], mycook)
    return linkvideo
Example 11: parseucaster

# Required module: import urllib2 [as alias]
# Or: from urllib2 import unquote [as alias]
def parseucaster(self, url, referer, options):
    print ("a", url, referer, options)
    req = urllib2.Request(url)
    req.add_header('Referer', 'http://' + referer)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:20.0) Gecko/20100101 Firefox/20.0')
    response = urllib2.urlopen(req)
    link = response.read()
    response.close()
    match = re.search('"file": "(.*?)"', link)
    print ("ZZZZzzzz", link)
    if match:
        link = urllib.unquote(match.group(1)) + ' pageUrl=http://aliez.tv/live/mlb/ swfUrl=http://player.longtailvideo.com/player.swf app=aliezlive-live live=true tcUrl=rtmp://play.aliez.com/aliezlive-live'
        return link
    else:
        return False
Example 12: parseraliez

# Required module: import urllib2 [as alias]
# Or: from urllib2 import unquote [as alias]
def parseraliez(self, url, referer, options):
    req = urllib2.Request(url)
    req.add_header('Referer', referer)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:20.0) Gecko/20100101 Firefox/20.0')
    response = urllib2.urlopen(req)
    link = response.read()
    response.close()
    match = re.search('"file":(.*?)"(.*?)"', link)
    print ("ZZZZzzzz", match, link)
    if match:
        # match.group(2) is only safe once a match has been found
        print match.group(2)
        link = urllib.unquote(match.group(2)) + ' pageUrl=http://aliez.tv/live/mlb/ swfUrl=http://player.longtailvideo.com/player.swf app=aliezlive-live live=true tcUrl=rtmp://play.aliez.com/aliezlive-live'
        return link
    else:
        return False
Example 13: parserputlive

# Required module: import urllib2 [as alias]
# Or: from urllib2 import unquote [as alias]
def parserputlive(self, url, referer, options):
    print ("a", url, referer, options)
    req = urllib2.Request(url)
    req.add_header('Referer', 'http://' + referer)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:20.0) Gecko/20100101 Firefox/20.0')
    response = urllib2.urlopen(req)
    link = response.read()
    response.close()
    print ("Link", link)
    match = re.compile(r'html\(unescape\("(.*?)"\)\);').findall(link)
    if len(match) > 0:
        print urllib.unquote(match[0])
        match1 = re.compile('src="(.*?)"').findall(urllib.unquote(match[0]))
        match2 = re.compile('streamer=(.*?)&').findall(urllib.unquote(match[0]))
        match3 = re.compile('file=(.*?)&').findall(urllib.unquote(match[0]))
        print ("Link", match1)
        print ("Link", match2)
        print ("Link", match3)
        return match2[0] + match3[0] + ' pageUrl=' + match1[0] + ' swfUrl=' + match1[0]

# parsertopupload
Example 14: myviru

# Required module: import urllib2 [as alias]
# Or: from urllib2 import unquote [as alias]
def myviru(self, url, referer, options):
    COOKIEFILE = self.cookieFileName('myviru')
    query_data = {'url': url, 'use_host': False, 'use_header': False, 'use_cookie': True,
                  'cookiefile': COOKIEFILE, 'save_cookie': True, 'use_post': False, 'return_data': True}
    link = self.cm.getURLRequestData(query_data)
    self.log.info('aaa %s' % link)
    linkvideo = ''
    match2 = re.compile("dataUrl:'(.*?)',").findall(link)
    if len(match2) > 0:
        mylink = 'http://myvi.ru' + urllib2.unquote(match2[0])
        query_data = {'url': mylink, 'use_host': False, 'use_header': False, 'use_cookie': True,
                      'cookiefile': COOKIEFILE, 'save_cookie': True, 'load_cookie': True, 'use_post': False, 'return_data': True}
        result = self.cm.getURLRequestData(query_data)
        result = urllib.unquote(result).replace('\\/', '/').replace('\n', '').replace('\'', '"').replace(' ', '')
        match3 = re.compile(r'"video":\[{"url":"([^"]+)"}\]').findall(result)
        if len(match3) > 0:
            self.log.info('aaa %s' % match3)
            mycook = self.cm.getCookieItem(COOKIEFILE, 'UniversalUserID')
            mycook = urllib.urlencode({'UniversalUserID': mycook})
            self.log.info('aaa %s' % mycook)
            return '%s|Cookie=%s' % (match3[0], mycook)
    return linkvideo
Example 15: parserVIDZER

# Required module: import urllib2 [as alias]
# Or: from urllib2 import unquote [as alias]
def parserVIDZER(self, url, referer, options):
    query_data = {'url': url, 'use_host': False, 'use_cookie': False, 'use_post': False, 'return_data': True}
    link = self.cm.getURLRequestData(query_data)
    match = re.search('href="(http[^"]+?getlink[^"]+?)"', link)
    if match:
        url = urllib.unquote(match.group(1))
        return url
    r = re.search('value="(.+?)" name="fuck_you"', link)
    r2 = re.search('name="confirm" type="submit" value="(.+?)"', link)
    r3 = re.search('<a href="/file/([^"]+?)" target', link)
    # all three matches are used below, so guard on all of them
    if r and r2 and r3:
        query_data = {'url': 'http://www.vidzer.net/e/' + r3.group(1) + '?w=631&h=425',
                      'use_host': False, 'use_cookie': False, 'use_post': True, 'return_data': True}
        postdata = {'confirm': r2.group(1), 'fuck_you': r.group(1)}
        link = self.cm.getURLRequestData(query_data, postdata)
        match = re.search("url: '([^']+?)'", link)
        if match:
            url = match.group(1)  # + '|Referer=http://www.vidzer.net/media/flowplayer/flowplayer.commercial-3.2.18.swf'
            return url
        else:
            return ''
    else:
        return ''