本文整理汇总了Python中resources.lib.modules.proxy.request函数的典型用法代码示例。如果您正苦于以下问题:Python request函数的具体用法?Python request怎么用?Python request使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了request函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: tvshow
def tvshow(self, imdb, tvdb, tvshowtitle, year):
    # Resolve a TV show to a site-relative URL: fetch a search token, query the
    # site through the proxy, filter candidates by title/year, and verify
    # ambiguous candidates against the IMDB id.  Returns a UTF-8 relative path,
    # or None on any failure (broad except is used as control flow here).
    try:
        # The search form embeds a hidden 'key' token that must accompany queries.
        key = urlparse.urljoin(self.base_link, self.key_link)
        key = proxy.request(key, 'searchform')
        key = client.parseDOM(key, 'input', ret='value', attrs = {'name': 'key'})[0]

        query = self.tvsearch_link % (urllib.quote_plus(cleantitle.query(tvshowtitle)), key)
        query = urlparse.urljoin(self.base_link, query)

        # First results page; append page 2 when pagination markers are present.
        result = str(proxy.request(query, 'index_item'))
        if 'page=2' in result or 'page%3D2' in result: result += str(proxy.request(query + '&page=2', 'index_item'))

        result = client.parseDOM(result, 'div', attrs = {'class': 'index_item.+?'})

        # Site titles look like 'Watch <Title> (<year>)'; accept year +/- 1.
        tvshowtitle = 'watch' + cleantitle.get(tvshowtitle)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [i for i in result if any(x in i[1] for x in years)]

        r = []
        for i in result:
            u = i[0]
            # Unwrap proxy redirect links: real URL sits in the 'u' or 'q'
            # query parameter when present; otherwise keep the href as-is.
            try: u = urlparse.parse_qs(urlparse.urlparse(u).query)['u'][0]
            except: pass
            try: u = urlparse.parse_qs(urlparse.urlparse(u).query)['q'][0]
            except: pass
            r += [(u, i[1])]

        # Exact title+year matches take priority over page-by-page probing.
        match = [i[0] for i in r if tvshowtitle == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]]

        match2 = [i[0] for i in r]
        match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]  # de-dupe, keep order
        if match2 == []: return

        for i in match2[:5]:
            try:
                if len(match) > 0: url = match[0] ; break
                # No exact match: fetch the candidate page and look for the IMDB id.
                r = proxy.request(urlparse.urljoin(self.base_link, i), 'tv_episode_item')
                if imdb in str(r): url = i ; break
            except:
                pass

        # Normalise to a site-relative path ('url' raises NameError if no
        # candidate matched, which the outer except turns into None).
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
示例2: tvshow
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    # Resolve a TV show to its seo slug.  Tries the JSON autocomplete endpoint
    # first; on a bad/empty JSON response falls back to scraping the HTML
    # search page through the proxy.  Returns a UTF-8 slug or None.
    try:
        t = cleantitle.get(tvshowtitle)

        q = urllib.quote_plus(cleantitle.query(tvshowtitle))
        p = urllib.urlencode({'term': q})

        r = client.request(self.search_link, post=p, XHR=True)
        try: r = json.loads(r)
        except: r = None
        # BUG FIX: an unconditional `r = None` here discarded the parsed JSON
        # and made the autocomplete branch below unreachable, always forcing
        # the proxy fallback.  Removed so the JSON path is used when the
        # endpoint responds (matches the sibling implementation elsewhere in
        # this file that uses the '/search/' fallback).

        if r:
            r = [(i['seo_url'], i['value'], i['label']) for i in r if 'value' in i and 'label' in i and 'seo_url' in i]
        else:
            # Fallback: scrape the search results page through the proxy.
            r = proxy.request(self.search_link_2 % q, 'tv shows')
            r = client.parseDOM(r, 'div', attrs = {'valign': '.+?'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), client.parseDOM(i, 'a')) for i in r]
            r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]

        # Keep entries whose label carries a 4-digit year; match title + year.
        r = [(i[0], i[1], re.findall('(\d{4})', i[2])) for i in r]
        r = [(i[0], i[1], i[2][-1]) for i in r if i[2]]
        r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]

        # First match wins; the slug is the last path segment of the seo URL.
        url = r[0][0]
        url = proxy.parse(url)
        url = url.strip('/').split('/')[-1]
        url = url.encode('utf-8')
        return url
    except:
        failure = traceback.format_exc()
        log_utils.log('XWatchSeries - Exception: \n' + str(failure))
        return
示例3: tvshow
def tvshow(self, imdb, tvdb, tvshowtitle, year):
    # Resolve a TV show to its seo slug.  Tries the JSON autocomplete endpoint
    # first (XHR header set explicitly); on a bad/empty response falls back to
    # scraping the search page via the proxy.  Returns a UTF-8 slug or None.
    try:
        t = cleantitle.get(tvshowtitle)

        q = urllib.quote_plus(cleantitle.query(tvshowtitle))
        p = urllib.urlencode({'term': q})

        h = {'X-Requested-With': 'XMLHttpRequest'}

        r = client.request(self.search_link, post=p, headers=h)
        try: r = json.loads(r)
        except: r = None

        if r:
            r = [(i['seo_url'], i['value'], i['label']) for i in r if 'value' in i and 'label' in i and 'seo_url' in i]
        else:
            # Fallback: scrape the search results page through the proxy.
            r = proxy.request(self.search_link_2 % q, '/search/')
            r = client.parseDOM(r, 'div', attrs = {'valign': '.+?'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), client.parseDOM(i, 'a')) for i in r]
            r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]

        # Keep entries whose label carries a 4-digit year; match title + year.
        r = [(i[0], i[1], re.findall('(\d{4})', i[2])) for i in r]
        r = [(i[0], i[1], i[2][-1]) for i in r if i[2]]
        r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]

        # First match wins; the slug is the last path segment of the seo URL.
        url = r[0][0]
        url = proxy.parse(url)
        url = url.strip('/').split('/')[-1]
        url = url.encode('utf-8')
        return url
    except:
        return
示例4: pftv_tvcache
def pftv_tvcache(self):
    """Scrape the site's A-Z listing and return (relative_url, clean_title) pairs.

    Returns a list of tuples, or None when the listing cannot be fetched.
    Entries that fail to parse are skipped silently.
    """
    try:
        listing = proxy.request(urlparse.urljoin(self.base_link, self.search_link), 'A-Z')
        entries = client.parseDOM(listing, 'li')

        shows = []
        for entry in entries:
            try:
                name = client.parseDOM(entry, 'a')[0]
                name = cleantitle.get(client.replaceHTMLCodes(name)).encode('utf-8')

                link = client.replaceHTMLCodes(client.parseDOM(entry, 'a', ret='href')[0])
                # Unwrap proxy redirect parameters when present ('u', then 'q').
                for param in ('u', 'q'):
                    try: link = urlparse.parse_qs(urlparse.urlparse(link).query)[param][0]
                    except: pass
                link = urlparse.urljoin(self.base_link, link)
                link = re.findall('(?://.+?|)(/.+)', link)[0].encode('utf-8')

                shows.append((link, name))
            except:
                pass

        return shows
    except:
        return
示例5: episode
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    # Resolve an episode's relative URL from a show page.  Matching order:
    # exact episode title + air date, then air date alone, then the
    # 'season-S-episode-E' slug.  Returns a UTF-8 relative path or None.
    try:
        if url == None: return

        url = urlparse.urljoin(self.base_link, url)

        result = proxy.request(url, 'tv_episode_item')
        result = client.parseDOM(result, 'div', attrs = {'class': 'tv_episode_item'})

        title = cleantitle.get(title)

        # Convert 'YYYY-MM-DD' to the site's 'MonthName D YYYY' form.  The
        # .replace chain runs on the two-digit month field only, so it cannot
        # clobber day/year digits.
        premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(premiered)[0]
        premiered = '%s %01d %s' % (premiered[1].replace('01','January').replace('02','February').replace('03','March').replace('04','April').replace('05','May').replace('06','June').replace('07','July').replace('08','August').replace('09','September').replace('10','October').replace('11','November').replace('12','December'), int(premiered[2]), premiered[0])

        # Build (href, episode_name_or_None, air_date_or_None) triples.
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'class': 'tv_episode_name'}), client.parseDOM(i, 'span', attrs = {'class': 'tv_num_versions'})) for i in result]
        result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
        result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
        result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]

        # Progressive fallback matching, most to least specific.
        url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
        if len(url) == 0: url = [i for i in result if premiered == i[2]]
        if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]

        url = url[0][0]
        url = proxy.parse(url)
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
示例6: episode
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    # Resolve an episode's relative URL from a show slug.  Matching order:
    # episode name + air date, name alone, air date alone; bail out (None)
    # if the match stays ambiguous.  Returns a UTF-8 relative path or None.
    try:
        if url == None: return

        url = '%s/serie/%s' % (self.base_link, url)

        r = proxy.request(url, 'tv shows')
        r = client.parseDOM(r, 'li', attrs = {'itemprop': 'episode'})

        t = cleantitle.get(title)

        # Build (href, last-word-of-episode-name_or_None, air_date_or_None)
        # triples; missing name/date fields become None placeholders.
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'itemprop': 'name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in r]
        r = [(i[0], i[1][0].split(' ')[-1], i[2]) for i in r if i[1]] + [(i[0], None, i[2]) for i in r if not i[1]]
        r = [(i[0], i[1], i[2][0]) for i in r if i[2]] + [(i[0], i[1], None) for i in r if not i[2]]
        r = [(i[0][0], i[1], i[2]) for i in r if i[0]]

        # Progressive fallback matching; an ambiguous result raises and
        # returns None rather than guessing.
        url = [i for i in r if t == cleantitle.get(i[1]) and premiered == i[2]][:1]
        if not url: url = [i for i in r if t == cleantitle.get(i[1])]
        if len(url) > 1 or not url: url = [i for i in r if premiered == i[2]]
        if len(url) > 1 or not url: raise Exception()

        url = url[0][0]
        url = proxy.parse(url)
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
示例7: sources
def sources(self, url, hostDict, hostprDict):
    """Collect hoster links from an episode page.

    Each link's real target is base64-encoded in the 'r' query parameter of a
    proxied href.  Only hosts present in hostDict are kept.  Returns a list of
    source dicts (possibly empty).
    """
    try:
        sources = []
        if url == None: return sources

        page = proxy.request(urlparse.urljoin(self.base_link, url), 'tv shows')

        hrefs = client.parseDOM(page, 'a', ret='href', attrs = {'target': '.+?'})
        # De-duplicate while preserving first-seen order.
        unique = []
        for href in hrefs:
            if href not in unique: unique.append(href)

        for href in unique:
            try:
                link = proxy.parse(href)
                link = urlparse.parse_qs(urlparse.urlparse(link).query)['r'][0]
                link = link.decode('base64')
                link = client.replaceHTMLCodes(link).encode('utf-8')

                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(link.strip().lower()).netloc)[0]
                if not host in hostDict: raise Exception()

                sources.append({'source': host.encode('utf-8'), 'quality': 'SD', 'language': 'en', 'url': link, 'direct': False, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
示例8: episode
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    # Resolve an episode's relative URL from a show page ('main_body' marker).
    # Matching order: episode title + air date, air date alone, then the
    # 'season-S-episode-E' slug.  Returns a UTF-8 relative path or None.
    try:
        if url == None: return

        url = urlparse.urljoin(self.base_link, url)

        result = proxy.request(url, 'main_body')
        result = client.parseDOM(result, 'div', attrs = {'class': 'tv_episode_item'})

        title = cleantitle.get(title)

        # Build (href, episode_name_or_None, air_date_or_None) triples;
        # missing fields become None placeholders.
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'class': 'tv_episode_name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in result]
        result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
        result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
        result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]

        # Progressive fallback matching, most to least specific.
        url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
        if len(url) == 0: url = [i for i in result if premiered == i[2]]
        if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]

        url = client.replaceHTMLCodes(url[0][0])
        url = proxy.parse(url)
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
示例9: movie
def movie(self, imdb, title, year):
    # Resolve a movie to a site URL: search by title, filter by title/year,
    # and verify ambiguous candidates against the IMDB id.  Returns a UTF-8
    # URL string or None.
    try:
        #query = self.search_link % (urllib.quote_plus(cleantitle.query(title)), str(int(year)-1), str(int(year)+1))
        #query = urlparse.urljoin(self.base_link, query)

        query = self.search_link % urllib.quote_plus(cleantitle.query(title))
        query = urlparse.urljoin(self.base_link, query)

        # First results page; append page 2 when pagination markers are present.
        result = str(proxy.request(query, 'movie_table'))
        if 'page=2' in result or 'page%3D2' in result: result += str(proxy.request(query + '&page=2', 'movie_table'))

        result = client.parseDOM(result, 'div', attrs = {'class': 'movie_table'})

        title = cleantitle.get(title)
        # Poster alt text carries '(<year>)'; accept year +/- 1.
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'img', ret='alt')) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [i for i in result if any(x in i[1] for x in years)]

        # Unwrap proxy redirect links: try 'q', then 'u' query parameters,
        # then fall back to the bare path.  NOTE(review): each step rewrites
        # the whole list or none of it (the comprehension raises on the first
        # miss) — order of these three fallbacks is load-bearing.
        try: result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)['q'][0], i[1]) for i in result]
        except: pass
        try: result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)['u'][0], i[1]) for i in result]
        except: pass
        try: result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]
        except: pass

        # Exact title+year matches take priority over page-by-page probing.
        match = [i[0] for i in result if title == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]]

        match2 = [i[0] for i in result]
        match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]  # de-dupe, keep order
        if match2 == []: return

        for i in match2[:5]:
            try:
                if len(match) > 0: url = match[0] ; break
                # No exact match: fetch the candidate page and look for the IMDB id.
                result = proxy.request(urlparse.urljoin(self.base_link, i), 'link_name')
                if imdb in str(result): url = i ; break
            except:
                pass

        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
示例10: movie
def movie(self, imdb, title, year):
    # Resolve a movie to a site-relative URL.  Searches by IMDB id first;
    # if that yields nothing, searches by title and filters by clean title,
    # year, and the same flag/rating image markers.  Returns UTF-8 or None.
    try:
        query = self.search_link % imdb
        query = urlparse.urljoin(self.base_link, query)

        r = proxy.request(query, 'flag')
        r = client.parseDOM(r, 'TR', attrs = {'id': 'coverPreview.+?'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'img', ret='src')) for i in r]

        # 'us_flag_' images appear to mark US/English releases and '5.gif'
        # presumably a quality/rating icon — TODO confirm against the site.
        r = [i for i in r if any('us_flag_' in x for x in i[1])]

        if len(r) > 0:
            r = [i for i in r if any('5.gif' in x for x in i[1])]
            r = [i[0][0] for i in r if len(i[0]) > 0][0]
        else:
            # Fallback: plain title search with the same filters, plus a
            # year check against the row's div text.
            query = self.search_link % urllib.quote_plus(title)
            query = urlparse.urljoin(self.base_link, query)

            r = proxy.request(query, 'flag')
            r = client.parseDOM(r, 'TR', attrs = {'id': 'coverPreview.+?'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'img', ret='src'), client.parseDOM(i, 'div', attrs = {'style': '.+?'}), client.parseDOM(i, 'a')) for i in r]
            r = [i for i in r if len(i[0]) > 0 and len(i[3]) > 0]
            r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[3][0])]
            r = [i for i in r if any('us_flag_' in x for x in i[1])]
            r = [i for i in r if any('5.gif' in x for x in i[1])]
            r = [i for i in r if any(year in x for x in i[2])]
            r = [i[0][0] for i in r if len(i[0]) > 0][0]

        url = client.replaceHTMLCodes(r)
        # Unwrap proxy redirect parameters when present ('u', then 'q').
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
        except: pass
        url = urlparse.urljoin(self.base_link, url)
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = url.encode('utf-8')
        return url
    except:
        return
示例11: tvshow
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    # Resolve a TV show to a site-relative URL: search, filter candidates by
    # title/year, and verify ambiguous candidates by scanning their pages for
    # the IMDB id.  Returns a UTF-8 relative path or None.
    try:
        query = self.tvsearch_link % urllib.quote_plus(cleantitle.query(tvshowtitle))
        query = urlparse.urljoin(self.base_link, query)

        # First results page; append page 2 when pagination markers are present.
        result = str(proxy.request(query, 'free movies'))
        if 'page=2' in result or 'page%3D2' in result: result += str(proxy.request(query + '&page=2', 'free movies'))

        result = client.parseDOM(result, 'div', attrs = {'class': 'item'})

        # Site titles look like 'Watch <Title> (<year>)'; accept year +/- 1.
        tvshowtitle = 'watch' + cleantitle.get(tvshowtitle)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [i for i in result if any(x in i[1] for x in years)]

        # proxy.parse unwraps the proxied href to the real link.
        r = [(proxy.parse(i[0]), i[1]) for i in result]

        # Exact title+year matches take priority over page-by-page probing.
        match = [i[0] for i in r if tvshowtitle == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]]

        match2 = [i[0] for i in r]
        match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]  # de-dupe, keep order
        if match2 == []: return

        for i in match2[:5]:
            try:
                if len(match) > 0: url = match[0] ; break
                # No exact match: scan the candidate page for IMDB ids.
                r = proxy.request(urlparse.urljoin(self.base_link, i), 'free movies')
                r = re.findall('(tt\d+)', r)
                if imdb in r: url = i ; break
            except:
                pass

        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
示例12: sources
def sources(self, url, hostDict, hostprDict):
    # Collect hoster links from a movie page.  Quality is scraped from the
    # page text; candidate links are every quoted string on the page whose
    # proxied form starts with http, with the real target base64-encoded in
    # the query string.  Returns a list of source dicts (possibly empty).
    try:
        sources = []

        if url == None: return sources

        # Numeric ids map to the site's canonical watch-page path.
        if url.isdigit(): url = '/watch-%s-online-free-%s.html' % (url, url)

        url = urlparse.urljoin(self.base_link, url)

        result = proxy.request(url, 'ovie')

        # Normalise the scraped quality label to CAM / SCR / SD.
        quality = re.compile('Quality(.+?)<').findall(result.replace('\n',''))
        quality = quality[0].strip() if quality else 'SD'
        if quality == 'CAM' or quality == 'TS': quality = 'CAM'
        elif quality == 'SCREENER': quality = 'SCR'
        else: quality = 'SD'

        dupes = []

        # Grab every single- or double-quoted string as a link candidate.
        links = re.findall('\'(.+?)\'', result) + re.findall('\"(.+?)\"', result)
        links = [proxy.parse(i) for i in links]
        links = [i for i in links if i.startswith('http')]
        links = [x for y,x in enumerate(links) if x not in links[:y]]  # de-dupe, keep order

        for i in links:
            try:
                url = i
                # Real target is base64-encoded in the query string
                # (Python 2 'base64' codec via str.decode).
                url = urlparse.urlparse(url).query
                url = url.decode('base64')
                url = re.findall('((?:http|https)://.+?/.+?)(?:&|$)', url)[0]
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                if url in dupes: raise Exception()
                dupes.append(url)

                # Keep only links whose host is in the allowed hoster list.
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                if not host in hostDict: raise Exception()
                host = host.encode('utf-8')

                sources.append({'source': host, 'quality': quality, 'provider': 'Movies25', 'url': url, 'direct': False, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
示例13: resolve
def resolve(self, url):
    """Follow a proxied redirect page and return its first external link.

    Fetches *url* through the proxy, takes the first rel="nofollow" anchor
    that does not point back at this site, unwraps proxy redirect parameters,
    and returns the target as a UTF-8 string (None on any failure).
    """
    try:
        page = proxy.request(url, 'nofollow')

        candidates = client.parseDOM(page, 'a', ret='href', attrs = {'rel': 'nofollow'})
        # Drop links that point back at the site itself.
        own_host = urlparse.urlparse(self.base_link).netloc
        candidates = [link for link in candidates if not own_host in link]

        target = client.replaceHTMLCodes(candidates[0])
        # Unwrap proxy redirect parameters when present ('u', then 'q').
        for key in ('u', 'q'):
            try: target = urlparse.parse_qs(urlparse.urlparse(target).query)[key][0]
            except: pass

        return target.encode('utf-8')
    except:
        return
示例14: sources
def sources(self, url, hostDict, hostprDict):
    # Collect hoster links from a movie page.  Quality is scraped from the
    # 'Links - Quality' section; each link's real target is base64-encoded
    # in the query string of a proxied href.  Returns a list of source dicts
    # (possibly empty).
    try:
        sources = []

        if url == None: return sources

        url = urlparse.urljoin(self.base_link, url)

        result = proxy.request(url, 'Links - Quality')
        result = result.replace('\n','')

        # Normalise the scraped quality label to CAM / SCR / SD.
        quality = re.compile('>Links - Quality(.+?)<').findall(result)[0]
        quality = quality.strip()
        if quality == 'CAM' or quality == 'TS': quality = 'CAM'
        elif quality == 'SCREENER': quality = 'SCR'
        else: quality = 'SD'

        # Each 'link_name' chunk holds one hoster entry.
        links = client.parseDOM(result, 'div', attrs = {'id': 'links'})[0]
        links = links.split('link_name')

        for i in links:
            try:
                url = client.parseDOM(i, 'a', ret='href')[0]

                # Unwrap proxy redirect parameters when present ('u', then 'q').
                try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
                except: pass
                try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
                except: pass

                # Real target is base64-encoded in the remaining query string.
                url = urlparse.urlparse(url).query
                url = base64.b64decode(url)
                url = re.findall('((?:http|https)://.+?/.+?)(?:&|$)', url)[0]
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                # Keep only links whose host is in the allowed hoster list.
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                if not host in hostDict: raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                sources.append({'source': host, 'quality': quality, 'provider': 'Movie25', 'url': url, 'direct': False, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
示例15: sources
def sources(self, url, hostDict, hostprDict):
    # Collect hoster links from an episode page.  Quality is read from the
    # 'wp-episode-meta' block; unreleased titles (coming-soon genre) are
    # skipped.  Returns a list of source dicts (possibly empty).
    try:
        sources = []

        if url == None: return sources

        url = urlparse.urljoin(self.base_link, url)

        r = proxy.request(url, 'episode-meta')

        meta = client.parseDOM(r, 'div', attrs = {'class': 'wp-episode-meta'})[0]
        meta = urllib.unquote_plus(meta)
        # Bail out entirely for not-yet-released titles.
        if 'genre/coming-soon' in meta: raise Exception()

        # Pull the 'quality' list item, strip tags, normalise to CAM/SCR/SD.
        quality = client.parseDOM(meta, 'li')
        quality = [re.sub('<.+?>|</.+?>', '', i) for i in quality]
        quality = [i.split(':')[-1].strip().upper() for i in quality if 'quality' in i.lower()]
        quality = quality[0] if quality else 'SD'
        if 'CAM' in quality or 'TS' in quality: quality = 'CAM'
        elif quality == 'SCREENER': quality = 'SCR'
        else: quality = 'SD'

        links = client.parseDOM(r, 'a', ret='href', attrs = {'target': '.+?'})
        links = [x for y,x in enumerate(links) if x not in links[:y]]  # de-dupe, keep order

        for i in links:
            try:
                url = i
                url = proxy.parse(url)
                url = url.encode('utf-8')

                # Keep only links whose host is in the allowed hoster list.
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                if not host in hostDict: raise Exception()
                host = host.encode('utf-8')

                sources.append({'source': host, 'quality': quality, 'provider': 'WMO', 'url': url, 'direct': False, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources