本文整理汇总了Python中resources.lib.modules.client.request函数的典型用法代码示例。如果您正苦于以下问题:Python request函数的具体用法?Python request怎么用?Python request使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了request函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: sources
def sources(self, url, hostDict, hostprDict):
    """Scrape stream sources from a VexMovies title page.

    url -- page URL previously resolved by movie()/episode(); may be None.
    Returns a list of source dicts ({'source', 'quality', 'language',
    'url', 'direct', 'debridonly'}); empty on failure (errors are logged).
    """
    sources = []
    try:
        if url is None:
            return sources
        html = client.request(url)
        # The first iframe on the page hosts the player.
        iframe = re.compile(r'<iframe src="(.+?)"', re.DOTALL).findall(html)[0]
        if 'consistent.stream' in iframe:
            html = client.request(iframe)
            page = re.compile(r""":title=["'](.+?)["']\>""").findall(html)[0]
            decoded = client.replaceEscapeCodes(page)
            links = re.compile(r'"sources.+?"(http.+?)"', re.DOTALL).findall(decoded)
            for link in links:
                link = link.replace('\\', '')
                # Infer quality from markers embedded in the URL itself.
                if '1080' in link:
                    quality = '1080p'
                elif '720' in link:
                    quality = '720p'
                else:
                    quality = 'DVD'
                # Host label = bare second-level domain, title-cased.
                host = link.split('//')[1].replace('www.', '')
                host = host.split('/')[0].split('.')[0].title()
                sources.append({'source': host, 'quality': quality, 'language': 'en',
                                'url': link, 'direct': False, 'debridonly': False})
        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('VexMovies - Exception: \n' + str(failure))
        return sources
示例2: sources
def sources(self, url, hostDict, hostprDict):
    """Resolve direct gvideo sources for a movie or TV episode.

    For episodes, the URL is rebuilt from tvshowtitle/season/episode and the
    page's displayed year is checked against the premiere year; movies use
    the stored URL as-is.

    Fix: the bare ``except`` previously returned ``None``; it now returns
    the (possibly empty) sources list so callers can always iterate it.
    """
    sources = []
    try:
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        if 'tvshowtitle' in data:
            url = '%s/episodes/%s-%01dx%01d/' % (
                self.base_link, cleantitle.geturl(data['tvshowtitle']),
                int(data['season']), int(data['episode']))
            year = re.findall(r'(\d{4})', data['premiered'])[0]
            r = client.request(url)
            # Guard against the site serving the wrong season/episode page.
            y = client.parseDOM(r, 'span', attrs={'class': 'date'})[0]
            y = re.findall(r'(\d{4})', y)[0]
            if not y == year:
                raise Exception()
        else:
            r = client.request(url)
        result = re.findall(r'''['"]file['"]:['"]([^'"]+)['"],['"]label['"]:['"]([^'"]+)''', r)
        for i in result:
            link = i[0].replace('\/', '/')
            sources.append({'source': 'gvideo', 'quality': source_utils.label_to_quality(i[1]),
                            'language': 'en', 'url': link, 'direct': True, 'debridonly': False})
        return sources
    except:
        return sources
示例3: movie
def movie(self, imdb, title, localtitle, aliases, year):
    """Locate the site URL for a movie, using the film's primary language
    (read from its IMDb page) to pick the right search section, then
    matching on cleaned title and release year. Returns None on failure."""
    try:
        lang_map = {'hi': 'hindi', 'ta': 'tamil', 'te': 'telugu', 'ml': 'malayalam',
                    'kn': 'kannada', 'bn': 'bengali', 'mr': 'marathi', 'pa': 'punjabi'}
        # Determine the primary language from the IMDb title page links.
        imdb_page = client.request('http://www.imdb.com/title/%s/' % imdb)
        hrefs = re.findall('href\s*=\s*[\'|\"](.+?)[\'|\"]', imdb_page)
        hrefs = [h for h in hrefs if 'primary_language' in h]
        parsed = [urlparse.parse_qs(urlparse.urlparse(h).query) for h in hrefs]
        codes = [p['primary_language'] for p in parsed if 'primary_language' in p]
        lang = lang_map[codes[0][0]]

        query = urlparse.urljoin(self.base_link, self.search_link % (lang, urllib.quote_plus(title)))
        wanted = cleantitle.get(title)
        html = client.request(query)
        # Each <li> carries (href, heading, info-with-year); keep complete rows.
        rows = client.parseDOM(html, 'li')
        rows = [(client.parseDOM(i, 'a', ret='href'),
                 client.parseDOM(i, 'h3'),
                 client.parseDOM(i, 'div', attrs={'class': 'info'})) for i in rows]
        rows = [(i[0][0], i[1][0], i[2][0]) for i in rows if i[0] and i[1] and i[2]]
        rows = [(re.findall('(\d+)', i[0]), i[1], re.findall('(\d{4})', i[2])) for i in rows]
        rows = [(i[0][0], i[1], i[2][0]) for i in rows if i[0] and i[2]]
        match = [i[0] for i in rows if wanted == cleantitle.get(i[1]) and year == i[2]][0]
        return str(match)
    except:
        return
示例4: sky_list
def sky_list(self, num, channel, id):
    """Append a (title, year, channel, num) tuple for the programme that is
    currently on air on a Sky channel; silently does nothing on failure."""
    try:
        # First, find out what is on air right now so we can match it in the schedule.
        now = client.request(self.sky_now_link % id, timeout='10')
        now = json.loads(now)
        match = now['listings'][id][0]['url']

        date_str = (self.uk_datetime).strftime('%Y-%m-%d')
        hour = int((self.uk_datetime).strftime('%H'))
        # The schedule is split into four six-hour segments per day
        # (00-05 -> 0, 06-11 -> 1, 12-17 -> 2, 18-23 -> 3).
        segment = hour // 6

        schedule_url = self.sky_programme_link % (id, str(date_str), str(segment))
        schedule = client.request(schedule_url, timeout='10')
        schedule = json.loads(schedule)['listings'][id]
        entry = [i for i in schedule if i['url'] == match][0]

        year = entry['d']
        year = re.findall('[(](\d{4})[)]', year)[0].strip()
        year = year.encode('utf-8')

        title = entry['t']
        title = title.replace('(%s)' % year, '').strip()
        title = client.replaceHTMLCodes(title)
        title = title.encode('utf-8')

        self.items.append((title, year, channel, num))
    except:
        pass
示例5: cloudflareCookie
def cloudflareCookie(url, post, headers, mobile, safe, timeout):
    """Solve Cloudflare's "I'm Under Attack Mode" JavaScript challenge.

    Fetches the challenge page, re-evaluates the arithmetic the challenge
    script builds line by line, then submits the answer to
    /cdn-cgi/l/chk_jschl and returns the clearance cookie string.
    Returns None on any failure.

    NOTE(review): uses ``eval`` on text scraped from the response; the
    operands come through parseJSString, but confirm that helper sanitizes
    its input before trusting this on hostile pages.
    """
    try:
        result = client.request(url, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout, error=True)
        # Hidden token that must be echoed back with the computed answer.
        jschl = re.compile('name="jschl_vc" value="(.+?)"/>').findall(result)[0]
        # Seed expression assigned inside setTimeout(...) in the challenge script.
        init = re.compile('setTimeout\(function\(\){\s*.*?.*:(.*?)};').findall(result)[-1]
        # The sequence of "+=" / "-=" / "*=" statements that mutate the seed.
        builder = re.compile(r"challenge-form\'\);\s*(.*)a.v").findall(result)[0]
        decryptVal = parseJSString(init)
        lines = builder.split(';')
        for line in lines:
            if len(line)>0 and '=' in line:
                sections=line.split('=')
                line_val = parseJSString(sections[1])
                # sections[0][-1] is the operator character (+, -, *) that
                # preceded '=' in the original JS compound assignment.
                decryptVal = int(eval(str(decryptVal)+sections[0][-1]+str(line_val)))
        # Cloudflare adds the hostname length to the computed value.
        answer = decryptVal + len(urlparse.urlparse(url).netloc)
        query = '%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s' % (url, jschl, answer)
        if 'type="hidden" name="pass"' in result:
            # Some challenge variants also require a "pass" field.
            passval = re.compile('name="pass" value="(.*?)"').findall(result)[0]
            query = '%s/cdn-cgi/l/chk_jschl?pass=%s&jschl_vc=%s&jschl_answer=%s' % (url, urllib.quote_plus(passval), jschl, answer)
        # Cloudflare rejects answers submitted sooner than ~5 seconds.
        time.sleep(5)
        cookie = client.request(query, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout, output='cookie', error=True)
        return cookie
    except:
        pass
示例6: __get_movie_url
def __get_movie_url(self, data):
    """Build the final fetcher URL for a movie through the site's JSON API:
    search for the title, read the encrypted stream hash from the details
    payload, decrypt it, and plug the token into the fetcher path."""
    try:
        # Look up the internal movie id via the search endpoint.
        query = data['title'].lower().replace(' ', '+')
        search_url = urlparse.urljoin(self.base_link, self.movie_search % query)
        movie_id = json.loads(client.request(search_url, headers=self.headers))[0]['id']

        # The details payload carries an encrypted stream token.
        details_url = urlparse.urljoin(self.base_link, self.movie_details % movie_id)
        details = json.loads(client.request(details_url, headers=self.headers))
        token = self.__decrypt(details['langs'][0]['sources'][0]['hash'])

        return urlparse.urljoin(self.base_link, self.fetcher % token)
    except Exception:
        return
示例7: sources
def sources(self, url, hostDict, hostprDict):
    """Collect direct gvideo streams by following the page's player iframe
    and reassembling the /player/ URL from hidden k1/k2 values plus a
    number taken from the body's style attribute."""
    sources = []
    try:
        if url == None: return sources
        page_url = urlparse.urljoin(self.base_link, url)
        html = client.request(page_url)
        # Follow the player iframe.
        frame = client.parseDOM(html, 'div', attrs={'class': 'player_wraper'})
        frame = client.parseDOM(frame, 'iframe', ret='src')[0]
        frame = urlparse.urljoin(page_url, frame)
        html = client.request(frame, referer=page_url)
        # Hidden k2/k1 divs and the last number in the body style form the player path.
        a = client.parseDOM(html, 'div', ret='value', attrs={'id': 'k2'})[-1]
        b = client.parseDOM(html, 'div', ret='value', attrs={'id': 'k1'})[-1]
        c = client.parseDOM(html, 'body', ret='style')[0]
        c = re.findall('(\d+)', c)[-1]
        player = urlparse.urljoin(page_url, '/player/%s?s=%s&e=%s' % (a, b, c))
        html = client.request(player, referer=page_url)
        for link in re.findall('"(?:url|src)"\s*:\s*"(.+?)"', html):
            try:
                sources.append({'source': 'gvideo',
                                'quality': directstream.googletag(link)[0]['quality'],
                                'language': 'en', 'url': link,
                                'direct': True, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
示例8: __search
def __search(self, titles, year):
    """Search the site for one of *titles* and return the stripped path of
    the entry whose shown or original title matches and whose detail-page
    year equals *year*; None otherwise."""
    try:
        query = self.search_link % (cleantitle.getsearch(titles[0].replace(' ', '%20')))
        query = urlparse.urljoin(self.base_link, query)
        wanted = [cleantitle.get(i) for i in set(titles) if i][0]
        listing = client.request(query)
        items = client.parseDOM(listing, 'li', attrs={'class': 'item everyone-item over_online haveTooltip'})
        for item in items:
            shown = client.parseDOM(item, 'a', ret='title')[0]
            href = client.parseDOM(item, 'a', ret='href')[0]
            page = client.request(href)
            # Year and original (untranslated) title only appear on the detail page.
            page_year = re.findall('<p><span>Año:</span>(\d{4})', page)[0]
            original = re.findall('movie-text">.+?h2.+?">\((.+?)\)</h2>', page, re.DOTALL)[0]
            original, shown = cleantitle.get(original), cleantitle.get(shown)
            if (wanted in shown or wanted in original) and page_year == year:
                link = dom_parser.parse_dom(item, 'a', req='href')
                return source_utils.strip_domain(link[0][0]['href'])
        return
    except:
        return
示例9: sources
def sources(self, url, hostDict, hostprDict):
    """List external hoster links found behind an elreyxhd redirect page;
    every accepted link is tagged Spanish ('es'/'LAT') at 'HD' quality."""
    sources = []
    try:
        if not url:
            return sources
        page = client.request(urlparse.urljoin(self.base_link, url))
        redirect = re.findall("'(http://www.elreyxhd.+?)'", page, re.DOTALL)[0]
        hoster_page = client.request(redirect)
        for link in client.parseDOM(hoster_page, 'a', ret='href'):
            # Keep only external hoster URLs.
            if 'http' not in link:
                continue
            if 'elrey' in link:
                continue
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid:
                continue
            sources.append({'source': host, 'quality': 'HD', 'language': 'es',
                            'url': link, 'info': 'LAT', 'direct': False,
                            'debridonly': False})
        return sources
    except:
        return sources
示例10: __search
def __search(self, titles, year, imdb):
    """Resolve the site path for a title, filtering candidates by year and,
    when several remain, confirming each candidate's detail page links to
    the expected IMDb id.

    Fix: ``url`` was previously unbound (NameError, masked by the bare
    except) when multiple candidates existed but none matched the IMDb id;
    it is now initialized to None so that case cleanly returns None.
    """
    try:
        query = self.search_link % (urllib.quote_plus(titles[0]))
        query = urlparse.urljoin(self.base_link, query)
        t = [cleantitle.get(i) for i in set(titles) if i]
        # Accept the exact year, either neighbouring year, or "no year" (0).
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'movie_cell'})
        r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'bottom'}),
              dom_parser.parse_dom(i, 'div', attrs={'class': 'year'})) for i in r]
        r = [(dom_parser.parse_dom(i[0], 'a', req=['href', 'title']),
              re.findall('[(](\d{4})[)]', i[1][0].content)) for i in r if i[0] and i[1]]
        r = [(i[0][0].attrs['href'], i[0][0].content, i[1][0]) for i in r if i[0] and i[1]]
        r = [(i[0], i[1].lower(), i[2]) for i in r if i[2] in y]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # entries with a year before those without
        r = [i[0] for i in r if cleantitle.get(i[1]) in t]

        url = None
        if len(r) > 1:
            # Disambiguate by looking for the expected tt-id on each detail page.
            for i in r:
                data = client.request(urlparse.urljoin(self.base_link, i))
                data = dom_parser.parse_dom(data, 'a', attrs={'name': re.compile('.*/tt\d+.*')}, req='name')
                data = [re.findall('.+?(tt\d+).*?', d.attrs['name']) for d in data]
                data = [d[0] for d in data if len(d) > 0 and d[0] == imdb]
                if len(data) >= 1:
                    url = i
        else:
            url = r[0]
        if url:
            return source_utils.strip_domain(url)
    except:
        return
示例11: sources
def sources(self, url, hostDict, locDict):
    """Scrape direct <video> sources for a title found in the site's showList.

    Fixes: the original append referenced an undefined name ``host``, so the
    inner bare ``except`` silently discarded every source; it also computed a
    quality label via source_utils.label_to_quality and then discarded it in
    favor of a hardcoded 'SD'. The sources are direct links, so the source
    tag is now the literal 'direct' and the computed quality is used.
    """
    sources = []
    try:
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        query = urlparse.urljoin(self.base_link, self.search_link % (urllib.quote_plus(title)))
        result = client.request(query)
        listing = client.parseDOM(result, 'div', attrs={'id': 'showList'})
        entries = re.findall(r'<a\s+style="color:white;"\s+href="([^"]+)">([^<]+)', listing[0])
        # Keep the first entry whose cleaned title matches and mentions the year.
        entry = [i for i in entries
                 if cleantitle.get(title) == cleantitle.get(i[1]) and data['year'] in i[1]][0]
        page = client.request(entry[0])
        videos = re.findall(r'video\s+id="\w+.*?src="([^"]+)".*?data-res="([^"]+)', page, re.DOTALL)
        for src, res in videos:
            try:
                quality = source_utils.label_to_quality(res)
                sources.append({'source': 'direct', 'quality': quality, 'language': 'en',
                                'url': src, 'direct': True, 'debridonly': False})
            except:
                pass
        return sources
    except Exception:
        return sources
示例12: sources
def sources(self, url, hostDict, hostprDict):
    """Enumerate gvideo streams for every episode entry on the page via the
    site's /io/1.0/stream JSON API."""
    sources = []
    try:
        if url == None: return sources
        page_url = urlparse.urljoin(self.base_link, url)
        headers = {'User-Agent': client.agent()}
        resp = client.request(page_url, headers=headers, output='extended')
        # The data-* attributes on episode anchors hold the stream query params;
        # rewrite the JSON-ish blob into a plain query string.
        eps = client.parseDOM(resp[0], 'ul', attrs={'class': 'episodes'})
        eps = client.parseDOM(eps, 'a', ret='data.+?')
        eps = [client.replaceHTMLCodes(e).replace(':', '=').replace(',', '&').replace('"', '').strip('{').strip('}')
               for e in eps]
        for params in eps:
            try:
                api = urlparse.urljoin(self.base_link, '/io/1.0/stream?%s' % params)
                payload = json.loads(client.request(api))
                for src in [s['src'] for s in payload['streams']]:
                    try:
                        sources.append({'source': 'gvideo',
                                        'quality': directstream.googletag(src)[0]['quality'],
                                        'language': 'en', 'url': src,
                                        'direct': True, 'debridonly': False})
                    except:
                        pass
            except:
                pass
        return sources
    except:
        return sources
示例13: sources
def sources(self, url, hostDict, hostprDict):
    """Collect hoster links from the language/quality tabs of a title page,
    resolving goo.gl-style shortened links to the netu.tv embed player.

    Fix: the quality test ``'HD' if 'HD' or 'BR' in qual else 'SD'`` always
    evaluated to 'HD' because the string literal 'HD' is truthy; it now
    checks whether the tab's label actually contains 'HD' or 'BR'.
    """
    sources = []
    try:
        if not url:
            return sources
        query = urlparse.urljoin(self.base_link, url)
        r = client.request(query)
        q = client.parseDOM(r, 'ul', attrs={'class': 'tabs'})[0]
        # (language code, quality label) per tab, aligned by index with the tab contents.
        matches = re.compile(r're">\d+.+?class="(\w{2})".+?c">([^>]+)<', re.DOTALL).findall(q)
        urls_id = re.compile(r'<div id="tab\d+"\s*class="tab_content"><script>(\w+)\("([^"]+)"\)</script>', re.DOTALL).findall(r)
        for i in range(0, len(urls_id)):
            lang, info = self.get_lang_by_type(matches[i][0])
            qual = matches[i][1]
            qual = 'HD' if ('HD' in qual or 'BR' in qual) else 'SD'
            url, host = self.url_function(urls_id[i][1], urls_id[i][0])
            if 'goo' in url:
                # Shortened links hide a netu.tv video key; resolve to the embed player.
                data = client.request(url)
                url = re.findall(r'var\s*videokeyorig\s*=\s*"(.+?)"', data, re.DOTALL)[0]
                url, host = 'http://hqq.tv/player/embed_player.php?vid=%s' % (url), 'netu.tv'
            sources.append({'source': host, 'quality': qual, 'language': lang,
                            'url': url, 'info': info, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
示例14: __search
def __search(self, titles, year, content):
    """Search the site for *titles* and return the stripped path of the match.

    content -- 'movies' selects the movies tab of the results; anything
    else selects the series tab.
    Returns None when nothing matches or on any error.

    NOTE(review): the ``i[0]['title']`` / ``x[0][0]['href']`` indexing
    assumes a specific dom_parser result shape — verify against the
    project's dom_parser version before touching this.
    """
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.getsearch(titles[0])))
        query = urlparse.urljoin(self.base_link, query)
        t = [cleantitle.get(i) for i in set(titles) if i][0] #cleantitle.get(titles[0])
        r = client.request(query)
        r = client.parseDOM(r, 'div', attrs={'class': 'tab-content clearfix'})
        if content == 'movies':
            r = client.parseDOM(r, 'div', attrs={'id': 'movies'})
        else:
            r = client.parseDOM(r, 'div', attrs={'id': 'series'})
        data = dom_parser.parse_dom(r, 'figcaption')
        for i in data:
            title = i[0]['title']
            title = cleantitle.get(title)
            if title in t:
                x = dom_parser.parse_dom(i, 'a', req='href')
                return source_utils.strip_domain(x[0][0]['href'])
            else:
                # Caption title did not match; confirm via the detail page's
                # "<h1>Title (year)</h1>" header before accepting the link.
                url = dom_parser.parse_dom(i, 'a', req='href')
                data = client.request(url[0][0]['href'])
                data = re.findall('<h1><a.+?">(.+?)\((\d{4})\).*?</a></h1>', data, re.DOTALL)[0]
                if titles[0] in data[0] and year == data[1]: return source_utils.strip_domain(url[0][0]['href'])
        return
    except:
        return
示例15: episode
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Resolve the page URL for a single episode: search for the show's
    season page, then pick the matching episode link out of its details
    block. Returns None on failure."""
    try:
        if url == None: return
        url = urlparse.parse_qs(url)
        url = dict([(k, url[k][0]) if url[k] else (k, '') for k in url])
        url['premiered'], url['season'], url['episode'] = premiered, season, episode
        try:
            # Search for "<show>-season-N" and keep the hit whose title matches.
            clean_title = cleantitle.geturl(url['tvshowtitle']) + '-season-%d' % int(season)
            search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
            search_results = client.request(search_url)
            hits = client.parseDOM(search_results, 'div', {'id': 'movie-featured'})
            hits = [(client.parseDOM(h, 'a', ret='href'), re.findall('<b><i>(.+?)</i>', h)) for h in hits]
            hits = [(h[0][0], h[1][0]) for h in hits if cleantitle.get(h[1][0]) == cleantitle.get(clean_title)]
            url = hits[0][0]
        except:
            pass
        page = client.request(url)
        details = client.parseDOM(page, 'div', attrs={'id': 'details'})
        pairs = zip(client.parseDOM(details, 'a'), client.parseDOM(details, 'a', ret='href'))
        # The anchor whose text equals the episode number points at the episode page.
        matches = [(label, href) for label, href in pairs if label == str(int(episode))]
        return matches[0][1]
    except:
        return