This article collects typical usage examples of the Python function resources.lib.modules.source_utils.label_to_quality. If you are unsure what label_to_quality does or how to call it, the curated code examples below should help.
Fifteen code examples of the label_to_quality function are shown below, ordered by popularity by default.
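Before the examples, here is a minimal sketch of the kind of mapping label_to_quality performs: it takes a resolution or quality label (such as '720', '1080p', or the height component of an HLS RESOLUTION attribute) and returns a coarse quality string. The helper name label_to_quality_sketch and the exact thresholds and return values below are illustrative assumptions, not the add-on's actual implementation.

import re

def label_to_quality_sketch(label):
    # Pull the numeric resolution out of labels like '720', '1080p', or '1920x1080'.
    match = re.search(r'(\d{3,4})(?!.*\d)', str(label))
    if not match:
        return 'SD'
    height = int(match.group(1))
    # Map the height to a coarse quality bucket (assumed thresholds).
    if height >= 2160:
        return '4K'
    if height >= 1080:
        return '1080p'
    if height >= 720:
        return 'HD'
    return 'SD'

# Example: label_to_quality_sketch('1280x720') -> 'HD'

The scrapers below all follow the same pattern: scrape a label from a page or playlist, pass it through label_to_quality, and store the result in the 'quality' field of each source dict.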
Example 1: sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources

        ref = urlparse.urljoin(self.base_link, url)
        url = urlparse.urljoin(self.base_link, self.ajax_link % re.findall('-(\w+)$', ref)[0])

        headers = {'Referer': ref, 'User-Agent': client.randomagent()}

        result = client.request(url, headers=headers, post='')
        result = base64.decodestring(result)
        result = json.loads(result).get('playinfo', [])

        if isinstance(result, basestring):
            result = result.replace('embed.html', 'index.m3u8')

            base_url = re.sub('index\.m3u8\?token=[\w\-]+', '', result)

            r = client.request(result, headers=headers)
            r = [(i[0], i[1]) for i in re.findall('#EXT-X-STREAM-INF:.*?RESOLUTION=\d+x(\d+)[^\n]+\n([^\n]+)', r, re.DOTALL) if i]
            r = [(source_utils.label_to_quality(i[0]), i[1] + source_utils.append_headers(headers)) for i in r]
            r = [{'quality': i[0], 'url': base_url + i[1]} for i in r]

            for i in r: sources.append({'source': 'CDN', 'quality': i['quality'], 'language': 'de', 'url': i['url'], 'direct': True, 'debridonly': False})
        elif result:
            result = [i.get('link_mp4') for i in result]
            result = [i for i in result if i]

            for i in result:
                try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'de', 'url': i, 'direct': True, 'debridonly': False})
                except: pass

        return sources
    except:
        return
Example 2: sources
def sources(self, url, hostDict, locDict):
    sources = []
    try:
        if url == None: return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)
        # query = urlparse.urljoin(self.base_link, self.ajax_link)
        # post = urllib.urlencode({'action': 'sufi_search', 'search_string': title})

        result = client.request(query)
        r = client.parseDOM(result, 'div', attrs={'id': 'showList'})
        r = re.findall(r'<a\s+style="color:white;"\s+href="([^"]+)">([^<]+)', r[0])
        r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1]) and data['year'] in i[1]][0]
        url = r[0]

        result = client.request(url)
        r = re.findall(r'video\s+id="\w+.*?src="([^"]+)".*?data-res="([^"]+)', result, re.DOTALL)

        for i in r:
            try:
                # map the scraped data-res label to a quality string
                q = source_utils.label_to_quality(i[1])
                sources.append({'source': host, 'quality': q, 'language': 'en', 'url': i[0], 'direct': True, 'debridonly': False})
            except:
                pass

        return sources
    except Exception as e:
        return sources
Example 3: sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []

        if url == None: return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        if 'tvshowtitle' in data:
            url = '%s/episodes/%s-%01dx%01d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
            year = re.findall('(\d{4})', data['premiered'])[0]

            r = client.request(url)

            y = client.parseDOM(r, 'span', attrs={'class': 'date'})[0]
            y = re.findall('(\d{4})', y)[0]

            if not y == year: raise Exception()
        else:
            r = client.request(url)

        result = re.findall('''['"]file['"]:['"]([^'"]+)['"],['"]label['"]:['"]([^'"]+)''', r)

        for i in result:
            url = i[0].replace('\/', '/')
            sources.append({'source': 'gvideo', 'quality': source_utils.label_to_quality(i[1]), 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})

        return sources
    except:
        return
Example 4: sources
def sources(self, url, hostDict, hostprDict):
    sources = []

    try:
        if not url:
            return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        url = data.get('url')
        episode = int(data.get('episode', 1))

        r = client.request(urlparse.urljoin(self.base_link, url))
        r = dom_parser.parse_dom(r, 'div', attrs={'id': 'streams'})

        rels = dom_parser.parse_dom(r, 'ul', attrs={'class': 'nav'})
        rels = dom_parser.parse_dom(rels, 'li')
        rels = dom_parser.parse_dom(rels, 'a', attrs={'href': re.compile('#stream_\d*')}, req='href')
        rels = [(re.findall('stream_(\d+)', i.attrs['href']), re.findall('flag-(\w{2})', i.content)) for i in rels if i]
        rels = [(i[0][0], ['subbed'] if i[1][0] != 'de' else []) for i in rels if i[0] and 'de' in i[1]]

        for id, info in rels:
            rel = dom_parser.parse_dom(r, 'div', attrs={'id': 'stream_%s' % id})
            rel = [(dom_parser.parse_dom(i, 'div', attrs={'id': 'streams_episodes_%s' % id}), dom_parser.parse_dom(i, 'tr')) for i in rel]
            rel = [(i[0][0].content, [x for x in i[1] if 'fa-desktop' in x.content]) for i in rel if i[0] and i[1]]
            rel = [(i[0], dom_parser.parse_dom(i[1][0].content, 'td')) for i in rel if i[1]]
            rel = [(i[0], re.findall('\d{3,4}x(\d{3,4})$', i[1][0].content)) for i in rel if i[1]]
            rel = [(i[0], source_utils.label_to_quality(i[1][0])) for i in rel if len(i[1]) > 0]

            for html, quality in rel:
                try:
                    s = dom_parser.parse_dom(html, 'a', attrs={'href': re.compile('#streams_episodes_%s_\d+' % id)})
                    s = [(dom_parser.parse_dom(i, 'div', attrs={'data-loop': re.compile('\d+')}, req='data-loop'), dom_parser.parse_dom(i, 'span')) for i in s]
                    s = [(i[0][0].attrs['data-loop'], [x.content for x in i[1] if '<strong' in x.content]) for i in s if i[0]]
                    s = [(i[0], re.findall('<.+?>(\d+)</.+?> (.+?)$', i[1][0])) for i in s if len(i[1]) > 0]
                    s = [(i[0], i[1][0]) for i in s if len(i[1]) > 0]
                    s = [(i[0], int(i[1][0]), re.findall('Episode (\d+):', i[1][1], re.IGNORECASE)) for i in s if len(i[1]) > 1]
                    s = [(i[0], i[1], int(i[2][0]) if len(i[2]) > 0 else -1) for i in s]
                    s = [(i[0], i[2] if i[2] >= 0 else i[1]) for i in s]
                    s = [i[0] for i in s if i[1] == episode][0]

                    enc = dom_parser.parse_dom(html, 'div', attrs={'id': re.compile('streams_episodes_%s_%s' % (id, s))}, req='data-enc')[0].attrs['data-enc']

                    hosters = dom_parser.parse_dom(html, 'a', attrs={'href': re.compile('#streams_episodes_%s_%s' % (id, s))})
                    hosters = [dom_parser.parse_dom(i, 'i', req='class') for i in hosters]
                    hosters = [re.findall('hoster-(\w+)', ' '.join([x.attrs['class'] for x in i])) for i in hosters if i][0]
                    hosters = [(source_utils.is_host_valid(re.sub('(co|to|net|pw|sx|tv|moe|ws|icon)$', '', i), hostDict), i) for i in hosters]
                    hosters = [(i[0][1], i[1]) for i in hosters if i[0] and i[0][0]]

                    info = ' | '.join(info)

                    for source, hoster in hosters:
                        sources.append({'source': source, 'quality': quality, 'language': 'de', 'url': [enc, hoster], 'info': info, 'direct': False, 'debridonly': False, 'checkquality': True})
                except:
                    pass

        return sources
    except:
        return sources
Example 5: sources
def sources(self, url, hostDict, locDict):
    sources = []
    try:
        if url == None: return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)

        result = client.request(query, mobile=True, timeout=20, output='extended')
        r = json.loads(result[0])
        r = r['data']['films']
        years = [str(data['year']), str(int(data['year']) + 1), str(int(data['year']) - 1)]
        #print r

        if 'episode' in data:
            r = [i for i in r if cleantitle.get(title) == cleantitle.get(i['title'])]
            r = [(i, re.sub('[^0-9]', '', str(i['publishDate']))) for i in r]
            r = [i[0] for i in r if any(x in i[1] for x in years)][0]

            result = client.request(urlparse.urljoin(self.base_link, self.sources_link % r['id']), mobile=True, headers=result[4], output='extended')
            r = json.loads(result[0])
            r = [i for i in r['data']['chapters'] if i['title'].replace('0', '').lower() == 's%se%s' % (data['season'], data['episode'])][0]
        else:
            r = [i for i in r if cleantitle.get(title) == cleantitle.get(i['title'])]
            r = [i for i in r if any(x in i['publishDate'] for x in years)][0]
            #print r

            result = client.request(urlparse.urljoin(self.base_link, self.sources_link % r['id']), mobile=True, headers=result[4], output='extended')
            r = json.loads(result[0])
            r = r['data']['chapters'][0]

        result = client.request(urlparse.urljoin(self.base_link, self.stream_link % r['id']), mobile=True, headers=result[4], output='extended')
        r = json.loads(result[0])
        r = [(i['quality'], i['server'], self._decrypt(i['stream'])) for i in r['data']]

        sources = []
        for i in r:
            try:
                valid, hoster = source_utils.is_host_valid(i[2], hostDict)
                if not valid: continue
                urls, host, direct = source_utils.check_directstreams(i[2], hoster)
                for x in urls:
                    q = x['quality'] if host == 'gvideo' else source_utils.label_to_quality(i[0])
                    u = x['url'] if host == 'gvideo' else i[2]
                    sources.append({'source': host, 'quality': q, 'language': 'en', 'url': u, 'direct': direct, 'debridonly': False})
            except:
                pass

        return sources
    except Exception as e:
        return sources
Example 6: sources
def sources(self, url, hostDict, hostprDict):
    sources = []

    try:
        if not url:
            return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        url = data.get('url')
        season = data.get('season')
        episode = data.get('episode')
        abs_episode = 0

        if season and episode:
            abs_episode = str(tvmaze.tvMaze().episodeAbsoluteNumber(data.get('tvdb'), int(season), int(episode)))

        url = urlparse.urljoin(self.base_link, url)

        r = client.request(url)
        r = r.decode('cp1251').encode('utf-8')

        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'players'}, req='data-player')
        r = [(i.attrs['data-player'], dom_parser.parse_dom(i, 'a', req='href')) for i in r]
        r = [(i[0], i[1][0].attrs['href']) for i in r if i[1]]

        for post_id, play_url in r:
            i = client.request(play_url, referer=url, output='extended')

            headers = i[3]
            headers.update({'Cookie': i[2].get('Set-Cookie')})

            i = client.request(urlparse.urljoin(self.base_link, self.player_link), post={'post_id': post_id}, headers=headers, referer=i, XHR=True)
            i = json.loads(i).get('message', {}).get('translations', {}).get('flash', {})

            for title, link in i.iteritems():
                try:
                    link = self.decode_direct_media_url(link)

                    if link.endswith('.txt'):
                        link = self.decode_direct_media_url(client.request(link))
                        link = json.loads(link).get('playlist', [])
                        link = [i.get('playlist', []) for i in link]
                        link = [x.get('file') for i in link for x in i if (x.get('season') == season and x.get('serieId') == episode) or (x.get('season') == '0' and x.get('serieId') == abs_episode)][0]

                    urls = [(source_utils.label_to_quality(q), self.format_direct_link(link, q)) for q in self.get_qualitys(link)]
                    urls = [{'quality': x[0], 'url': x[1]} for x in urls if x[0] in ['SD', 'HD']]  # filter premium

                    for i in urls: sources.append({'source': 'CDN', 'quality': i['quality'], 'info': title, 'language': 'ru', 'url': i['url'], 'direct': True, 'debridonly': False})
                except:
                    pass

        return sources
    except:
        return sources
Example 7: sources
def sources(self, url, hostDict, hostprDict):
    sources = []

    try:
        if not url:
            return sources

        url = urlparse.urljoin(self.base_link, url)

        r = client.request(url)
        r = dom_parser.parse_dom(r, 'div', attrs={'id': 'mediaplayer'})
        r = [i.attrs['src'] for i in dom_parser.parse_dom(r, 'iframe', req='src')]

        for i in r:
            try:
                if 'vidnow.' in i:
                    i = client.request(i, referer=url)

                    gdata = [(match[1], match[0]) for match in re.findall('''["']?label\s*["']?\s*[:=]\s*["']?([^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)''', i, re.DOTALL)]
                    gdata += [(match[0], match[1]) for match in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?([^"',]+)''', i, re.DOTALL)]
                    gdata = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in gdata]

                    for u, q in gdata:
                        try:
                            tag = directstream.googletag(u)

                            if tag:
                                sources.append({'source': 'gvideo', 'quality': tag[0].get('quality', 'SD'), 'language': 'de', 'url': u, 'direct': True, 'debridonly': False})
                            else:
                                sources.append({'source': 'CDN', 'quality': q, 'language': 'de', 'url': u, 'direct': True, 'debridonly': False})
                        except:
                            pass

                    i = dom_parser.parse_dom(i, 'div', attrs={'id': 'myElement'})
                    i = dom_parser.parse_dom(i, 'iframe', req='src')[0].attrs['src']

                valid, host = source_utils.is_host_valid(i, hostDict)
                if not valid: continue

                urls = []
                if 'google' in i: host = 'gvideo'; direct = True; urls = directstream.google(i)
                if 'google' in i and not urls and directstream.googletag(i): host = 'gvideo'; direct = True; urls = [{'quality': directstream.googletag(i)[0]['quality'], 'url': i}]
                elif 'ok.ru' in i: host = 'vk'; direct = True; urls = directstream.odnoklassniki(i)
                elif 'vk.com' in i: host = 'vk'; direct = True; urls = directstream.vk(i)
                else: direct = False; urls = [{'quality': 'SD', 'url': i}]

                for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'ko', 'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
Example 8: mz_server
def mz_server(self, url):
    try:
        scraper = cfscrape.create_scraper()
        urls = []
        data = scraper.get(url).content
        data = re.findall('''file:\s*["']([^"']+)",label:\s*"(\d{3,}p)"''', data, re.DOTALL)
        for url, label in data:
            label = source_utils.label_to_quality(label)
            if label == 'SD': continue
            urls.append({'url': url, 'quality': label})
        return urls
    except:
        return url
Example 9: sources
def sources(self, url, hostDict, hostprDict):
    sources = []

    try:
        if not url:
            return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        url = urlparse.urljoin(self.base_link, data.get('url', ''))
        imdb = data.get('imdb')
        season = data.get('season')
        episode = data.get('episode')

        if season and episode and imdb:
            r = urllib.urlencode({'val': 's%se%s' % (season, episode), 'IMDB': imdb})
            r = client.request(urlparse.urljoin(self.base_link, self.episode_link), XHR=True, post=r)
        else:
            r = client.request(url)

        l = dom_parser.parse_dom(r, 'select', attrs={'id': 'sel_sprache'})
        l = dom_parser.parse_dom(l, 'option', req='id')

        r = [(dom_parser.parse_dom(r, 'div', attrs={'id': i.attrs['id']})) for i in l if i.attrs['id'] == 'deutsch']
        r = [(i[0], dom_parser.parse_dom(i[0], 'option', req='id')) for i in r]
        r = [(id.attrs['id'], dom_parser.parse_dom(content, 'div', attrs={'id': id.attrs['id']})) for content, ids in r for id in ids]
        r = [(re.findall('hd(\d{3,4})', i[0]), dom_parser.parse_dom(i[1], 'a', req='href')) for i in r if i[1]]
        r = [(i[0][0] if i[0] else '0', [x.attrs['href'] for x in i[1]]) for i in r if i[1]]
        r = [(source_utils.label_to_quality(i[0]), i[1]) for i in r]

        for quality, urls in r:
            for link in urls:
                try:
                    data = urlparse.parse_qs(urlparse.urlparse(link).query, keep_blank_values=True)

                    if 'm' in data:
                        data = data.get('m')[0]
                        link = base64.b64decode(data)

                    link = link.strip()

                    valid, host = source_utils.is_host_valid(link, hostDict)
                    if not valid: continue

                    sources.append({'source': host, 'quality': quality, 'language': 'de', 'url': link, 'direct': False, 'debridonly': False, 'checkquality': True})
                except:
                    pass

        return sources
    except:
        return sources
Example 10: sources
def sources(self, url, hostDict, hostprDict):
    sources = []

    try:
        if not url:
            return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        url = urlparse.urljoin(self.base_link, data.get('url'))
        episode = data.get('episode')

        r = client.request(url)

        aj = self.__get_ajax_object(r)

        b = dom_parser.parse_dom(r, 'img', attrs={'class': 'dgvaup'}, req='data-img')[0].attrs['data-img']

        if episode:
            r = dom_parser.parse_dom(r, 'a', attrs={'class': 'btn-stream-ep', 'data-episode': episode}, req=['data-episode', 'data-server'])
        else:
            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'lang-de'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'movie'})
            r = dom_parser.parse_dom(r, 'a', attrs={'class': 'btn-stream'}, req=['data-episode', 'data-server'])

        r = [(i.attrs['data-episode'], i.attrs['data-server']) for i in r]

        for epi, server in r:
            try:
                x = {'action': aj.get('load_episodes'), 'episode': epi, 'pid': aj.get('postid'), 'server': server, 'nonce': aj.get('nonce'), 'b': b}
                x = client.request(aj.get('ajax_url'), post=x, XHR=True, referer=url)
                x = json.loads(x)

                q = source_utils.label_to_quality(x.get('q'))
                x = json.loads(base64.decodestring(x.get('u')))

                u = source_utils.evp_decode(x.get('ct'), base64.decodestring(b), x.get('s').decode("hex"))
                u = u.replace('\/', '/').strip('"')

                valid, host = source_utils.is_host_valid(u, hostDict)
                if not valid: continue

                sources.append({'source': host, 'quality': q, 'language': 'de', 'url': u, 'direct': False, 'debridonly': False, 'checkquality': True})
            except:
                pass

        return sources
    except:
        return sources
Example 11: sources
def sources(self, url, hostDict, hostprDict):
    sources = []

    try:
        if not url:
            return sources

        url = urlparse.urljoin(self.base_link, url)

        result = client.request(url)
        result = re.sub(r'[^\x00-\x7F]+', ' ', result)

        pages = dom_parser.parse_dom(result, 'div', attrs={'class': 'item'}, req='data-id')
        pages = [i.attrs['data-id'] for i in pages]

        for page in pages:
            try:
                url = urlparse.urljoin(self.base_link, self.video_link)

                result = client.request(url, post={'id': page})
                if not result: continue

                url = dom_parser.parse_dom(result, 'iframe', req='src')[0].attrs['src']
                if url.startswith('//'): url = 'http:' + url
                if url.startswith('/'): url = urlparse.urljoin(self.base_link, url)

                valid, host = source_utils.is_host_valid(url, hostDict)
                if valid: sources.append({'source': host, 'quality': 'HD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})

                if '.asp' not in url: continue

                result = client.request(url)

                captions = re.search('kind\s*:\s*(?:\'|\")captions(?:\'|\")', result)
                if not captions: continue

                matches = [(match[0], match[1]) for match in re.findall('''["']?label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)''', result, re.DOTALL | re.I)]
                matches += [(match[1], match[0]) for match in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)''', result, re.DOTALL | re.I)]
                result = [(source_utils.label_to_quality(x[0]), x[1].replace('\/', '/')) for x in matches]
                result = [(i[0], i[1]) for i in result if not i[1].endswith('.vtt')]

                for quality, url in result: sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
Example 12: sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url == None: return sources

        hostDict += ['akamaized.net', 'google.com', 'picasa.com', 'blogspot.com']
        result = client.request(url, timeout=10)

        dom = dom_parser.parse_dom(result, 'a', req='data-video')
        urls = [i.attrs['data-video'] if i.attrs['data-video'].startswith('https') else 'https:' + i.attrs['data-video'] for i in dom]

        for url in urls:
            dom = []
            if 'vidnode.net' in url:
                result = client.request(url, timeout=10)
                dom = dom_parser.parse_dom(result, 'source', req=['src', 'label'])
                dom = [(i.attrs['src'] if i.attrs['src'].startswith('https') else 'https:' + i.attrs['src'], i.attrs['label']) for i in dom if i]
            elif 'ocloud.stream' in url:
                result = client.request(url, timeout=10)
                base = re.findall('<base href="([^"]+)">', result)[0]
                hostDict += [base]
                dom = dom_parser.parse_dom(result, 'a', req=['href', 'id'])
                dom = [(i.attrs['href'].replace('./embed', base + 'embed'), i.attrs['id']) for i in dom if i]
                dom = [(re.findall("var\s*ifleID\s*=\s*'([^']+)", client.request(i[0]))[0], i[1]) for i in dom if i]

            if dom:
                try:
                    for r in dom:
                        valid, hoster = source_utils.is_host_valid(r[0], hostDict)
                        if not valid: continue
                        # map the scraped label to a quality string
                        quality = source_utils.label_to_quality(r[1])
                        urls, host, direct = source_utils.check_directstreams(r[0], hoster)
                        for x in urls:
                            size = source_utils.get_size(x['url']) if direct else None
                            if size: sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False, 'info': size})
                            else: sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                except: pass
            else:
                valid, hoster = source_utils.is_host_valid(url, hostDict)
                if not valid: continue
                try:
                    url.decode('utf-8')
                    sources.append({'source': hoster, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                except:
                    pass

        return sources
    except:
        return sources
Example 13: __get_moonwalk
def __get_moonwalk(url, ref, info=''):
    try:
        host = urlparse.urlparse(url)
        host = '%s://%s' % (host.scheme, host.netloc)

        r = client.request(url, referer=ref, output='extended')

        headers = r[3]
        headers.update({'Cookie': r[2].get('Set-Cookie')})
        r = r[0]

        csrf = re.findall('name="csrf-token" content="(.*?)"', r)[0]
        story = re.findall('''["']X-CSRF-Token["']\s*:\s*[^,]+,\s*["']([\w\-]+)["']\s*:\s*["'](\w+)["']''', r)[0]
        headers.update({'X-CSRF-Token': csrf, story[0]: story[1]})

        for i in re.findall('window\[(.*?)\]', r):
            r = r.replace(i, re.sub('''["']\s*\+\s*["']''', '', i))

        varname, post_url = re.findall('''var\s*(\w+)\s*=\s*["'](.*?/all/?)["']\s*;''', r)[0]
        jsid = re.findall('''\.post\(\s*%s\s*,\s*([^(\);)]+)''' % varname, r)[0]
        jsdata = re.findall('(?:var\s*)?%s\s*=\s*({.*?})' % re.escape(jsid), r, re.DOTALL)[0]
        jsdata = re.sub(r'([\{\s,])(\w+)(:)', r'\1"\2"\3', jsdata)
        jsdata = re.sub(r'''(?<=:)\s*\'''', ' "', jsdata)
        jsdata = re.sub(r'''(?<=\w)\'''', '"', jsdata)
        jsdata = re.sub(''':\s*\w+\s*\?[^,}]+''', ': 0', jsdata)
        jsdata = re.sub(''':\s*[a-zA-Z]+[^,}]+''', ': 0', jsdata)
        jsdata = json.loads(jsdata)

        mw_key = re.findall('''var\s*mw_key\s*=\s*["'](\w+)["']''', r)[0]
        newatt = re.findall('''%s\[["']([^=]+)["']\]\s*=\s*["']([^;]+)["']''' % re.escape(jsid), r)[0]
        newatt = [re.sub('''["']\s*\+\s*["']''', '', i) for i in newatt]
        jsdata.update({'mw_key': mw_key, newatt[0]: newatt[1]})

        r = client.request(urlparse.urljoin(host, post_url), post=jsdata, headers=headers, XHR=True)
        r = json.loads(r).get('mans', {}).get('manifest_m3u8')

        r = client.request(r, headers=headers)
        r = [(i[0], i[1]) for i in re.findall('#EXT-X-STREAM-INF:.*?RESOLUTION=\d+x(\d+).*?(http.*?(?:\.abst|\.f4m|\.m3u8)).*?', r, re.DOTALL) if i]
        r = [(source_utils.label_to_quality(i[0]), i[1] + '|%s' % urllib.urlencode(headers)) for i in r]
        r = [{'quality': i[0], 'url': i[1], 'info': info} for i in r]

        return r
    except:
        return []
Example 14: sources
def sources(self, url, hostDict, locDict):
    sources = []
    req = requests.Session()
    headers = {'User-Agent': client.randomagent(), 'Origin': 'http://imdark.com', 'Referer': 'http://imdark.com',
               'X-Requested-With': 'XMLHttpRequest'}
    try:
        if url == None: return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

        query = urllib.quote_plus(title).lower()
        result = req.get(self.base_link, headers=headers).text
        darksearch = re.findall(r'darkestsearch" value="(.*?)"', result)[0]
        result = req.get(self.base_link + self.search_link % (query, darksearch), headers=headers).text

        r = client.parseDOM(result, 'div', attrs={'id': 'showList'})
        r = re.findall(r'<a\s+style="color:white;"\s+href="([^"]+)">([^<]+)', r[0])
        r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1]) and data['year'] in i[1]][0]
        url = r[0]
        print("INFO - " + url)

        result = req.get(url, headers=headers).text
        nonce = re.findall(r"nonce = '(.*?)'", result)[0]
        tipi = re.findall(r'tipi = (.*?);', result)[0]

        postData = {'action': 'getitsufiplaying', 'tipi': tipi, 'jhinga': nonce}
        result = req.post(self.base_link + self.ajax_link, data=postData, headers=headers).text

        r = re.findall(r'"src":"(.*?)","type":"(.*?)","data-res":"(\d*?)"', result)
        linkHeaders = 'Referer=http://imdark.com/&User-Agent=' + urllib.quote(client.randomagent()) + '&Cookie=' + urllib.quote('mykey123=mykeyvalue')

        for i in r:
            print(str(i))
            try:
                q = source_utils.label_to_quality(i[2])
                sources.append({'source': 'CDN', 'quality': q, 'info': i[1].replace('\\', ''), 'language': 'en',
                                'url': i[0].replace('\\', '') + '|' + linkHeaders,
                                'direct': True, 'debridonly': False})
            except:
                traceback.print_exc()
                pass

        for i in sources:
            print("INFO SOURCES " + str(i))

        return sources
    except:
        traceback.print_exc()
        return sources
Example 15: sources
def sources(self, url, hostDict, hostprDict):
    sources = []

    try:
        if not url:
            return sources

        url = urlparse.urljoin(self.base_link, url)

        r = client.request(url)
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'watch_video'})
        r = [i.attrs['data-src'] for i in dom_parser.parse_dom(r, 'iframe', req='data-src')]

        for i in r:
            try:
                if 'k-vid' in i:
                    i = client.request(i, referer=url)
                    i = dom_parser.parse_dom(i, 'div', attrs={'class': 'videocontent'})

                    gvid = dom_parser.parse_dom(i, 'source', req='src')
                    gvid = [(g.attrs['src'], g.attrs['label'] if 'label' in g.attrs else 'SD') for g in gvid]
                    gvid = [(x[0], source_utils.label_to_quality(x[1])) for x in gvid if x[0] != 'auto']

                    for u, q in gvid:
                        try:
                            tag = directstream.googletag(u)

                            if tag:
                                sources.append({'source': 'gvideo', 'quality': tag[0].get('quality', 'SD'), 'language': 'ko', 'url': u, 'direct': True, 'debridonly': False})
                            else:
                                sources.append({'source': 'CDN', 'quality': q, 'language': 'ko', 'url': u, 'direct': True, 'debridonly': False})
                        except:
                            pass

                    i = dom_parser.parse_dom(i, 'iframe', attrs={'id': 'embedvideo'}, req='src')[0].attrs['src']

                valid, host = source_utils.is_host_valid(i, hostDict)
                if not valid: continue

                sources.append({'source': host, 'quality': 'SD', 'language': 'ko', 'url': i, 'direct': False, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources