本文整理汇总了Python中resources.lib.libraries.client.source函数的典型用法代码示例。如果您正苦于以下问题:Python source函数的具体用法?Python source怎么用?Python source使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了source函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: resolve
def resolve(self, url):
    """Resolve an embed page URL to a playable media URL.

    Fetches the page at *url*, extracts the inner iframe src, then peels off
    one or more layers of window.atob() base64 encoding to reach the raw
    <source src=...> link, handing it to resolvers.request. Falls back to
    resolving the first plain src="..." iframe. On total failure the last
    value of *url* is returned unresolved.
    """
    link = client.source(url)
    url = re.compile('src="(.+?)" style').findall(link)[0]
    link = client.source(url)
    try:
        url = re.compile("window.atob\('(.+?)'\)\)").findall(link)[0]
        # The payload may be base64-wrapped several times; count the atob()
        # calls on the page and decode that many layers.
        func_count = len(re.findall('window\.atob', link))
        for _i in xrange(func_count):
            url = base64.decodestring(url)
        url = re.compile("<source src='(.+?)'").findall(url)[0]
        # BUG FIX: resolvers.request() was previously invoked twice -- once
        # just to log the result and once for the return value, doubling the
        # network work. Resolve once and reuse. (Also removed a leftover
        # debug print of the atob count.)
        resolved = resolvers.request(url)
        control.log(">> u2 %s |ENcoded %s" % (url, resolved))
        url = resolved
    except:
        try:
            url = re.compile('src="(.+?)"').findall(link)[0]
            host = urlparse.urlparse(url).netloc
            host = host.replace('www.', '').replace('embed.', '')
            host = host.rsplit('.', 1)[0]
            host = host.lower()
            host = client.replaceHTMLCodes(host)
            host = host.encode('utf-8')
            url = resolvers.request(url)
        except:
            pass
    return url
示例2: get_sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Collect 1080p/HD Google Video sources for an Afdah page URL."""
    sources = []
    try:
        if url == None:
            return sources
        # Rotate between the two mirrors to spread load.
        self.base_link = random.choice([self.base_link_1, self.base_link_2])
        page = urlparse.urljoin(self.base_link, url)
        html = client.source(page)
        video_id = re.compile('video_id *= *[\'|\"](.+?)[\'|\"]').findall(html)[0]
        info = client.source(urlparse.urljoin(self.base_link, self.info_link), post={'video_id': video_id})
        # The google stream bundle is one '&'-separated chunk of the reply.
        bundle = [chunk for chunk in info.split('&') if 'google' in chunk][0]
        bundle = urllib.unquote_plus(bundle)
        candidates = [urllib.unquote_plus(part.split('|')[-1]) for part in bundle.split(',')]
        candidates = [googleplus.tag(link)[0] for link in candidates]
        for item in candidates:
            if item['quality'] in ['1080p', 'HD']:
                sources.append({'source': 'GVideo', 'quality': item['quality'], 'provider': 'Afdah', 'url': item['url']})
        return sources
    except:
        return sources
示例3: get_sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Scrape host links (one per table row) from an Alltube listing page."""
    try:
        sources = []
        if url == None:
            return sources
        page = urlparse.urljoin(self.base_link, url)
        html = client.source(page)
        entries = []
        for row in client.parseDOM(html, 'tr'):
            entries.append((
                client.parseDOM(row, 'a', attrs={'class': 'watch'}, ret='data-iframe')[0],
                client.parseDOM(row, 'img', ret='alt')[0],
                client.parseDOM(row, 'td', attrs={'class': 'text-center'})[0],
            ))
        for iframe_b64, host_name, vtype in entries:
            try:
                # data-iframe carries a base64-encoded page URL.
                inner = client.source(iframe_b64.decode('base64'))
                link = client.parseDOM(inner, 'iframe', ret='src')[0]
                link = link.encode('utf-8')
                print ("Q", videoquality.solvequality(link), link)
                sources.append({'source': host_name, 'quality': 'SD', 'provider': 'Alltube', 'url': link, 'vtype': vtype})
            except:
                pass
        return sources
    except:
        return sources
示例4: get_sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Build the source list for a Muchmoviesv2 movie/episode page.

    *url* may carry a trailing '?episode=N' marker for TV episodes; it is
    stripped off and used to filter the server list. Quality is read from
    the page's quality badge (cam/ts -> CAM, hd -> HD, otherwise SD).
    """
    try:
        sources = []
        if url == None: return sources

        url = urlparse.urljoin(self.base_link, url)

        # Distinguish movie pages from episode pages by the ?episode= suffix.
        content = re.compile('(.+?)\?episode=\d*$').findall(url)
        content = 'movie' if len(content) == 0 else 'episode'
        try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
        except: pass

        url = urlparse.urljoin(self.base_link, url) + '/watching.html'
        result = client.source(url)

        movie = client.parseDOM(result, 'div', ret='movie-id', attrs = {'id': 'media-player'})[0]
        control.log('####### %s MOVIE' % movie)

        try:
            quality = client.parseDOM(result, 'span', attrs = {'class': 'quality'})[0].lower()
            control.log('####### %s MOVIE quality ' % quality)
        except: quality = 'hd'
        if quality == 'cam' or quality == 'ts': quality = 'CAM'
        elif quality == 'hd': quality = 'HD'
        else: quality = 'SD'

        url = '/movie/loadepisodes/%s' % movie
        url = urlparse.urljoin(self.base_link, url)
        result = client.source(url)

        result = client.parseDOM(result, 'div', attrs = {'class': 'les-content'})
        result = zip(client.parseDOM(result, 'a', ret='onclick'), client.parseDOM(result, 'a', ret='episode-id'), client.parseDOM(result, 'a'))
        # Each tuple: (server id, data id, episode id, episode number).
        # NOTE: a redundant identity comprehension rebuilding the same tuples
        # was removed here, along with dead commented-out server branches.
        result = [(re.sub('[^0-9]', '', i[0].split(',')[0]), re.sub('[^0-9]', '', i[0].split(',')[-1]), i[1], ''.join(re.findall('(\d+)', i[2])[:1])) for i in result]

        if content == 'episode': result = [i for i in result if i[3] == '%01d' % int(episode)]

        # Server ids 2..11 are tagged as Google Video hosts.
        links = [('movie/load_episode/%s/%s' % (i[2], i[1]), 'gvideo') for i in result if 2 <= int(i[0]) <= 11]
        for i in links: sources.append({'source': i[1], 'quality': quality, 'provider': 'Muchmoviesv2', 'url': i[0]})

        # Server id 14 is tagged as openload.
        links = [('movie/loadEmbed/%s/%s' % (i[2], i[1]), 'openload') for i in result if i[0] == '14']
        for i in links: sources.append({'source': i[1], 'quality': quality, 'provider': 'Muchmoviesv2', 'url': i[0]})

        control.log('####### MOVIE sources %s' % sources)
        return sources
    except:
        return sources
示例5: get_episode
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    """Map a season/episode pair onto an episode page path via TVDB's
    absolute episode number."""
    try:
        if url == None:
            return
        # Ask TVDB for the absolute episode number of this season/episode.
        tvdb_link = self.tvdb_link % (self.tvdb_key, tvdb, int(season), int(episode))
        xml = client.source(tvdb_link)
        absolute = client.parseDOM(xml, "absolute_number")[0]
        page = client.source(urlparse.urljoin(self.base_link, url))
        page = page.decode("iso-8859-1").encode("utf-8")
        rows = client.parseDOM(page, "tr", attrs={"class": ""})
        pairs = [
            (client.parseDOM(r, "a", ret="href")[0], client.parseDOM(r, "td", attrs={"class": "epnum"})[0])
            for r in rows
        ]
        # Pick the row whose epnum equals TVDB's absolute number.
        match = [href for href, epnum in pairs if absolute == epnum][0]
        # Keep only the path when the href is scheme-relative.
        try:
            match = re.compile("//.+?(/.+)").findall(match)[0]
        except:
            pass
        match = client.replaceHTMLCodes(match)
        return match.encode("utf-8")
    except:
        return
示例6: resolve
def resolve(self, url):
    """Resolve a hash_id/referer query string into the highest-quality
    stream URL, normalising the scheme per the host's SSL requirement."""
    try:
        params = urlparse.parse_qs(url)
        params = dict([(k, params[k][0]) if params[k] else (k, "") for k in params])
        headers = {"X-Requested-With": "XMLHttpRequest"}
        now = time.localtime()
        episode_url = "/ajax/film/episode?hash_id=%s&f=&p=%s" % (params["hash_id"], now.tm_hour + now.tm_min)
        episode_url = urlparse.urljoin(self.base_link, episode_url)
        payload = json.loads(client.source(episode_url, headers=headers, referer=params["referer"]))
        query = {"flash": 1, "json": 1, "s": now.tm_min, "link": payload["videoUrlHash"], "_": int(time.time())}
        grabber = payload["grabber"] + "?" + urllib.urlencode(query)
        streams = json.loads(client.source(grabber, headers=headers, referer=episode_url))
        labelled = [(re.findall("(\d+)", s["label"]), s["file"]) for s in streams if "label" in s and "file" in s]
        labelled = [(int(nums[0]), f) for nums, f in labelled if len(nums) > 0]
        # Highest numeric label (resolution) wins.
        best = sorted(labelled, key=lambda k: k[0])[-1][1]
        final = client.request(best, output="geturl")
        if "requiressl=yes" in final:
            final = final.replace("http://", "https://")
        else:
            final = final.replace("https://", "http://")
        return final
    except:
        return
示例7: get_sources
def get_sources(self, url):
    """Extract Hotstar HLS variants (with the session cookie attached)
    from a playback-info API response."""
    logger.debug('%s SOURCES URL %s' % (self.__class__, url))
    try:
        quality = ''
        sources = []
        if url == None:
            return sources
        try:
            body = client.source(url, headers=self.headers)
        except:
            body = ''
        data = json.loads(body)
        try:
            # Rewrite the f4m/Akamai URL into its HLS master playlist form.
            master = data['resultObj']['src']
            master = master.replace('http://', 'https://').replace('/z/', '/i/').replace('manifest.f4m', 'master.m3u8').replace('2000,_STAR.', '2000,3000,4500,_STAR.')
            cookie = client.source(master, headers=self.headers, output='cookie')
            playlist = client.source(master, headers=self.headers)
            variants = re.compile("BANDWIDTH=[0-9]+,RESOLUTION=[0-9]+x(.+?),[^\n]*\n([^\n]*)\n").findall(playlist)
            if variants:
                for (res, stream) in variants:
                    try:
                        host = 'hotstar'
                        quality = self.res_map[res]
                        # The session cookie must travel with the stream URL.
                        stream = '%s|Cookie=%s' % (stream, cookie)
                        sources.append({'source': host, 'parts': '1', 'quality': quality, 'provider': 'Hotstar', 'url': stream, 'direct': True})
                    except:
                        pass
        except:
            pass
        logger.debug('%s SOURCES [%s]' % (__name__, sources))
        return sources
    except:
        return sources
示例8: resolve
def resolve(self, url):
    """Resolve an encoded hash_id/referer query into a direct stream URL,
    picking the highest labelled quality and fixing the URL scheme."""
    try:
        qs = urlparse.parse_qs(url)
        qs = dict([(key, qs[key][0]) if qs[key] else (key, '') for key in qs])
        hdrs = {'X-Requested-With': 'XMLHttpRequest'}
        t = time.localtime()
        ep = urlparse.urljoin(self.base_link, '/ajax/film/episode?hash_id=%s&f=&p=%s' % (qs['hash_id'], t.tm_hour + t.tm_min))
        info = json.loads(client.source(ep, headers=hdrs, referer=qs['referer']))
        args = {'flash': 1, 'json': 1, 's': t.tm_min, 'link': info['videoUrlHash'], '_': int(time.time())}
        files = json.loads(client.source(info['grabber'] + '?' + urllib.urlencode(args), headers=hdrs, referer=ep))
        # Rank candidate files by the numeric part of their quality label.
        ranked = []
        for entry in files:
            if 'label' in entry and 'file' in entry:
                digits = re.findall('(\d+)', entry['label'])
                if len(digits) > 0:
                    ranked.append((int(digits[0]), entry['file']))
        ranked.sort(key=lambda k: k[0])
        best = ranked[-1][1]
        best = client.request(best, output='geturl')
        if 'requiressl=yes' in best:
            best = best.replace('http://', 'https://')
        else:
            best = best.replace('https://', 'http://')
        return best
    except:
        return
示例9: get_sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Follow the episode dropdown to a Google Video page and resolve it."""
    try:
        sources = []
        if url == None:
            return sources
        # The episode number rides in the query string as E<n>.
        query = urlparse.urlparse(url).query
        try:
            query = '%02d' % int(re.compile('E(\d*)$').findall(query)[0])
        except:
            query = ''
        page = client.source(urlparse.urljoin(self.base_link, url))
        dropdown = client.parseDOM(page, 'select', attrs={'id': 'myDropdown'})[0]
        options = zip(client.parseDOM(dropdown, 'option', ret='value'), client.parseDOM(dropdown, 'option'))
        target = [value for value, label in options if query.endswith(label) or query == ''][0]
        final = client.source(urlparse.urljoin(self.base_link, target), output='geturl')
        # Only Google-hosted videos are usable here.
        if not 'google' in final: raise Exception()
        final = final.split('get_video_info')[0]
        for stream in resolvers.request(final):
            sources.append({'source': 'GVideo', 'quality': stream['quality'], 'provider': 'MVsnap', 'url': stream['url']})
        return sources
    except:
        return sources
示例10: get_episode
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    """Locate the page path for a season/episode pair, following the
    season link first whenever the season is not 1."""
    try:
        if url == None:
            return
        page_url = urlparse.urljoin(self.base_link, url)
        season, episode = '%01d' % int(season), '%01d' % int(episode)
        html = client.source(page_url)
        if not season == '1':
            # Jump to the right season page before hunting for the episode.
            season_links = client.parseDOM(html, 'a', ret='href', attrs={'class': 'season-.+?'})
            season_url = [s for s in season_links if '/%s-sezon-' % season in s][0]
            html = client.source(season_url)
        hrefs = client.parseDOM(html, 'a', ret='href')
        target = [h for h in hrefs if '%s-sezon-%s-bolum-' % (season, episode) in h][0]
        # Keep only the path when the href is scheme-relative.
        try:
            target = re.compile('//.+?(/.+)').findall(target)[0]
        except:
            pass
        target = client.replaceHTMLCodes(target)
        return target.encode('utf-8')
    except:
        return
示例11: get_sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Gather mail.ru / yadi.sk direct links plus gvideo iframe links from
    a Dayt forum thread."""
    try:
        sources = []
        if url == None:
            return sources
        thread = urlparse.urljoin(self.base_link, '/forum/' + url)
        html = client2.http_get(thread)
        # Direct download links live inside the '5throw' div.
        row = client.parseDOM(html, 'div', attrs={'id': '5throw'})[0]
        direct = client.parseDOM(row, 'a', attrs={'rel': 'nofollow'}, ret='href')
        mquality = '1080p' if '1080' in url else 'HD'
        for link in direct:
            if 'mail.ru' in link:
                sources.append({'source': 'MAIL.RU', 'quality': mquality, 'provider': 'Dayt', 'url': resolvers.request(link)})
            if 'yadi.sk' in link:
                sources.append({'source': 'YADISK', 'quality': mquality, 'provider': 'Dayt', 'url': resolvers.request(link)})
        # Descend two levels of nested 'pasep' iframes to the gvideo list.
        frame = [f for f in client.parseDOM(html, 'iframe', ret='src') if 'pasep' in f][0]
        inner = client.source(frame)
        inner = client.source(client.parseDOM(inner, 'iframe', ret='src')[0])
        final = client.parseDOM(inner, 'iframe', ret='src')[0]
        for link in resolvers.request(final):
            sources.append({'source': 'gvideo', 'quality': link[1], 'provider': 'Dayt', 'url': link[0]})
        return sources
    except:
        return sources
示例12: get_show
def get_show(self, imdb, tvdb, tvshowtitle, year):
    """Find the index entry matching title and year (+/-1) and return its
    page path."""
    try:
        html = client.source(self.base_link, headers=self.headers)
        if not "'index show'" in html:
            # Not signed in yet: authenticate, then refetch with the cookie.
            cookie = client.source(self.sign_link, headers=self.headers, post=self.key_link, output="cookie")
            html = client.source(self.base_link, headers=self.headers, cookie=cookie)
        shows = client.parseDOM(html, "div", attrs={"class": "index show"})
        shows = [
            (
                client.parseDOM(s, "a", attrs={"class": "name"})[0],
                client.parseDOM(s, "span", attrs={"class": "value"})[0],
                client.parseDOM(s, "a", ret="href")[0],
            )
            for s in shows
        ]
        wanted = cleantitle.tv(tvshowtitle)
        years = [str(year), str(int(year) + 1), str(int(year) - 1)]
        shows = [s for s in shows if any(x in s[1] for x in years)]
        href = [s[2] for s in shows if wanted == cleantitle.tv(s[0])][0]
        # Keep only the path when the href is scheme-relative.
        try:
            href = re.compile("//.+?(/.+)").findall(href)[0]
        except:
            pass
        href = client.replaceHTMLCodes(href)
        return href.encode("utf-8")
    except:
        return
示例13: get_sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Pull labelled stream files from the Dizigold ajax player endpoint
    and map them onto 1080p/HD/SD source entries."""
    try:
        sources = []
        if url == None:
            return sources
        page_url = urlparse.urljoin(self.base_link, url)
        html = client.source(page_url)
        endpoint = urlparse.urljoin(self.base_link, self.ajax_link)
        view_id = re.compile('var\s*view_id\s*=\s*"(\d*)"').findall(html)[0]
        payload = self.player_link % view_id
        data = json.loads(client.source(endpoint, post=payload, headers=self.headers))['data']
        files = re.compile('"file"\s*:\s*"(.+?)".+?"label"\s*:\s*"(\d*p)"').findall(data)
        # Google-hosted links come first so they win quality ties; other
        # hosts get UA/Referer headers appended.
        links = [{'url': f, 'quality': label} for f, label in files if 'google' in f]
        links += [{'url': '%s|User-Agent=%s&Referer=%s' % (f, urllib.quote_plus(client.agent()), urllib.quote_plus(page_url)), 'quality': label} for f, label in files if not 'google' in f]
        def pick(label):
            # First link carrying the requested quality label.
            return [l['url'] for l in links if l['quality'] == label][0]
        try: sources.append({'source': 'GVideo', 'quality': '1080p', 'provider': 'Dizigold', 'url': pick('1080p')})
        except: pass
        try: sources.append({'source': 'GVideo', 'quality': 'HD', 'provider': 'Dizigold', 'url': pick('720p')})
        except: pass
        try: sources.append({'source': 'GVideo', 'quality': 'SD', 'provider': 'Dizigold', 'url': pick('480p')})
        except: sources.append({'source': 'GVideo', 'quality': 'SD', 'provider': 'Dizigold', 'url': pick('360p')})
        return sources
    except:
        return sources
示例14: resolve
def resolve(self, url):
    """Resolve an embed page to a direct media URL.

    Fetches the page, follows the styled iframe src, then base64-decodes
    the window.atob payload twice to uncover the <source src=...> link.
    Falls back to resolving the first plain iframe src via
    resolvers.request; on total failure returns the last value of url.
    """
    link = client.source(url)
    url=re.compile('src="(.+?)" style').findall(link)[0]
    link = client.source(url)
    try:
        url=re.compile("window.atob\('(.+?)'\)\)").findall(link)[0]
        # The atob payload is base64-wrapped twice.
        content=base64.b64decode(url)
        data=base64.b64decode(content)
        url=re.compile("<source src='(.+?)'").findall(data)[0]
        # Appends a percent-encoded User-Agent header block to the URL.
        # NOTE(review): there is no '|' separator before the header block
        # and the string ends with a stray %27 (encoded quote) -- verify
        # this against the player the URL is handed to.
        url = url + 'User-Agent%3DMozilla%2F5.0%20(X11%3B%20Linux%20x86_64)%20AppleWebKit%2F537.36%20(KHTML%2C%20like%20Gecko)%20Chrome%2F48.0.2564.82%20Safari%2F537.36%27'
    except:
        # Fallback: resolve the first iframe src directly through resolvers.
        try:
            url=re.compile('src="(.+?)"').findall(link)[0]
            host = urlparse.urlparse(url).netloc
            host = host.replace('www.', '').replace('embed.', '')
            host = host.rsplit('.', 1)[0]
            host = host.lower()
            host = client.replaceHTMLCodes(host)
            host = host.encode('utf-8')
            url = resolvers.request(url)
        except:pass
    return url
示例15: get_show
def get_show(self, imdb, tvdb, tvshowtitle, year):
    """Search the ajax endpoint for the show matching title and year
    (+/-1) and return the content id of its page."""
    try:
        endpoint = urlparse.urljoin(self.base_link, self.ajax_link)
        payload = self.search_link % (urllib.quote_plus(tvshowtitle))
        hits = json.loads(client.source(endpoint, post=payload, headers=self.headers))
        wanted = cleantitle.tv(tvshowtitle)
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
        # (href, title, year) per hit; drop hits missing any of the three.
        parsed = [(client.parseDOM(h, 'a', ret='href'), client.parseDOM(h, 'h3'), re.compile('<h5>.+?(\d{4}).+?</h5>').findall(h)) for h in hits]
        parsed = [(a[0], t[0], y[0]) for a, t, y in parsed if len(a) > 0 and len(t) > 0 and len(y) > 0]
        parsed = [p for p in parsed if wanted == cleantitle.tv(p[1])]
        href = [p[0] for p in parsed if any(x in p[2] for x in years)][0]
        show_page = client.source(urlparse.urljoin(self.base_link, href))
        content_id = client.parseDOM(show_page, 'div', ret='value', attrs={'id': 'icerikid'})[0]
        return content_id.encode('utf-8')
    except:
        return