本文整理汇总了Python中resources.lib.modules.client.source函数的典型用法代码示例。如果您正苦于以下问题:Python source函数的具体用法?Python source怎么用?Python source使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了source函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: movie
def movie(self, imdb, title, year):
    """Resolve a movie (imdb id, title, year) to a site-relative URL.

    Searches via a base64-encoded Google CSE query stored in
    self.search_link; falls back to the localized IMDb title when the
    given title does not match.  Returns a utf-8 path string, or None
    (implicitly) on any failure.
    """
    try:
        t = cleantitle.get(title)
        query = '%s %s' % (title, year)
        # search_link is stored base64-encoded; decode then inject the query.
        query = base64.b64decode(self.search_link) % urllib.quote_plus(query)
        result = client.source(query)
        result = json.loads(result)['results']
        result = [(i['url'], i['titleNoFormatting']) for i in result]
        # Strip "Ver Online"/"Ver" prefixes and capture (title, year).
        result = [(i[0], re.findall('(?:^Ver Online |^Ver |)(.+?)(?: HD |)\((\d{4})\)', i[1])) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][1]) for i in result if len(i[1]) > 0]
        r = [i for i in result if t == cleantitle.get(i[1]) and year == i[2]]
        if len(r) == 0:
            # No match on the English title: retry with the localized
            # title fetched from IMDb (Accept-Language drives the locale).
            t = 'http://www.imdb.com/title/%s' % imdb
            t = client.source(t, headers={'Accept-Language':'ar-AR'})
            t = client.parseDOM(t, 'title')[0]
            t = re.sub('(?:\(|\s)\d{4}.+', '', t).strip()
            t = cleantitle.get(t)
            r = [i for i in result if t == cleantitle.get(i[1]) and year == i[2]]
        # Prefer the path portion of the URL; fall back to the raw value.
        try: url = re.findall('//.+?(/.+)', r[0][0])[0]
        except Exception: url = r[0][0]
        try: url = re.findall('(/.+?/.+?/)', url)[0]
        except Exception: pass
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except Exception:
        # Best-effort scraper: swallow failures and return None.
        pass
示例2: tvshow
def tvshow(self, imdb, tvdb, tvshowtitle, year):
    """Resolve a TV show to a site-relative permalink path.

    Requires account credentials (self.user / self.password).  Queries the
    site's AJAX search endpoint and, when more than one title matches,
    disambiguates by scraping each candidate page for the imdb id.
    Returns a utf-8 path string, or None on failure.
    """
    try:
        # Provider needs a logged-in account; bail out early otherwise.
        if (self.user == '' or self.password == ''): raise Exception()
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        url = urlparse.urljoin(self.base_link, self.search_link)
        # Drop any subtitle after the last ':' to improve search recall.
        post = {'q': tvshowtitle.rsplit(':', 1)[0], 'limit': '100', 'timestamp': int(time.time() * 1000), 'verifiedCheck': ''}
        post = urllib.urlencode(post)
        result = client.source(url, post=post, headers=headers)
        result = json.loads(result)
        tvshowtitle = cleantitle.get(tvshowtitle)
        # Keep TV entries whose normalized title matches; cap at 2 candidates.
        result = [i for i in result if i['meta'].strip().split(' ')[0].lower() == 'tv']
        result = [i for i in result if tvshowtitle == cleantitle.get(i['title'])][:2]
        if len(result) > 1:
            # Ambiguous: fetch each candidate page and match on the imdb id.
            result = [(i, urlparse.urljoin(self.base_link, i['permalink'])) for i in result]
            result = [(i[0], str(client.source(i[1]))) for i in result]
            result = [(i[0], re.compile('/(tt\d+)').findall(i[1])) for i in result]
            result = [i[0] for i in result if len(i[1]) > 0 and imdb == i[1][0]]
        result = result[0]['permalink']
        url = urlparse.urljoin(self.base_link, result)
        url = urlparse.urlparse(url).path
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except Exception:
        return
示例3: episode
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Resolve an episode to a site-relative URL via its absolute number.

    Looks up the episode's absolute number from TheTVDB (endpoint URL is
    stored base64-encoded), then matches it against the show page's
    episode table.  Returns a utf-8 path string, or None on failure.
    """
    try:
        if url is None: return
        # Decodes to a thetvdb.com API URL template: series/%s/default/%01d/%01d
        num = base64.b64decode('aHR0cDovL3RoZXR2ZGIuY29tL2FwaS8xRDYyRjJGOTAwMzBDNDQ0L3Nlcmllcy8lcy9kZWZhdWx0LyUwMWQvJTAxZA==')
        num = num % (tvdb, int(season), int(episode))
        num = client.source(num)
        num = client.parseDOM(num, 'absolute_number')[0]
        url = urlparse.urljoin(self.base_link, url)
        result = client.source(url)
        result = result.decode('iso-8859-1').encode('utf-8')
        # Each table row pairs an episode link with its absolute number.
        result = client.parseDOM(result, 'tr', attrs = {'class': ''})
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'td', attrs = {'class': 'epnum'})) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [i[0] for i in result if num == i[1]][0]
        url = urlparse.urljoin(self.base_link, result)
        url = urlparse.urlparse(url).path
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except Exception:
        return
示例4: tvshow
def tvshow(self, imdb, tvdb, tvshowtitle, year):
    """Resolve an animated TV show to a site-relative URL.

    First verifies via IMDb that the show's genres include 'animation'
    (this provider only carries animation); then searches the site and
    matches on the normalized title.  Returns a utf-8 path, or None.
    """
    try:
        # Genre gate: scrape IMDb genre links and require 'animation'.
        genre = 'http://www.imdb.com/title/%s/' % imdb
        genre = client.source(genre)
        genre = re.findall('href\s*=\s*[\'|\"](.+?)[\'|\"]', genre)
        genre = [i for i in genre if '/genre/' in i]
        genre = [i.split('/genre/')[-1].split('?')[0].lower() for i in genre]
        if not 'animation' in genre: raise Exception()
        query = self.search_link % (urllib.quote_plus(tvshowtitle))
        query = urlparse.urljoin(self.base_link, query)
        result = client.source(query)
        result = result.decode('iso-8859-1').encode('utf-8')
        tvshowtitle = cleantitle.get(tvshowtitle)
        result = client.parseDOM(result, 'ol', attrs = {'id': 'searchresult'})[0]
        result = client.parseDOM(result, 'h2')
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        # Strip any markup left inside the anchor text before comparing.
        result = [(i[0], re.sub('<.+?>|</.+?>','', i[1])) for i in result]
        result = [i for i in result if tvshowtitle == cleantitle.get(i[1])]
        result = result[-1][0]
        url = urlparse.urljoin(self.base_link, result)
        url = urlparse.urlparse(url).path
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except Exception:
        return
示例5: resolve
def resolve(self, url):
    """Resolve a querystring-encoded item into a final playable stream URL.

    `url` is a parse_qs-style querystring carrying `hash_id` and
    `referer`.  Hits the site's AJAX episode endpoint, then its grabber
    endpoint, and picks the highest-quality stream.  Returns the direct
    URL (scheme adjusted per `requiressl`), or None on failure.
    """
    try:
        data = urlparse.parse_qs(url)
        # Flatten single-valued querystring lists to plain strings.
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        now = time.localtime()
        # NOTE(review): `p` is hour+minute — looks like a site anti-bot
        # token tied to local time; confirm against the site's JS.
        url = '/ajax/film/episode?hash_id=%s&f=&p=%s' % (data['hash_id'], now.tm_hour + now.tm_min)
        url = urlparse.urljoin(self.base_link, url)
        result = client.source(url, headers=headers, referer=data['referer'])
        result = json.loads(result)
        grabber = {'flash': 1, 'json': 1, 's': now.tm_min, 'link': result['videoUrlHash'], '_': int(time.time())}
        grabber = result['grabber'] + '?' + urllib.urlencode(grabber)
        result = client.source(grabber, headers=headers, referer=url)
        result = json.loads(result)
        # Pick the stream with the highest numeric quality label.
        url = [(re.findall('(\d+)', i['label']), i['file']) for i in result if 'label' in i and 'file' in i]
        url = [(int(i[0][0]), i[1]) for i in url if len(i[0]) > 0]
        url = sorted(url, key=lambda k: k[0])
        url = url[-1][1]
        url = client.request(url, output='geturl')
        # Honour the host's transport requirement flag.
        if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
        else: url = url.replace('https://', 'http://')
        return url
    except Exception:
        return
示例6: tvshow
def tvshow(self, imdb, tvdb, tvshowtitle, year):
    """Resolve a TV show to a site-relative URL on a login-protected site.

    Requires credentials; lazily signs in (caching the session cookie on
    self.cookie), then matches the show index by normalized title and
    year.  Returns a utf-8 path string, or None on failure.
    """
    try:
        if (self.user == '' or self.password == ''): raise Exception()
        # Sign in once and cache the session cookie for later calls.
        if self.cookie is None: self.cookie = client.source(self.sign, post=self.post, headers=self.headers, cookie=self.lang, output='cookie')
        url = urlparse.urljoin(self.base_link, self.tvsearch_link)
        result = client.source(url, cookie='%s; %s' % (self.cookie, self.lang))
        tvshowtitle = cleantitle.get(tvshowtitle)
        years = ['%s' % str(year)]
        result = client.parseDOM(result, 'div', attrs = {'class': 'index show'})
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', attrs = {'class': 'name'}), client.parseDOM(i, 'span', attrs = {'class': 'value'})) for i in result]
        result = [(i[0][0], i[1][0], i[2][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
        result = [i for i in result if tvshowtitle == cleantitle.get(i[1])]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]
        url = urlparse.urljoin(self.base_link, result)
        url = urlparse.urlparse(url).path
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except Exception:
        return
示例7: movie
def movie(self, imdb, title, year):
    """Resolve a movie to a site-relative URL using two strategies.

    Strategy 1: Google CSE search (self.search_link, base64-encoded),
    retrying with the Spanish-localized IMDb title on a miss.
    Strategy 2: the site's own search (self.search3_link) fetched through
    cloudflare.  Returns a utf-8 path string, or None on failure.
    """
    try:
        t = cleantitle.get(title)
        query = '%s %s' % (title, year)
        query = base64.b64decode(self.search_link) % urllib.quote_plus(query)
        result = client.source(query)
        result = json.loads(result)['results']
        result = [(i['url'], i['titleNoFormatting']) for i in result]
        result = [(i[0], re.findall('(?:^Ver |)(.+?)(?: HD |)\((\d{4})', i[1])) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][1]) for i in result if len(i[1]) > 0]
        r = [i for i in result if t == cleantitle.get(i[1]) and year == i[2]]
        if len(r) == 0:
            # Retry with the es-ES localized title from IMDb.
            t = 'http://www.imdb.com/title/%s' % imdb
            t = client.source(t, headers={'Accept-Language':'es-ES'})
            t = client.parseDOM(t, 'title')[0]
            t = re.sub('(?:\(|\s)\d{4}.+', '', t).strip()
            t = cleantitle.get(t)
            r = [i for i in result if t == cleantitle.get(i[1]) and year == i[2]]
        try: url = re.findall('//.+?(/.+)', r[0][0])[0]
        except Exception: url = r[0][0]
        try: url = re.findall('(/.+?/.+?/)', url)[0]
        except Exception: pass
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except Exception:
        pass
    try:
        # Fallback: scrape the site's own search results page.
        t = cleantitle.get(title)
        query = self.search3_link % urllib.quote_plus(cleantitle.query(title))
        query = urlparse.urljoin(self.base_link, query)
        result = cloudflare.source(query)
        # Drop non-ASCII bytes before splitting the result list.
        result = re.sub(r'[^\x00-\x7F]+','', result)
        r = result.split('<li class=')
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'i'), re.findall('\((\d{4})\)', i)) for i in r]
        r = [(i[0][0], re.sub('\(|\)','', i[1][0]), i[2][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
        try: url = re.findall('//.+?(/.+)', r)[0]
        except Exception: url = r
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except Exception:
        pass
示例8: sources
def sources(self, url, hostDict, hostprDict):
    """Collect playable gvideo stream links for a MiraDeTodo item.

    Tries two extraction paths on the item page: (1) a gk-encrypted
    proxy link decrypted with embedded keys, and (2) the site's
    gkpluginsphp endpoint, bucketing streams into 1080p/HD/SD.
    Returns a list of source dicts (possibly empty).
    """
    try:
        sources = []
        if url is None: return sources
        url = urlparse.urljoin(self.base_link, url)
        result = client.source(url)
        links = []
        try:
            # Path 1: encrypted proxy link embedded in the page.
            try: url = re.compile('proxy\.link=([^"&]+)').findall(result)[0]
            except Exception: url = client.source(re.compile('proxy\.list=([^"&]+)').findall(result)[0])
            url = url.split('*', 1)[-1].rsplit('<')[0]
            # Two known decryption keys (base64-encoded); try both.
            dec = self._gkdecrypt(base64.b64decode('aUJocnZjOGdGZENaQWh3V2huUm0='), url)
            if not 'http' in dec: dec = self._gkdecrypt(base64.b64decode('QjZVTUMxUms3VFJBVU56V3hraHI='), url)
            url = directstream.google(dec)
            links += [(i['url'], i['quality']) for i in url]
        except Exception:
            pass
        try:
            # Path 2: resolve player iframes through gkpluginsphp.
            url = 'http://miradetodo.com.ar/gkphp/plugins/gkpluginsphp.php'
            post = client.parseDOM(result, 'div', attrs = {'class': 'player.+?'})[0]
            post = post.replace('iframe', 'IFRAME')
            post = client.parseDOM(post, 'IFRAME', ret='.+?')[0]
            post = urlparse.parse_qs(urlparse.urlparse(post).query)
            result = ''
            # The page may expose up to three ids; accumulate all responses.
            try: result += client.source(url, post=urllib.urlencode({'link': post['id'][0]}))
            except Exception: pass
            try: result += client.source(url, post=urllib.urlencode({'link': post['id1'][0]}))
            except Exception: pass
            try: result += client.source(url, post=urllib.urlencode({'link': post['id2'][0]}))
            except Exception: pass
            result = re.compile('"?link"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"').findall(result)
            result = [(i[0].replace('\\/', '/'), i[1]) for i in result]
            # Bucket by numeric label; only fall back to 360p when no SD exists.
            links += [(i[0], '1080p') for i in result if int(i[1]) >= 1080]
            links += [(i[0], 'HD') for i in result if 720 <= int(i[1]) < 1080]
            links += [(i[0], 'SD') for i in result if 480 <= int(i[1]) < 720]
            if not 'SD' in [i[1] for i in links]: links += [(i[0], 'SD') for i in result if 360 <= int(i[1]) < 480]
        except Exception:
            pass
        for i in links: sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'MiraDeTodo', 'url': i[0], 'direct': True, 'debridonly': False})
        return sources
    except Exception:
        return sources
示例9: sources
def sources(self, url, hostDict, hostprDict):
    """Collect playable gvideo stream links for a USmovies item.

    Non-http `url` values are querystrings carrying title/year, matched
    against a cached movie list.  Extracts streams via the site's
    gkpluginsphp endpoint and via an embedded iframe player.
    Returns a list of source dicts (possibly empty).
    """
    try:
        sources = []
        if url is None: return sources
        if not str(url).startswith('http'):
            # url is a querystring: build a slug and find it in the cached list.
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            match = data['title'].replace(':', '').replace('\'', '').replace(' ', '-')
            match = re.sub('\-+', '-', match.lower())
            match = '/%s-%s' % (match, data['year'])
            url = cache.get(self.usmovies_moviecache, 120)
            url = [i for i in url if match in i][-1]
            url = client.replaceHTMLCodes(url)
        r = urlparse.urljoin(self.base_link, url)
        result = client.source(r)
        links = []
        headers = {'Referer': r}
        result = client.parseDOM(result, 'div', attrs = {'class': 'video-embed'})[0]
        try:
            # Path 1: gkpluginsphp resolver.
            post = re.findall('{link\s*:\s*"([^"]+)', result)[0]
            post = urllib.urlencode({'link': post})
            url = urlparse.urljoin(self.base_link, '/plugins/gkpluginsphp.php')
            url = client.source(url, post=post, headers=headers)
            url = json.loads(url)['link']
            links += [i['link'] for i in url if 'link' in i]
        except Exception:
            pass
        try:
            # Path 2: iframe player exposing a jwplayer-style sources array.
            url = client.parseDOM(result, 'iframe', ret='.+?')[0]
            url = client.source(url, headers=headers)
            url = url.replace('\n', '')
            url = re.findall('sources\s*:\s*\[(.+?)\]', url)[0]
            url = re.findall('"file"\s*:\s*"(.+?)"', url)
            links += [i.split()[0] for i in url]
        except Exception:
            pass
        for i in links:
            try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'USmovies', 'url': i, 'direct': True, 'debridonly': False})
            except Exception: pass
        return sources
    except Exception:
        return sources
示例10: sources
def sources(self, url, hostDict, hostprDict):
    """Collect playable gvideo stream links for a Pubfilm item.

    Episode URLs carry a trailing `?episode=N` marker.  For each
    player-iframe link, resolves the stream either through Pubfilm's
    gkpluginsphp endpoint or, when it answers with a 'gklist', by
    scraping the player page directly.  Returns a list of source dicts.
    """
    try:
        sources = []
        if url is None: return sources
        url = urlparse.urljoin(self.base_link, url)
        # A '?episode=N' suffix marks episode content; otherwise a movie.
        content = re.compile('(.+?)\?episode=\d*$').findall(url)
        content = 'movie' if len(content) == 0 else 'episode'
        try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
        except Exception: pass
        result = client.source(url)
        url = zip(client.parseDOM(result, 'a', ret='href', attrs = {'target': 'player_iframe'}), client.parseDOM(result, 'a', attrs = {'target': 'player_iframe'}))
        url = [(i[0], re.compile('(\d+)').findall(i[1])) for i in url]
        url = [(i[0], i[1][-1]) for i in url if len(i[1]) > 0]
        if content == 'episode':
            # Keep only the player link for the requested episode number.
            url = [i for i in url if i[1] == '%01d' % int(episode)]
        links = [client.replaceHTMLCodes(i[0]) for i in url]
        for u in links:
            try:
                headers = {'X-Requested-With': 'XMLHttpRequest', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0', 'Referer': u}
                post = urlparse.parse_qs(urlparse.urlparse(u).query)['link'][0]
                post = urllib.urlencode({'link': post})
                url = 'http://player.pubfilm.com/smplayer/plugins/gkphp/plugins/gkpluginsphp.php'
                url = client.source(url, post=post, headers=headers)
                url = json.loads(url)
                if 'gklist' in url:
                    # Endpoint punted: scrape the player page's sources array.
                    url = client.source(u)
                    url = re.findall('sources\s*:\s*\[(.+?)\]', url)[0]
                    url = re.findall('"file"\s*:\s*"(.+?)"', url)
                    url = [i.split()[0].replace('\\/', '/') for i in url]
                else:
                    url = url['link']
                    url = directstream.google(url)
                    url = [i['url'] for i in url]
                for i in url:
                    try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'Pubfilm', 'url': i, 'direct': True, 'debridonly': False})
                    except Exception: pass
            except Exception:
                pass
        return sources
    except Exception:
        return sources
示例11: movie
def movie(self, imdb, title, year):
    """Resolve a movie to a site-relative URL using two search strategies.

    Strategy 1: Google CSE search (self.search_link), tolerating a +/- 1
    year mismatch.  Strategy 2: slug search via self.search_link_2.
    Returns a '/movie/<slug>/'-style path, or None on failure.
    """
    try:
        query = "%s %s" % (title.replace(":", " "), year)
        query = base64.b64decode(self.search_link) % urllib.quote_plus(query)
        result = client.source(query)
        result = json.loads(result)["results"]
        t = cleantitle.get(title)
        # Accept the adjacent years to tolerate release-date discrepancies.
        years = ["(%s)" % str(year), "(%s)" % str(int(year) + 1), "(%s)" % str(int(year) - 1)]
        result = [(i["url"], i["titleNoFormatting"]) for i in result]
        result = [
            (i[0], re.compile('(^Watch Full "|^Watch |^Xmovies8:|^xmovies8:|)(.+? [(]\d{4}[)])').findall(i[1]))
            for i in result
        ]
        result = [(i[0], i[1][0][-1]) for i in result if len(i[1]) > 0]
        result = [i for i in result if t == cleantitle.get(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]
        url = urlparse.urljoin(self.base_link, result)
        url = urlparse.urlparse(url).path
        # Keep only the first two path segments: '/movie/<slug>/'.
        url = "/".join(url.split("/")[:3]) + "/"
        return url
    except Exception:
        pass
    try:
        # Fallback: build a URL slug from the title and search for it.
        t = title.replace("'", "")
        t = re.sub(r"[^a-zA-Z0-9\s]+", " ", t).lower().strip()
        t = re.sub("\s\s+", " ", t)
        t = "/movie/" + t.replace(" ", "-") + "-"
        years = ["-%s" % str(year), "-%s" % str(int(year) + 1), "-%s" % str(int(year) - 1)]
        query = base64.b64decode(self.search_link_2) % t
        result = client.source(query)
        result = json.loads(result)["results"]
        result = [i["contentNoFormatting"] for i in result]
        result = "".join(result)
        result = re.compile("(/movie/.+?)\s").findall(result)
        result = [i for i in result if t in i]
        result = [i for i in result if any(x in i for x in years)][0]
        url = urlparse.urljoin(self.base_link, result)
        url = urlparse.urlparse(url).path
        url = "/".join(url.split("/")[:3]) + "/"
        url = client.replaceHTMLCodes(url)
        url = url.encode("utf-8")
        return url
    except Exception:
        pass
示例12: request
def request(self, url, check):
    """Fetch `url`, retrying through a proxy when validation fails.

    `check` is a substring that must appear in the response for it to be
    considered valid.  Tries a direct fetch first, then up to two
    proxied attempts.  Returns the response re-encoded as utf-8, or
    None when every attempt fails the check.
    """
    try:
        result = client.source(url)
        if check in str(result): return result.decode('iso-8859-1').encode('utf-8')
        # Direct fetch failed validation; retry via proxy (twice, since
        # proxy.get() may rotate endpoints between calls).
        result = client.source(proxy.get() + urllib.quote_plus(url))
        if check in str(result): return result.decode('iso-8859-1').encode('utf-8')
        result = client.source(proxy.get() + urllib.quote_plus(url))
        if check in str(result): return result.decode('iso-8859-1').encode('utf-8')
    except Exception:
        return
示例13: episode
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Resolve an episode to a site path with an '?episode=N' suffix.

    `url` is a querystring carrying `tvshowtitle`.  Finds the season
    page first via a Google CSE query (self.search_link), falling back
    to the site's own search (self.search2_link), then appends the
    episode marker.  Returns a utf-8 path string, or None on failure.
    """
    try:
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        tvshowtitle = cleantitle.get(data['tvshowtitle'])
        season = '%01d' % int(season)
        episode = '%01d' % int(episode)
        try:
            # Primary: Google CSE lookup of "<title> season N".
            query = '%s season %01d' % (data['tvshowtitle'], int(season))
            query = base64.b64decode(self.search_link) % urllib.quote_plus(query)
            result = client.source(query)
            result = json.loads(result)['results']
            r = [(i['url'], i['titleNoFormatting']) for i in result]
            r = [(i[0], re.compile('(^Watch Full "|^Watch |)(.+)').findall(i[1])) for i in r]
            r = [(i[0], i[1][0][-1]) for i in r if len(i[1]) > 0]
            r = [(i[0], re.compile('(.+?) - Season (\d*)').findall(i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            # Reduce to the first two path segments and dedupe, preserving order.
            r = [(re.sub('http.+?//.+?/','', i[0]), i[1], i[2]) for i in r]
            r = [('/'.join(i[0].split('/')[:2]), i[1], i[2]) for i in r]
            r = [x for y,x in enumerate(r) if x not in r[:y]]
            r = [i for i in r if tvshowtitle == cleantitle.get(i[1])]
            u = [i[0] for i in r if season == '%01d' % int(i[2])][0]
        except Exception:
            # Fallback: the site's own search listing.
            query = self.search2_link % urllib.quote_plus(data['tvshowtitle'])
            query = urlparse.urljoin(self.base_link, query)
            result = client.source(query)
            r = client.parseDOM(result, 'div', attrs = {'class': 'ml-item'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
            r = [(i[0][0], i[1][-1]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(i[0], re.compile('(.+?) - Season (\d*)').findall(i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            r = [(re.sub('http.+?//.+?/','', i[0]), i[1], i[2]) for i in r]
            r = [('/'.join(i[0].split('/')[:2]), i[1], i[2]) for i in r]
            r = [x for y,x in enumerate(r) if x not in r[:y]]
            r = [i for i in r if tvshowtitle == cleantitle.get(i[1])]
            u = [i[0] for i in r if season == '%01d' % int(i[2])][0]
        url = urlparse.urljoin(self.base_link, u)
        url = urlparse.urlparse(url).path
        url += '?episode=%01d' % int(episode)
        url = url.encode('utf-8')
        return url
    except Exception:
        return
示例14: movie
def movie(self, imdb, title, year):
    """Resolve a movie to a site-relative URL using two search strategies.

    Strategy 1: Google CSE search (self.search_link) matching the exact
    year.  Strategy 2: slug search via self.search_link_2.  Returns a
    '/movie/<slug>/'-style path, or None on failure.
    """
    try:
        query = '%s %s' % (title.replace(':', ' '), year)
        query = base64.b64decode(self.search_link) % urllib.quote_plus(query)
        result = client.source(query)
        result = json.loads(result)['results']
        t = cleantitle.get(title)
        years = ['(%s)' % str(year)]
        result = [(i['url'], i['titleNoFormatting']) for i in result]
        result = [(i[0], re.compile('(^Watch Full "|^Watch |^Xmovies8:|^xmovies8:|)(.+? [(]\d{4}[)])').findall(i[1])) for i in result]
        result = [(i[0], i[1][0][-1]) for i in result if len(i[1]) > 0]
        result = [i for i in result if t == cleantitle.get(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]
        url = urlparse.urljoin(self.base_link, result)
        url = urlparse.urlparse(url).path
        # Keep only the first two path segments: '/movie/<slug>/'.
        url = '/'.join(url.split('/')[:3]) + '/'
        return url
    except Exception:
        pass
    try:
        # Fallback: build a URL slug from the title and search for it.
        t = title.replace('\'', '')
        t = re.sub(r'[^a-zA-Z0-9\s]+', ' ', t).lower().strip()
        t = re.sub('\s\s+' , ' ', t)
        t = '/movie/' + t.replace(' ' , '-') + '-'
        years = ['-%s' % str(year)]
        query = base64.b64decode(self.search_link_2) % t
        result = client.source(query)
        result = json.loads(result)['results']
        result = [i['contentNoFormatting'] for i in result]
        result = ''.join(result)
        result = re.compile('(/movie/.+?)\s').findall(result)
        result = [i for i in result if t in i]
        result = [i for i in result if any(x in i for x in years)][0]
        url = urlparse.urljoin(self.base_link, result)
        url = urlparse.urlparse(url).path
        url = '/'.join(url.split('/')[:3]) + '/'
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except Exception:
        pass
示例15: sources
def sources(self, url, hostDict, hostprDict):
    """Collect playable gvideo stream links for a Pubfilm item.

    Movies embed a single iframe; episodes (marked by '?episode=N')
    list per-episode player links.  Streams are resolved through
    Pubfilm's gkpluginsphp endpoint and bucketed into 1080p/HD/SD.
    Returns a list of source dicts (possibly empty).
    """
    try:
        sources = []
        if url is None: return sources
        url = urlparse.urljoin(self.base_link, url)
        # A '?episode=N' suffix marks episode content; otherwise a movie.
        content = re.compile('(.+?)\?episode=\d*$').findall(url)
        content = 'movie' if len(content) == 0 else 'episode'
        try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
        except Exception: pass
        result = client.source(url)
        if content == 'movie':
            url = client.parseDOM(result, 'iframe', ret='src')[0]
        else:
            # Pick the player link whose label matches the episode number.
            url = zip(client.parseDOM(result, 'a', ret='href', attrs = {'target': 'player_iframe'}), client.parseDOM(result, 'a', attrs = {'target': 'player_iframe'}))
            url = [(i[0], re.compile('(\d+)').findall(i[1])) for i in url]
            url = [(i[0], i[1][-1]) for i in url if len(i[1]) > 0]
            url = [i[0] for i in url if i[1] == '%01d' % int(episode)][0]
        url = client.replaceHTMLCodes(url)
        result = client.source(url)
        headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': url}
        url = 'http://player.pubfilm.com/smplayer/plugins/gkphp/plugins/gkpluginsphp.php'
        post = re.compile('link\s*:\s*"([^"]+)').findall(result)[0]
        post = urllib.urlencode({'link': post})
        result = client.source(url, post=post, headers=headers)
        r = re.compile('"?link"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"').findall(result)
        # When no labels are present, assume SD (480) for every link found.
        if not r: r = [(i, 480) for i in re.compile('"?link"?\s*:\s*"([^"]+)').findall(result)]
        r = [(i[0].replace('\\/', '/'), i[1]) for i in r]
        # Bucket by numeric label; only fall back to 360p when no SD exists.
        links = [(i[0], '1080p') for i in r if int(i[1]) >= 1080]
        links += [(i[0], 'HD') for i in r if 720 <= int(i[1]) < 1080]
        links += [(i[0], 'SD') for i in r if 480 <= int(i[1]) < 720]
        if not 'SD' in [i[1] for i in links]: links += [(i[0], 'SD') for i in r if 360 <= int(i[1]) < 480]
        for i in links: sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'Pubfilm', 'url': i[0], 'direct': True, 'debridonly': False})
        return sources
    except Exception:
        return sources