本文整理汇总了Python中resources.lib.modules.source_utils.check_directstreams函数的典型用法代码示例。如果您正苦于以下问题:Python check_directstreams函数的具体用法?Python check_directstreams怎么用?Python check_directstreams使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了check_directstreams函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: sources
def sources(self, url, hostDict, hostprDict):
    """Scrape hoster links from a show/movie page.

    url -- page path relative to self.base_link (None yields no sources)
    hostDict -- names of resolvable file hosts
    hostprDict -- premium-only hosts (unused by this scraper)
    Returns a list of source dicts ({'source', 'quality', 'language',
    'url', 'direct', 'debridonly'}); empty list on any failure.
    """
    sources = []
    try:
        if url is None:
            return sources
        url = urlparse.urljoin(self.base_link, url)
        # The site is flaky, so retry the page fetch up to three times.
        result = None
        for _ in range(3):
            result = client.request(url)
            if result is not None:
                break
        # Hoster URLs are embedded in onclick="report('<url>')" handlers.
        links = re.compile('onclick="report\(\'([^\']+)').findall(result)
        for link in links:
            try:
                valid, hoster = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                urls, host, direct = source_utils.check_directstreams(link, hoster)
                for x in urls:
                    sources.append({'source': host, 'quality': x['quality'],
                                    'language': 'en', 'url': x['url'],
                                    'direct': direct, 'debridonly': False})
            except Exception:
                pass
        return sources
    except Exception:
        # Scraper convention: never raise, return whatever was collected.
        return sources
示例2: __get_episode_url
def __get_episode_url(self, data, hostDict):
    """Resolve an episode page on mehlizmovies to a list of source dicts.

    data -- dict with at least 'tvshowtitle', 'season' and 'title'
    hostDict -- names of resolvable file hosts
    Returns a list of source dicts, '' when the episode or host cannot be
    used, or "" on unexpected errors (logged to stdout).
    """
    scraper = cfscrape.create_scraper()
    try:
        value = "/seasons/" + cleantitle.geturl(data['tvshowtitle']) + '-season-' + data['season']
        url = self.base_link + value
        print("INFO - " + url)
        # First request primes cfscrape's anti-bot cookies; the second
        # fetches the actual season page. TODO confirm the warm-up is needed.
        html = scraper.get(self.base_link)
        html = scraper.get(url)
        page_list = BeautifulSoup(html.text, 'html.parser')
        page_list = page_list.find_all('div', {'class': 'episodiotitle'})
        # Match the wanted episode by comparing alphanumeric-only titles.
        ep_page = ''
        for i in page_list:
            if re.sub(r'\W+', '', data['title'].lower()) in re.sub(r'\W+', '', i.text.lower()):
                ep_page = i.prettify()
        if ep_page == '':
            return ''
        ep_page = BeautifulSoup(ep_page, 'html.parser').find_all('a')[0]['href']
        html = scraper.get(ep_page)
        embed = re.findall('<iframe.+?src=\"(.+?)\"', html.text)[0]
        url = embed
        sources = []
        if 'mehliz' in url:
            # Self-hosted player: scrape file/label pairs out of the JS.
            html = scraper.get(url, headers={'referer': self.base_link + '/'})
            files = re.findall('file: \"(.+?)\".+?label: \"(.+?)\"', html.text)
            for i in files:
                try:
                    sources.append({
                        'source': 'gvideo',
                        # BUG FIX: the regex captures two groups, so the
                        # quality label is i[1]; i[2] raised IndexError for
                        # every file (see __get_movie_url, which uses i[1]).
                        'quality': i[1],
                        'language': 'en',
                        'url': i[0] + "|Referer=https://www.mehlizmovies.com",
                        'direct': True,
                        'debridonly': False
                    })
                except Exception:
                    pass
        else:
            valid, hoster = source_utils.is_host_valid(url, hostDict)
            if not valid:
                return ''
            urls, host, direct = source_utils.check_directstreams(url, hoster)
            sources.append({
                'source': host,
                'quality': urls[0]['quality'],
                'language': 'en',
                'url': url + "|Referer=https://www.mehlizmovies.com",
                'direct': False,
                'debridonly': False
            })
        return sources
    except Exception:
        print("Unexpected error in Mehlix _get_episode_url Script:")
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, exc_tb.tb_lineno)
        return ""
示例3: sources
def sources(self, url, hostDict, hostprDict):
    """Scrape up to five hoster links from the page's 'noSubs' links table.

    Returns a list of source dicts; empty list on any failure.
    """
    sources = []
    try:
        if url is None:
            return sources
        url = urlparse.urljoin(self.base_link, url)
        # Retry the flaky page fetch up to three times.
        result = None
        for _ in range(3):
            result = client.request(url, timeout=10)
            if result is not None:
                break
        dom = dom_parser.parse_dom(result, 'div', attrs={'class': 'links', 'id': 'noSubs'})
        result = dom[0].content
        # Each table row pairs a host label with the watch-page href.
        links = re.compile('<tr\s*>\s*<td><i\s+class="fa fa-youtube link-logo"></i>([^<]+).*?href="([^"]+)"\s+class="watch', re.DOTALL).findall(result)
        for link in links[:5]:  # keep the connection count polite
            try:
                url2 = urlparse.urljoin(self.base_link, link[1])
                # Two quick attempts per watch page, then move on.
                result2 = None
                for _ in range(2):
                    result2 = client.request(url2, timeout=3)
                    if result2 is not None:
                        break
                r = re.compile('href="([^"]+)"\s+class="action-btn').findall(result2)[0]
                valid, hoster = source_utils.is_host_valid(r, hostDict)
                if not valid:
                    continue
                urls, host, direct = source_utils.check_directstreams(r, hoster)
                for x in urls:
                    sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
            except Exception:
                pass
        return sources
    except Exception:
        return sources
示例4: sources
def sources(self, url, hostDict, hostprDict):
    """Resolve search results into at most ten playable sources.

    url -- querystring-encoded metadata (title/year/season/episode/aliases)
    Returns a list of source dicts; empty list on any failure.
    """
    sources = []
    try:
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        # SECURITY: 'aliases' arrives via url params and is eval'd here.
        # If this string can be attacker-influenced, switch to
        # ast.literal_eval, which only accepts Python literals.
        aliases = eval(data['aliases'])
        headers = {}
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        year = data['year']
        if 'tvshowtitle' in data:
            episode = data['episode']
            season = data['season']
            url = self._search(data['tvshowtitle'], data['year'], aliases, headers)
            url = url.replace('online-free', 'season-%s-episode-%s-online-free' % (season, episode))
        else:
            episode = None
            year = data['year']
            url = self._search(data['title'], data['year'], aliases, headers)
        url = url if 'http' in url else urlparse.urljoin(self.base_link, url)
        result = client.request(url)
        result = client.parseDOM(result, 'li', attrs={'class': 'link-button'})
        links = client.parseDOM(result, 'a', ret='href')
        i = 0  # counter of accepted non-google sources (capped at 10)
        for l in links:
            if i == 10:
                break
            try:
                l = l.split('=')[1]
                l = urlparse.urljoin(self.base_link, self.video_link % l)
                result = client.request(l, post={}, headers={'Referer': url})
                u = result if 'http' in result else 'http:' + result
                valid, hoster = source_utils.is_host_valid(u, hostDict)
                # CONSISTENCY FIX: skip unsupported hosts in both branches;
                # the original only checked validity for non-google links.
                if not valid:
                    continue
                if 'google' in u:
                    urls, host, direct = source_utils.check_directstreams(u, hoster)
                    for x in urls:
                        sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                else:
                    try:
                        # Python 2 sanity check: reject non-utf8 urls.
                        u.decode('utf-8')
                        sources.append({'source': hoster, 'quality': 'SD', 'language': 'en', 'url': u, 'direct': False, 'debridonly': False})
                        i += 1
                    except Exception:
                        pass
            except Exception:
                pass
        return sources
    except Exception:
        return sources
示例5: sources
def sources(self, url, hostDict, hostprDict):
    """Scrape iframe hosters from an episode or movie page.

    For episodes, the page's displayed date is cross-checked against the
    premiere year to avoid matching a same-named show.
    Returns a list of source dicts; empty list on any failure.
    """
    sources = []
    try:
        if url is None:
            return sources
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            if 'tvshowtitle' in data:
                url = '%s/episode/%s-s%02de%02d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
                year = re.findall('(\d{4})', data['premiered'])[0]
                url = client.request(url, output='geturl')
                if url is None:
                    raise Exception()
                r = client.request(url)
                # Pull the page's year from the date span (or metadata div)
                # and reject the page when it disagrees with the premiere.
                y = client.parseDOM(r, 'span', attrs={'class': 'date'})
                y += [i for i in client.parseDOM(r, 'div', attrs={'class': 'metadatac'}) if 'date' in i]
                y = re.findall('(\d{4})', y[0])[0]
                if not y == year:
                    raise Exception()
            else:
                url = '%s/movie/%s-%s/' % (self.base_link, cleantitle.geturl(data['title']), data['year'])
                url = client.request(url, output='geturl')
                if url is None:
                    raise Exception()
                r = client.request(url)
        else:
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)
        links = client.parseDOM(r, 'iframe', ret='src')
        for link in links:
            try:
                valid, hoster = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                urls, host, direct = source_utils.check_directstreams(link, hoster)
                for x in urls:
                    # Upgrade the reported quality when the url itself
                    # hints at an HD stream.
                    if x['quality'] == 'SD':
                        try:
                            if 'HDTV' in x['url'] or '720' in x['url']:
                                x['quality'] = 'HD'
                            if '1080' in x['url']:
                                x['quality'] = '1080p'
                        except Exception:
                            pass
                    sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
            except Exception:
                pass
        return sources
    except Exception:
        return sources
示例6: sources
def sources(self, url, hostDict, hostprDict):
    """Turn url(s) produced by the movie/episode resolvers into sources.

    url -- querystring-encoded metadata; routed to __get_episode_url or
    __get_movie_url depending on 'tvshowtitle'.
    Returns a list of source dicts; empty list on any failure.
    """
    sources = []
    try:
        data = urlparse.parse_qs(url)
        data = dict((i, data[i][0]) for i in data)
        if 'tvshowtitle' in data:
            url = self.__get_episode_url(data)
        else:
            url = self.__get_movie_url(data)
        # The resolvers may hand back a single url or a list of them.
        page_urls = [url] if isinstance(url, str) else list(url)
        for url in page_urls:
            if 'mehlizmovies.is' in url:
                # Self-hosted player: scrape file/label pairs from the JS.
                html = client.request(url, referer=self.base_link + '/')
                files = re.findall('file: \"(.+?)\".+?label: \"(.+?)\"', html)
                for i in files:
                    try:
                        sources.append({
                            'source': 'gvideo',
                            'quality': i[1],
                            'language': 'en',
                            'url': i[0],
                            'direct': True,
                            'debridonly': False
                        })
                    except Exception:
                        pass
            else:
                valid, hoster = source_utils.is_host_valid(url, hostDict)
                if not valid:
                    continue
                # Distinct name so we don't shadow the iterated url list.
                streams, host, direct = source_utils.check_directstreams(url, hoster)
                sources.append({
                    'source': hoster,
                    'quality': streams[0]['quality'],
                    'language': 'en',
                    'url': url,
                    'direct': False,
                    'debridonly': False
                })
        return sources
    except Exception:
        # BUG FIX: the original returned None here; callers iterate the
        # result, so always return a list.
        return sources
示例7: sources
def sources(self, url, hostDict, locDict):
    """Build a source list through the site's mobile JSON search API.

    Searches for the title, narrows the hits by publish year (+/- 1),
    picks the matching chapter (episode) or the first chapter (movie),
    then decrypts each stream entry into a source dict.
    """
    sources = []
    try:
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict((k, vals[0] if vals else '') for k, vals in data.items())
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        query = urlparse.urljoin(self.base_link, self.search_link % (urllib.quote_plus(title)))
        result = client.request(query, mobile=True, timeout=20, output='extended')
        films = json.loads(result[0])['data']['films']
        base_year = int(data['year'])
        years = [str(base_year), str(base_year + 1), str(base_year - 1)]
        if 'episode' in data:
            # Match by cleaned title, then by publish-date year window.
            matches = [f for f in films if cleantitle.get(title) == cleantitle.get(f['title'])]
            dated = [(f, re.sub('[^0-9]', '', str(f['publishDate']))) for f in matches]
            film = [f for f, d in dated if any(y in d for y in years)][0]
            result = client.request(urlparse.urljoin(self.base_link, self.sources_link % film['id']), mobile=True, headers=result[4], output='extended')
            chapters = json.loads(result[0])['data']['chapters']
            wanted = 's%se%s' % (data['season'], data['episode'])
            chapter = [c for c in chapters if c['title'].replace('0', '').lower() == wanted][0]
        else:
            matches = [f for f in films if cleantitle.get(title) == cleantitle.get(f['title'])]
            film = [f for f in matches if any(y in f['publishDate'] for y in years)][0]
            result = client.request(urlparse.urljoin(self.base_link, self.sources_link % film['id']), mobile=True, headers=result[4], output='extended')
            chapter = json.loads(result[0])['data']['chapters'][0]
        result = client.request(urlparse.urljoin(self.base_link, self.stream_link % chapter['id']), mobile=True,
                                headers=result[4], output='extended')
        entries = [(s['quality'], s['server'], self._decrypt(s['stream'])) for s in json.loads(result[0])['data']]
        sources = []
        for quality_label, server, stream_url in entries:
            try:
                valid, hoster = source_utils.is_host_valid(stream_url, hostDict)
                if not valid:
                    continue
                urls, host, direct = source_utils.check_directstreams(stream_url, hoster)
                for x in urls:
                    q = x['quality'] if host == 'gvideo' else source_utils.label_to_quality(quality_label)
                    u = x['url'] if host == 'gvideo' else stream_url
                    sources.append({'source': host, 'quality': q, 'language': 'en', 'url': u, 'direct': direct, 'debridonly': False})
            except:
                pass
        return sources
    except Exception as e:
        return sources
示例8: sources
def sources(self, url, hostDict, hostprDict):
    """Drive the site's ajax/show_player endpoint to find stream links.

    Returns a list of source dicts; empty list on any failure.
    """
    sources = []
    try:
        if url is None:
            return sources
        content = 'episode' if 'episode' in url else 'movie'
        result = client.request(url)
        try:
            url = re.findall(r"class\s*=\s*'play_container'\s+href\s*=\s*'([^']+)", result)[0]
            # BUG FIX: timeout was the string '10'; pass an int as the
            # other scrapers do.
            result = client.request(url, timeout=10)
        except Exception:
            pass
        # Prefer the ajax url; fall back to the inline show_player argument.
        try:
            url = re.compile('ajax\(\{\s*url\s*:\s*[\'"]([^\'"]+)').findall(result)[0]
            post = 'post'
        except Exception:
            url = re.compile(r'onclick=.*?show_player.*?,.*?"([^\\]+)').findall(result)[0]
            post = None
        # NOTE: Python-2-only '<>' operators replaced with equivalent '!='.
        if content != 'movie':
            try:
                if post == 'post':
                    id, episode = re.compile('id=(\d+).*?&e=(\d*)').findall(url)[0]
                    post = {'id': id, 'e': episode, 'cat': 'episode'}
            except Exception:
                pass
        else:
            if post == 'post':
                id = re.compile('id=(\d+)').findall(url)[0]
                post = {'id': id, 'cat': 'movie'}
        if post is not None:
            result = client.request(url, post=post)
        url = re.findall(r"(https?:.*?)'\s+id='avail_links", result)[0]
        try:
            if 'google' in url:
                valid, hoster = source_utils.is_host_valid(url, hostDict)
                urls, host, direct = source_utils.check_directstreams(url, hoster)
                for x in urls:
                    sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
            else:
                valid, hoster = source_utils.is_host_valid(url, hostDict)
                # NOTE(review): 'valid' is never checked here, matching the
                # original behavior — confirm whether invalid hosts should
                # be skipped.
                sources.append({'source': hoster, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
        except Exception:
            pass
        return sources
    except Exception:
        return sources
示例9: sources
def sources(self, url, hostDict, hostprDict):
    """Collect data-video links, expanding vidnode/ocloud embeds inline.

    Returns a list of source dicts; empty list on any failure.
    """
    sources = []
    try:
        if url is None:
            return sources
        # These CDNs serve direct streams, so treat them as valid hosts.
        hostDict += ['akamaized.net', 'google.com', 'picasa.com', 'blogspot.com']
        result = client.request(url, timeout=10)
        dom = dom_parser.parse_dom(result, 'a', req='data-video')
        urls = [i.attrs['data-video'] if i.attrs['data-video'].startswith('https') else 'https:' + i.attrs['data-video'] for i in dom]
        for url in urls:
            dom = []
            if 'vidnode.net' in url:
                # Pull (src, label) pairs from <source> tags.
                result = client.request(url, timeout=10)
                dom = dom_parser.parse_dom(result, 'source', req=['src', 'label'])
                dom = [(i.attrs['src'] if i.attrs['src'].startswith('https') else 'https:' + i.attrs['src'], i.attrs['label']) for i in dom if i]
            elif 'ocloud.stream' in url:
                # Resolve relative embed links, then extract the file id.
                result = client.request(url, timeout=10)
                base = re.findall('<base href="([^"]+)">', result)[0]
                hostDict += [base]
                dom = dom_parser.parse_dom(result, 'a', req=['href', 'id'])
                dom = [(i.attrs['href'].replace('./embed', base + 'embed'), i.attrs['id']) for i in dom if i]
                dom = [(re.findall("var\s*ifleID\s*=\s*'([^']+)", client.request(i[0]))[0], i[1]) for i in dom if i]
            if dom:
                try:
                    for r in dom:
                        valid, hoster = source_utils.is_host_valid(r[0], hostDict)
                        if not valid:
                            continue
                        # NOTE(review): quality is computed but 'SD' is
                        # appended below, matching the original behavior.
                        quality = source_utils.label_to_quality(r[1])
                        streams, host, direct = source_utils.check_directstreams(r[0], hoster)
                        for x in streams:
                            # BUG FIX: 'size' was only assigned when direct
                            # was True, so indirect streams hit a NameError
                            # that the except silently swallowed, aborting
                            # the whole loop.
                            size = source_utils.get_size(x['url']) if direct else None
                            if size:
                                sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False, 'info': size})
                            else:
                                sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                except Exception:
                    pass
            else:
                valid, hoster = source_utils.is_host_valid(url, hostDict)
                if not valid:
                    continue
                try:
                    # Python 2 sanity check: reject non-utf8 urls.
                    url.decode('utf-8')
                    sources.append({'source': hoster, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                except Exception:
                    pass
        return sources
    except Exception:
        return sources
示例10: __get_movie_url
def __get_movie_url(self, data, hostDict):
    """Find a movie's embed player on mehlizmovies and build its sources.

    data -- dict with at least 'title'
    hostDict -- names of resolvable file hosts
    Returns a list of source dicts, '' for an unsupported host, or ""
    on unexpected errors (logged to stdout).
    """
    scraper = cfscrape.create_scraper()
    try:
        movie_page = scraper.get(self.base_link + "/movies/" + cleantitle.geturl(data['title']))
        embeds = re.findall('play-box-iframe.+\s<iframe.+?src=\"(.+?)\"', movie_page.text)[0]
        print("INFO - " + embeds)
        url = embeds
        sources = []
        if 'mehliz' not in url:
            # External hoster: validate it and emit a single entry.
            valid, hoster = source_utils.is_host_valid(url, hostDict)
            if not valid:
                return ''
            urls, host, direct = source_utils.check_directstreams(url, hoster)
            sources.append({
                'source': host,
                'quality': urls[0]['quality'],
                'language': 'en',
                'url': url + "|Referer=https://www.mehlizmovies.com",
                'direct': False,
                'debridonly': False
            })
        else:
            # Self-hosted player: scrape file/label pairs from the JS.
            player = scraper.get(url, headers={'referer': self.base_link + '/'})
            for stream, label in re.findall('file: \"(.+?)\".+?label: \"(.+?)\"', player.text):
                try:
                    sources.append({
                        'source': 'gvideo',
                        'quality': label,
                        'language': 'en',
                        'url': stream + "|Referer=https://www.mehlizmovies.com",
                        'direct': True,
                        'debridonly': False
                    })
                except Exception:
                    pass
        return sources
    except Exception:
        print("Unexpected error in Mehliz getMovieURL Script:")
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, exc_tb.tb_lineno)
        return ""
示例11: sources
def sources(self, url, hostDict, hostprDict):
    """Gather German-flagged player tabs and resolve each to a hoster link.

    Returns a list of source dicts (language 'de'); empty list on failure.
    """
    sources = []
    try:
        if not url:
            return sources
        url = urlparse.urljoin(self.base_link, url)
        page = client.request(url)
        # Collect the player tab entries together with their flag icons.
        tabs = dom_parser.parse_dom(page, 'nav', attrs={'class': 'player'})
        tabs = dom_parser.parse_dom(tabs, 'ul', attrs={'class': 'idTabs'})
        tabs = dom_parser.parse_dom(tabs, 'li')
        tabs = [(dom_parser.parse_dom(t, 'a', attrs={'class': 'options'}, req='href'), dom_parser.parse_dom(t, 'img', req='src')) for t in tabs]
        tabs = [(t[0][0].attrs['href'][1:], re.findall('/flags/(\w+)\.png$', t[1][0].attrs['src'])) for t in tabs if t[0] and t[1]]
        # Keep only tabs whose flag icon is German.
        german = [t[0] for t in tabs if len(t[1]) > 0 and t[1][0].lower() == 'de']
        # For each kept tab, take the inline link or the iframe src.
        panels = [dom_parser.parse_dom(page, 'div', attrs={'id': tab_id}) for tab_id in german]
        panels = [(re.findall('link"?\s*:\s*"(.+?)"', ''.join([x.content for x in p])), dom_parser.parse_dom(p, 'iframe', attrs={'class': 'metaframe'}, req='src')) for p in panels]
        candidates = [p[0][0] if p[0] else p[1][0].attrs['src'] for p in panels if p[0] or p[1]]
        for link in candidates:
            try:
                link = re.sub('\[.+?\]|\[/.+?\]', '', link)
                link = client.replaceHTMLCodes(link)
                if not link.startswith('http'):
                    link = self.__decode_hash(link)
                if 'play.seriesever' in link:
                    # One more indirection: fetch the page, pull its iframe.
                    inner = client.request(link)
                    frames = dom_parser.parse_dom(inner, 'iframe', req='src')
                    if len(frames) < 1:
                        continue
                    link = frames[0].attrs['src']
                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                urls, host, direct = source_utils.check_directstreams(link, host)
                for x in urls:
                    sources.append({'source': host, 'quality': x['quality'], 'language': 'de', 'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
示例12: sources
def sources(self, url, hostDict, hostprDict):
    """Scrape iframe hosters from a movie page (or a direct page url).

    Returns a list of source dicts; empty list on any failure.
    """
    sources = []
    try:
        if url is None:
            return sources
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = '%s/%s-%s/' % (self.base_link, cleantitle.geturl(data['title']), data['year'])
            url = client.request(url, output='geturl')
            if url is None:
                raise Exception()
            r = client.request(url)
        else:
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)
        links = client.parseDOM(r, 'iframe', ret='src')
        for link in links:
            try:
                valid, hoster = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                urls, host, direct = source_utils.check_directstreams(link, hoster)
                for x in urls:
                    # Upgrade the reported quality when the url hints at HD.
                    if x['quality'] == 'SD':
                        try:
                            if 'HDTV' in x['url'] or '720' in x['url']:
                                x['quality'] = 'HD'
                            if '1080' in x['url']:
                                x['quality'] = '1080p'
                        except Exception:
                            pass
                    sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
            except Exception:
                pass
        return sources
    except Exception:
        return sources
示例13: sources
def sources(self, url, hostDict, hostprDict):
    """Scrape hoster links, probing SD streams for hidden HD quality.

    NOTE(review): the original excerpt was truncated mid-function (the
    try/for blocks were never closed); completed here consistently with
    the sibling scrapers in this file.
    Returns a list of source dicts; empty list on any failure.
    """
    sources = []
    try:
        if url is None:
            return sources
        url = urlparse.urljoin(self.base_link, url)
        # The site is flaky, so retry the page fetch up to three times.
        result = None
        for _ in range(3):
            result = client.request(url)
            if result is not None:
                break
        # Hoster URLs are embedded in onclick="report('<url>')" handlers.
        links = re.compile('onclick="report\(\'([^\']+)').findall(result)
        for link in links:
            try:
                valid, hoster = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                urls, host, direct = source_utils.check_directstreams(link, hoster)
                for x in urls:
                    if x['quality'] == 'SD':
                        try:
                            result = client.request(x['url'], timeout=5)
                            # BUG FIX: the original assigned the tuple
                            # ('SD',); per the sibling scrapers a 720/HDTV
                            # hit should upgrade the quality to 'HD'.
                            if 'HDTV' in result or '720' in result:
                                x['quality'] = 'HD'
                            if '1080' in result:
                                x['quality'] = '1080p'
                        except Exception:
                            pass
                    sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
            except Exception:
                pass
        return sources
    except Exception:
        return sources
示例14: sources
def sources(self, url, hostDict, hostprDict):
    """Scrape an episode page, preferring debrid-resolvable hosts.

    The page sorts links by hoster, which is bad if the top hosters are
    unavailable for debrid OR are debrid-only (for non-debrid users), so
    the link list is shuffled; for debrid users, debrid-capable links are
    then re-stacked on top. Connections are capped once enough sources
    have been found.
    Returns a list of source dicts; empty list (with a logged traceback)
    on failure.
    """
    sources = []
    try:
        if url is None:
            return sources
        req = urlparse.urljoin(self.base_link, url)
        # Up to four attempts to pull up the episode page, then bail.
        result = None
        for _ in range(4):
            result = client.request(req, timeout=3)
            if result is not None:
                break
        # Grab the key div, then every (host name, watch-page url) pair.
        dom = dom_parser.parse_dom(result, 'div', attrs={'class': 'links', 'id': 'noSubs'})
        result = dom[0].content
        links = re.compile('<i class="fa fa-youtube link-logo"></i>([^<]+).*?href="([^"]+)"\s+class="watch', re.DOTALL).findall(result)
        random.shuffle(links)
        # Stack the deck for debrid users: copy every debrid-capable pair
        # to the front (each pair at most once, even if several resolvers
        # accept it).
        if debrid.status():
            debrid_links = [pair for pair in links
                            if any(r.valid_url('', pair[0].strip()) for r in debrid.debrid_resolvers)]
            links = debrid_links + links
        # Master list of hosts ResolveURL and the addon can resolve; used
        # to avoid wasting connections on unsupported hosts.
        hostDict = hostDict + hostprDict
        conns = 0
        for host_name, watch_path in links:
            # Be polite and limit connections (unless we're short on sources).
            if conns > self.max_conns and len(sources) > self.min_srcs:
                break
            valid, host = source_utils.is_host_valid(host_name.strip(), hostDict)
            if not valid:
                continue
            # Two attempts per source link, then bail.
            link = urlparse.urljoin(self.base_link, watch_path)
            result = None
            for _ in range(2):
                result = client.request(link, timeout=3)
                conns += 1
                if result is not None:
                    break
            # If both attempts failed, the regex below will too — skip.
            try:
                link = re.compile('href="([^"]+)"\s+class="action-btn').findall(result)[0]
            except Exception:
                continue
            # This scraper rarely (if ever) has direct links; the call
            # mainly determines the quality.
            try:
                u_q, host, direct = source_utils.check_directstreams(link, host)
            except Exception:
                continue
            # check_directstreams returns a list of dicts; take the first.
            link, quality = u_q[0]['url'], u_q[0]['quality']
            sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': link, 'direct': direct, 'debridonly': False})
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('WATCHSERIES - Exception: \n' + str(failure))
        return sources
示例15: sources
def sources(self, url, hostDict, hostprDict):
    """Loop over the site's source ids and build file-locker sources.

    Keyword arguments:
    url -- string - url params (must carry 'sources' and 'ts')
    Returns:
    sources -- list - source-information dictionaries
    """
    sources = []
    try:
        data = urlparse.parse_qs(url)
        data = dict((key, data[key][0]) for key in data)
        data['sources'] = re.findall("[^', u\]\[]+", data['sources'])
        for source_id in data['sources']:
            # Sign the info request with the site's token scheme.
            token = str(self.__token(
                {'id': source_id, 'update': 0, 'ts': data['ts']}))
            query = (self.info_path % (data['ts'], token, source_id))
            url = urlparse.urljoin(self.base_link, query)
            info_response = client.request(url, XHR=True)
            grabber_dict = json.loads(info_response)
            try:
                if grabber_dict['type'] == 'direct':
                    # Direct grabber: fetch the stream list and emit each file.
                    token64 = grabber_dict['params']['token']
                    query = (self.grabber_path % (data['ts'], source_id, token64))
                    url = urlparse.urljoin(self.base_link, query)
                    response = client.request(url, XHR=True)
                    for entry in json.loads(response)['data']:
                        label = entry['label'] if not entry['label'] == '' else 'SD'
                        quality = source_utils.label_to_quality(label)
                        if 'googleapis' in entry['file']:
                            sources.append({'source': 'GVIDEO', 'quality': quality, 'language': 'en', 'url': entry['file'], 'direct': True, 'debridonly': False})
                            continue
                        valid, hoster = source_utils.is_host_valid(entry['file'], hostDict)
                        urls, host, direct = source_utils.check_directstreams(entry['file'], hoster)
                        for x in urls:
                            sources.append({
                                'source': 'gvideo',
                                'quality': quality,
                                'language': 'en',
                                'url': x['url'],
                                'direct': True,
                                'debridonly': False
                            })
                elif not grabber_dict['target'] == '':
                    # Embed target: normalise the scheme and resolve the host.
                    target = grabber_dict['target']
                    url = target if target.startswith('http') else 'https:' + target
                    valid, hoster = source_utils.is_host_valid(url, hostDict)
                    if not valid:
                        continue
                    urls, host, direct = source_utils.check_directstreams(url, hoster)
                    url = urls[0]['url']
                    if 'cloud.to' in host:
                        # cloud.to requires a Referer header to serve files.
                        url = url + source_utils.append_headers({'Referer': self.base_link})
                    sources.append({
                        'source': hoster,
                        'quality': urls[0]['quality'],
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
            except:
                pass
        return sources
    except Exception:
        return sources