本文整理汇总了Python中resources.lib.modules.source_utils.is_host_valid函数的典型用法代码示例。如果您正苦于以下问题:Python is_host_valid函数的具体用法?Python is_host_valid怎么用?Python is_host_valid使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了is_host_valid函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: sources
def sources(self, url, hostDict, hostprDict):
    """Scrape hoster links for the given page URL.

    Returns a list of source dicts ({'source', 'quality', 'language',
    'url', 'direct', 'debridonly'}) for hosters found on the page.
    """
    try:
        sources = []
        r = requests.get(url).content
        # Page-level quality label, e.g. <span class="quality">1080p</span>.
        # Fix: default to 'SD' so 'quality' is never unbound when the page
        # has no quality tag (the original raised NameError in that case).
        quality = 'SD'
        for i in re.compile('class="quality">(.+?)<').findall(r):
            if '1080' in i:
                quality = '1080p'
            elif '720' in i:
                quality = '720p'
            else:
                quality = 'SD'
        u = client.parseDOM(r, "div", attrs={"class": "pa-main anime_muti_link"})
        for t in u:
            for url in re.findall('<li class=".+?" data-video="(.+?)"', t):
                if 'vidcloud' in url:
                    # vidcloud pages embed the real hoster links one level deeper.
                    url = 'https:' + url
                    r = requests.get(url).content
                    for url in re.findall('li data-status=".+?" data-video="(.+?)"', r):
                        if 'vidcloud' in url:
                            continue
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                if 'vidcloud' in url:
                    continue
                valid, host = source_utils.is_host_valid(url, hostDict)
                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
        return sources
    except:
        # Fix: return the collected list (possibly empty) instead of None,
        # consistent with the other providers; callers iterate the result.
        return sources
示例2: sources
def sources(self, url, hostDict, hostprDict):
    """Scrape hoster links for episode 1 of the given show URL."""
    try:
        sources = []
        url = url + 'watching/?ep=1'
        r = self.scraper.get(url).content
        # Pairs of (link title, embed URL); the title encodes the quality.
        r = re.compile('a title="(.+?)" data-svv.+?="(.+?)"').findall(r)
        for title, url in r:
            if 'HD' in title:
                quality = '1080p'
            elif 'CAM' in title:
                quality = 'CAM'
            else:
                quality = 'SD'
            if 'vidcloud' in url:
                # vidcloud pages list the real hoster links one level deeper.
                r = self.scraper.get(url).content
                t = re.findall('li data-status=".+?" data-video="(.+?)"', r)
                for url in t:
                    if 'vidcloud' in url:
                        continue
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
            if 'vidcloud' in url:
                continue
            valid, host = source_utils.is_host_valid(url, hostDict)
            sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
        return sources
    except:
        # Fix: removed leftover Python-2 debug prints and return the
        # collected list instead of None so callers can always iterate.
        return sources
示例3: sources
def sources(self, url, hostDict, hostprDict):
    """Search the site for the title and harvest up to 10 hoster links.

    url encodes a query string produced by the scraper framework
    (title/year, plus season/episode for shows, plus an 'aliases' repr).
    """
    try:
        sources = []
        if url == None: return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        # NOTE(review): eval() on framework-supplied data — prefer
        # ast.literal_eval if this string can ever be externally influenced.
        aliases = eval(data['aliases'])
        headers = {}
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        year = data['year']
        if 'tvshowtitle' in data:
            episode = data['episode']
            season = data['season']
            url = self._search(data['tvshowtitle'], data['year'], aliases, headers)
            # Rewrite the show URL into the specific episode URL.
            url = url.replace('online-free','season-%s-episode-%s-online-free'%(season,episode))
        else:
            episode = None
            year = data['year']
            url = self._search(data['title'], data['year'], aliases, headers)
        url = url if 'http' in url else urlparse.urljoin(self.base_link, url)
        result = client.request(url);
        result = client.parseDOM(result, 'li', attrs={'class':'link-button'})
        links = client.parseDOM(result, 'a', ret='href')
        i = 0  # counts accepted non-google sources; capped at 10 below
        for l in links:
            if i == 10: break
            try:
                l = l.split('=')[1]
                l = urlparse.urljoin(self.base_link, self.video_link%l)
                # The endpoint returns the real stream/embed URL in the body.
                result = client.request(l, post={}, headers={'Referer':url})
                u = result if 'http' in result else 'http:'+result
                if 'google' in u:
                    # Google links may be direct streams with per-stream quality.
                    valid, hoster = source_utils.is_host_valid(u, hostDict)
                    urls, host, direct = source_utils.check_directstreams(u, hoster)
                    for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                else:
                    valid, hoster = source_utils.is_host_valid(u, hostDict)
                    if not valid: continue
                    try:
                        u.decode('utf-8')  # Python 2: skip non-UTF-8 byte strings
                        sources.append({'source': hoster, 'quality': 'SD', 'language': 'en', 'url': u, 'direct': False, 'debridonly': False})
                        i+=1
                    except:
                        pass
            except:
                pass
        return sources
    except:
        return sources
示例4: sources
def sources(self, url, hostDict, hostprDict):
    """Collect hoster links from a post's entry-content block.

    ok.ru and vk.com embeds are resolved via directstream; everything
    else is matched against the free (hostDict) then premium
    (hostprDict, debrid-only) hoster lists.
    """
    try:
        sources = []
        if url == None: return sources
        r = client.request(url)
        # Placeholder page served for blocked/removed posts — nothing to scrape.
        if '<meta name="application-name" content="Unblocked">' in r: return sources
        r = client.parseDOM(r, 'div',attrs={'class':'entry-content'})[0]
        # Candidate links from iframes, anchors and <source> tags.
        frames = []
        frames += client.parseDOM(r, 'iframe', ret='src')
        frames += client.parseDOM(r, 'a', ret='href')
        frames += client.parseDOM(r, 'source', ret='src')
        try:
            q = re.findall('<strong>Quality:</strong>([^<]+)', r)[0]
            if 'high' in q.lower(): quality = '720p'
            elif 'cam' in q.lower(): quality = 'CAM'
            else: quality = 'SD'
        except: quality = 'SD'
        for i in frames:
            try:
                if 'facebook' in i or 'plus.google' in i: continue
                url = i
                # NOTE(review): the '%s' placeholder is never formatted, so this
                # endswith() test matches a literal 'embed/%s' suffix and is
                # almost certainly dead — confirm the intended suffix.
                if 'https://openload.co' in url and url.lower().endswith(('embed/%s')):
                    sources.append({'source': 'CDN', 'quality': quality, 'language': 'en', 'url': url,
                                    'info': '', 'direct': False, 'debridonly': False})
                elif 'ok.ru' in url:
                    print url  # leftover Python-2 debug output
                    host = 'vk'
                    # Resolve the Odnoklassniki embed to stream URLs.
                    url = directstream.odnoklassniki(url)
                    print url  # leftover Python-2 debug output
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url,
                                    'info': '', 'direct': False, 'debridonly': False})
                elif 'vk.com' in url:
                    host = 'vk'
                    url = directstream.vk(url)
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url,
                                    'info': '', 'direct': False, 'debridonly': False})
                else:
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid:
                        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url,
                                        'info': '', 'direct': False, 'debridonly': False})
                    else:
                        # Fall back to premium hosters — those require debrid.
                        valid, host = source_utils.is_host_valid(url, hostprDict)
                        if not valid: continue
                        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url,
                                        'info': '', 'direct': False, 'debridonly': True})
            except:
                pass
        return sources
    except:
        return sources
示例5: sources
def sources(self, url, hostDict, hostprDict):
    """Resolve the site's player endpoint into a single hoster link.

    Works for both movies and episodes; the page markup decides whether
    the player data is fetched via AJAX POST or a plain onclick URL.
    """
    try:
        sources = []
        if url == None: return sources
        content = 'episode' if 'episode' in url else 'movie'
        result = client.request(url)
        try:
            # Follow the play-button container to the actual player page.
            url = re.findall(r"class\s*=\s*'play_container'\s+href\s*=\s*'([^']+)", result)[0]
            result = client.request(url, timeout='10')
        except:
            pass
        try:
            # Preferred: the AJAX URL used by the in-page player ...
            url = re.compile('ajax\(\{\s*url\s*:\s*[\'"]([^\'"]+)').findall(result)[0]
            post = 'post'
        except:
            # ... otherwise fall back to the show_player onclick URL (no POST).
            url = re.compile(r'onclick=.*?show_player.*?,.*?"([^\\]+)').findall(result)[0]
            post = None
        if content <> 'movie':  # Python-2 inequality operator (same as !=)
            try:
                if post == 'post':
                    # Build the episode POST payload from the AJAX URL's query.
                    id, episode = re.compile('id=(\d+).*?&e=(\d*)').findall(url)[0]
                    post = {'id': id, 'e': episode, 'cat': 'episode'}
            except:
                pass
        else:
            if post == 'post':
                id = re.compile('id=(\d+)').findall(url)[0]
                post = {'id': id, 'cat': 'movie'}
        if post <> None:
            result = client.request(url, post=post)
        # The response embeds the links URL next to id='avail_links'.
        url = re.findall(r"(https?:.*?)'\s+id='avail_links",result)[0]
        try:
            if 'google' in url:
                # Google links may be direct streams with per-stream quality.
                valid, hoster = source_utils.is_host_valid(url, hostDict)
                urls, host, direct = source_utils.check_directstreams(url, hoster)
                for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
            else:
                valid, hoster = source_utils.is_host_valid(url, hostDict)
                sources.append({'source': hoster, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
        except:
            pass
        return sources
    except:
        return sources
示例6: sources
def sources(self, url, hostDict, hostprDict):
    """Resolve data-video embeds (vidnode/ocloud) into playable sources."""
    try:
        sources = []
        if url == None: return sources
        # These CDNs serve direct streams, so treat them as valid "hosters".
        hostDict += ['akamaized.net', 'google.com', 'picasa.com', 'blogspot.com']
        result = client.request(url, timeout=10)
        dom = dom_parser.parse_dom(result, 'a', req='data-video')
        urls = [i.attrs['data-video'] if i.attrs['data-video'].startswith('https') else 'https:' + i.attrs['data-video'] for i in dom]
        for url in urls:
            # dom collects (stream_url, quality_label) pairs for embed hosts
            # that need a second fetch; empty means use the url directly.
            dom = []
            if 'vidnode.net' in url:
                result = client.request(url, timeout=10)
                dom = dom_parser.parse_dom(result, 'source', req=['src','label'])
                dom = [(i.attrs['src'] if i.attrs['src'].startswith('https') else 'https:' + i.attrs['src'], i.attrs['label']) for i in dom if i]
            elif 'ocloud.stream' in url:
                result = client.request(url, timeout=10)
                base = re.findall('<base href="([^"]+)">', result)[0]
                hostDict += [base]
                dom = dom_parser.parse_dom(result, 'a', req=['href','id'])
                dom = [(i.attrs['href'].replace('./embed', base + 'embed'), i.attrs['id']) for i in dom if i]
                dom = [(re.findall("var\s*ifleID\s*=\s*'([^']+)", client.request(i[0]))[0], i[1]) for i in dom if i]
            if dom:
                try:
                    for r in dom:
                        valid, hoster = source_utils.is_host_valid(r[0], hostDict)
                        if not valid: continue
                        # Fix: use the parsed label quality instead of the
                        # hard-coded 'SD' the original appended (the variable
                        # was computed but never used).
                        quality = source_utils.label_to_quality(r[1])
                        urls, host, direct = source_utils.check_directstreams(r[0], hoster)
                        for x in urls:
                            # Fix: 'size' was only assigned on the direct path,
                            # so the first non-direct stream raised NameError
                            # (silently swallowed) and dropped the whole batch.
                            size = source_utils.get_size(x['url']) if direct else None
                            if size:
                                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False, 'info': size})
                            else:
                                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                except:
                    pass
            else:
                valid, hoster = source_utils.is_host_valid(url, hostDict)
                if not valid: continue
                try:
                    url.decode('utf-8')  # Python 2: skip non-UTF-8 byte strings
                    sources.append({'source': hoster, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                except:
                    pass
        return sources
    except:
        return sources
示例7: sources
def sources(self, url, hostDict, hostprDict):
    """Build the list of stream sources for a release page (German site)."""
    sources = []
    try:
        if not url:
            return sources
        page = client.request(urlparse.urljoin(self.base_link, url))
        # Quality is the first word of the release text, e.g. "720p WEB-DL".
        release = dom_parser.parse_dom(page, 'span', attrs={'id': 'release_text'})[0].content.split(' ')[0]
        quality, info = source_utils.get_release_quality(release)
        entries = []
        for block in dom_parser.parse_dom(page, 'ul', attrs={'class': 'currentStreamLinks'}):
            names = dom_parser.parse_dom(block, 'p', attrs={'class': 'hostName'})
            anchors = dom_parser.parse_dom(block, 'a', attrs={'class': 'stream-src'}, req='data-id')
            if not names or not anchors:
                continue
            # Strip a trailing " hd" marker from the hoster name.
            host_name = re.sub(' hd$', '', names[0].content.lower())
            entries.append((host_name, [a.attrs['data-id'] for a in anchors]))
        for hoster, ids in entries:
            valid, hoster = source_utils.is_host_valid(hoster, hostDict)
            if not valid:
                continue
            part_info = '' if len(ids) == 1 else 'multi-part'
            sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'info': ' | '.join(info + [part_info]), 'url': ids, 'direct': False, 'debridonly': False, 'checkquality': True})
        return sources
    except:
        return sources
示例8: sources
def sources(self, url, hostDict, hostprDict):
    """Collect hoster links from the user-generated content block (Greek, subbed)."""
    sources = []
    try:
        if not url:
            return sources
        html = client.request(urlparse.urljoin(self.base_link, url))
        container = client.parseDOM(html, 'div', attrs={'class': 'xg_user_generated'})
        for anchor in dom_parser.parse_dom(container, 'a'):
            link = anchor[0]['href']
            # Skip trailers and similar non-stream links.
            if 'youtube' in link:
                continue
            valid, host = source_utils.is_host_valid(link, hostDict)
            # hdvid is not in the hoster list but is known to work here.
            if 'hdvid' in host:
                valid = True
            if not valid:
                continue
            sources.append({'source': host, 'quality': 'SD', 'language': 'gr', 'url': link, 'info': 'SUB',
                            'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
示例9: sources
def sources(self, url, hostDict, hostprDict):
    """Collect hoster links for an anime episode (German dub/sub site)."""
    sources = []
    try:
        if not url:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        url = data.get('url')
        episode = int(data.get('episode', 1))
        r = client.request(urlparse.urljoin(self.base_link, url))
        # Two tabs on the page: dubbed (no info tag) and subbed releases.
        r = {'': dom_parser.parse_dom(r, 'div', attrs={'id': 'gerdub'}), 'subbed': dom_parser.parse_dom(r, 'div', attrs={'id': 'gersub'})}
        for info, data in r.iteritems():  # Python-2 dict iteration
            # Keep only table rows containing an anchor for this episode number.
            data = dom_parser.parse_dom(data, 'tr')
            data = [dom_parser.parse_dom(i, 'a', req='href') for i in data if dom_parser.parse_dom(i, 'a', attrs={'id': str(episode)})]
            # Pair each link with its hoster logo image (if present).
            data = [(link.attrs['href'], dom_parser.parse_dom(link.content, 'img', req='src')) for i in data for link in i]
            data = [(i[0], i[1][0].attrs['src']) for i in data if i[1]]
            # Hoster name is the logo filename without its extension.
            data = [(i[0], re.findall('/(\w+)\.\w+', i[1])) for i in data]
            data = [(i[0], i[1][0]) for i in data if i[1]]
            for link, hoster in data:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue
                sources.append({'source': hoster, 'quality': 'SD', 'language': 'de', 'url': link, 'info': info, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
示例10: sources
def sources(self, url, hostDict, hostprDict):
    """Search the site's WordPress API for a title/year match and resolve
    its nested iframes down to a single hoster URL."""
    sources = []
    try:
        if url == None:
            return
        params = urlparse.parse_qs(url)
        params = dict((k, params[k][0]) for k in params)
        title = params['title'].replace(':', ' ').lower()
        year = params['year']
        start_url = self.search_link % (self.base_link, title.lower().replace(' ', '%20'))
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
        html = client.request(start_url, headers=headers)
        posts = re.compile('"post","link":"(.+?)","title".+?"rendered":"(.+?)"', re.DOTALL).findall(html)
        for link, name in posts:
            link = link.replace('\\', '')
            # Require both the title and the year to appear in the post name.
            if title.lower() not in name.lower() or year not in name:
                continue
            holder = client.request(link, headers=headers)
            # Two levels of iframes wrap the actual hoster embed.
            outer = re.compile('<iframe src="(.+?)"', re.DOTALL).findall(holder)[0]
            inner = client.request(outer, headers=headers)
            final_url = re.compile('<iframe src="(.+?)"', re.DOTALL).findall(inner)[0]
            valid, host = source_utils.is_host_valid(final_url, hostDict)
            sources.append({'source': host, 'quality': '1080p', 'language': 'en', 'url': final_url, 'info': [], 'direct': False, 'debridonly': False})
        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('1080PMovies - Exception: \n' + str(failure))
        return sources
示例11: sources
def sources(self, url, hostDict, hostprDict):
    """Query the site's JSON API for hoster links (German)."""
    sources = []
    try:
        if url == None:
            return sources
        params = urlparse.parse_qs(url)
        params = dict([(k, params[k][0]) if params[k] else (k, '') for k in params])
        params.update({'raw': 'true', 'language': 'de'})
        payload = urllib.urlencode(params)
        response = client.request(urlparse.urljoin(self.base_link, self.request_link), post=payload)
        response = json.loads(response)
        # response[1] maps ids to {'name': hoster, 'links': [...]}.
        hosters = [(entry['name'].lower(), entry['links']) for entry in (item[1] for item in response[1].items())]
        for host, links in hosters:
            valid, host = source_utils.is_host_valid(host, hostDict)
            if not valid:
                continue
            for link in links:
                try:
                    sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': link['URL'], 'direct': False, 'debridonly': False})
                except:
                    pass
        return sources
    except:
        return sources
示例12: sources
def sources(self, url, hostDict, hostprDict):
    """Parse the downloads table into hoster sources (Polish site)."""
    sources = []
    try:
        if url == None:
            return sources
        html = client.request(url)
        table = client.parseDOM(html, 'div', attrs={'id':'downloads'})[0]
        for row in client.parseDOM(table, 'tr'):
            try:
                cells = client.parseDOM(row, 'td')
                # Hoster name is encoded at the end of the logo image URL.
                hoster = client.parseDOM(cells[0], 'img', ret='src')[0].rpartition('=')[-1]
                link = client.parseDOM(cells[0], 'a', ret='href')[0]
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid:
                    continue
                # 'Wysoka' means 'high' in Polish -> HD, otherwise SD.
                quality = 'HD' if 'Wysoka' in cells[1] else 'SD'
                lang, info = self.get_lang_by_type(cells[2])
                sources.append({'source': hoster, 'quality': quality, 'language': lang, 'url': link, 'info': info, 'direct': False, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
示例13: sources
def sources(self, url, hostDict, hostprDict):
    """Collect hoster links for a movie or a single episode (French site)."""
    sources = []
    try:
        if not url:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        page = client.request(urlparse.urljoin(self.base_link, data['url']))
        episode = data.get('episode')
        if episode:
            # Episode pages hide each episode's links behind a tab whose
            # data-rel attribute points at the matching panel div.
            tabs = dom_parser.parse_dom(page, 'a', attrs={'class': 'fstab', 'title': re.compile('Episode %s$' % episode)}, req='data-rel')
            panels = [dom_parser.parse_dom(page, 'div', attrs={'id': t.attrs['data-rel']}) for t in tabs]
            page = ' '.join(p[0].content for p in panels if p)
        blocks = dom_parser.parse_dom(page, 'div', attrs={'class': re.compile('s?elink')})
        for link in [a.attrs['href'] for a in dom_parser.parse_dom(blocks, 'a', req='href')]:
            valid, hoster = source_utils.is_host_valid(link, hostDict)
            if not valid:
                continue
            sources.append({'source': hoster, 'quality': 'SD', 'language': 'fr', 'url': link, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
示例14: sources
def sources(self, url, hostDict, hostprDict):
    """Resolve links found on the page into sources.

    mehliz and ok.ru links are resolved to direct streams; everything
    else is treated as an embed on the detected hoster.
    """
    sources = []
    try:
        if not url:
            return sources
        links = self.links_found(url)
        # Premium hosters are accepted too (debrid handling is elsewhere).
        hostdict = hostDict + hostprDict
        for url in links:
            try:
                valid, host = source_utils.is_host_valid(url, hostdict)
                # NOTE(review): 'valid' is never checked, so unrecognised hosts
                # fall through to the generic branch below — confirm intended.
                if 'mehliz' in url:
                    host = 'MZ'; direct = True; urls = (self.mz_server(url))
                elif 'ok.ru' in url:
                    host = 'vk'; direct = True; urls = (directstream.odnoklassniki(url))
                else:
                    direct = False; urls = [{'quality': 'SD', 'url': url}]
                for x in urls:
                    sources.append({'source': host, 'quality': x['quality'], 'language': 'en',
                                    'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
示例15: sources
def sources(self, url, hostDict, hostprDict):
    """Collect hoster links (German); episodes are fetched through an XHR
    endpoint, movies straight from the page's link box."""
    sources = []
    try:
        if not url:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        page_url = urlparse.urljoin(self.base_link, data.get('url'))
        season = data.get('season')
        episode = data.get('episode')
        if season and episode:
            # Episode link boxes are served by the hoster XHR endpoint.
            payload = urllib.urlencode({'imdbid': data['imdb'], 'language': 'de', 'season': season, 'episode': episode})
            html = client.request(urlparse.urljoin(self.base_link, self.hoster_link), XHR=True, post=payload)
        else:
            html = client.request(page_url)
        box = dom_parser.parse_dom(html, 'div', attrs={'class': 'linkbox'})[0].content
        for snippet in re.compile('(<a.+?/a>)', re.DOTALL).findall(box):
            anchors = dom_parser.parse_dom(snippet, 'a', req='href')
            buttons = dom_parser.parse_dom(snippet, 'img', attrs={'class': re.compile('.*linkbutton')}, req='class')
            if not anchors or not buttons:
                continue
            link = anchors[0].attrs['href'].strip()
            # The button's CSS class encodes the quality (hd* -> HD).
            quality = 'HD' if buttons[0].attrs['class'].lower().startswith('hd') else 'SD'
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid:
                continue
            sources.append({'source': host, 'quality': quality, 'language': 'de', 'url': link, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources