本文整理汇总了Python中resources.lib.modules.debrid.status函数的典型用法代码示例。如果您正苦于以下问题:Python status函数的具体用法?Python status怎么用?Python status使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了status函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: sources
def sources(self, url, hostDict, hostprDict):
    """Search the indexer for the requested title and gather sources.

    Spawns one worker thread per result item; the threads fill
    ``self._sources`` via ``self._get_sources``. Returns the collected
    list, or an empty list when url is missing, debrid is disabled, or
    anything fails.
    """
    try:
        self._sources = []
        self.items = []
        if url is None:
            return self._sources
        if debrid.status() is False:
            raise Exception()

        params = urlparse.parse_qs(url)
        params = {k: (v[0] if v else '') for k, v in params.items()}

        episode_mode = 'tvshowtitle' in params
        self.title = params['tvshowtitle'] if episode_mode else params['title']
        if episode_mode:
            self.hdlr = 'S%02dE%02d' % (int(params['season']), int(params['episode']))
            query = '%s S%02dE%02d' % (params['tvshowtitle'], int(params['season']), int(params['episode']))
        else:
            self.hdlr = params['year']
            query = '%s %s' % (params['title'], params['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        self._get_items(self.search.format(urllib.quote(query)))
        self.hostDict = hostDict + hostprDict

        pool = [workers.Thread(self._get_sources, item) for item in self.items]
        for worker in pool:
            worker.start()
        for worker in pool:
            worker.join()
        return self._sources
    except BaseException:
        return self._sources
示例2: movie
def movie(self, imdb, title, year):
    """Resolve a movie's site-relative page path via the JSON search endpoint.

    Returns the utf-8 encoded path on an exact title+year match, or None
    when debrid is disabled, nothing matches, or any error occurs.
    """
    try:
        if debrid.status() == False:
            raise Exception()
        wanted = cleantitle.get(title)
        search_url = urlparse.urljoin(self.base_link, self.search_link + urllib.quote_plus(title))
        response = client.request(search_url, headers={'X-Requested-With': 'XMLHttpRequest'})
        candidates = []
        for entry in json.loads(response):
            if 'category' not in entry or 'movie' not in entry['category'].lower():
                continue
            if 'label' not in entry or 'url' not in entry:
                continue
            parsed = re.findall('(.+?) \((\d{4})', entry['label'])
            if parsed:
                candidates.append((entry['url'], parsed[0][0], parsed[0][1]))
        match = [c[0] for c in candidates if wanted == cleantitle.get(c[1]) and year == c[2]][0]
        path = re.findall('(?://.+?|)(/.+)', match)[0]
        path = client.replaceHTMLCodes(path)
        return path.encode('utf-8')
    except:
        return
示例3: sources
def sources(self, url, hostDict, hostprDict):
    """Fetch the title's page and hand each comment <li> to _get_sources.

    Worker threads populate ``self._sources``; returns that list (empty
    on a missing url, disabled debrid, or any error).
    """
    try:
        self._sources = []
        if url is None:
            return self._sources
        if debrid.status() is False:
            raise Exception()

        params = urlparse.parse_qs(url)
        params = {k: (v[0] if v else '') for k, v in params.items()}

        if 'tvshowtitle' in params:
            query = '%s S%02dE%02d' % (params['tvshowtitle'], int(params['season']), int(params['episode']))
        else:
            query = '%s %s' % (params['title'], params['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        page_url = urlparse.urljoin(self.base_link, cleantitle.geturl(query))
        html = client.request(page_url, headers={'User-Agent': client.agent()})
        posts = dom_parser2.parse_dom(html, 'li', {'class': re.compile('.+?'), 'id': re.compile('comment-.+?')})

        self.hostDict = hostDict + hostprDict
        pool = [workers.Thread(self._get_sources, post.content) for post in posts]
        for worker in pool:
            worker.start()
        for worker in pool:
            worker.join()
        return self._sources
    except Exception:
        return self._sources
示例4: sources
def sources(self, url, hostDict, hostprDict):
    """Collect debrid-only download sources from a movie page.

    Looks up the page's movie_id, posts it to the download endpoint and
    parses each <p> block for a hoster name, link, quality and size.
    Returns a list of source dicts; [] on failure.
    """
    try:
        sources = []
        if url == None: return sources
        if debrid.status() == False: raise Exception()

        url = urlparse.urljoin(self.base_link, url)
        r = client.request(url)
        r = client.parseDOM(r, 'input', {'id': 'movie_id'}, ret='value')
        if r:
            r = client.request(urlparse.urljoin(self.base_link, self.download_link), post='movie=%s' % r, referer=url)
        links = client.parseDOM(r, 'p')

        hostDict = hostprDict + hostDict
        # map each hoster name without its TLD to the full entry,
        # e.g. 'rapidgator' -> 'rapidgator.net'
        locDict = [(i.rsplit('.', 1)[0], i) for i in hostDict]

        for link in links:
            try:
                host = re.findall('Downloads-Server(.+?)(?:\'|\")\)', link)[0]
                host = host.strip().lower().split()[-1]
                if host == 'fichier': host = '1fichier'
                host = [x[1] for x in locDict if host == x[0]][0]
                if not host in hostDict: raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                url = client.parseDOM(link, 'a', ret='href')[0]
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                text = client.parseDOM(link, 'a')[0]
                fmt = text.strip().lower().split()
                if '1080p' in fmt:
                    quality = '1080p'
                elif '720p' in fmt:
                    quality = 'HD'
                else:
                    # bug fix: `quality` was previously left unbound on this
                    # path, so any non-1080p/720p link raised NameError and
                    # was silently dropped by the bare except below.
                    quality = 'SD'

                try:
                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) [M|G]B)', text)[-1]
                    div = 1 if size.endswith(' GB') else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    info = '%.2f GB' % size
                except:
                    info = ''

                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
            except:
                pass
        return sources
    except:
        return sources
示例5: sources
def sources(self, url, hostDict, hostprDict):
# Search the site via its POST search form, match results by IMDB id and
# release year, then scrape each result page's r-content block for named
# download links, classifying quality from the link name.
# NOTE(review): this sample is truncated in the source document -- the
# quality chain below ends mid-function (no append/return/except follows).
try:
sources = []
if url == None: return sources
# Premium gate: only proceed when a debrid service is configured.
if debrid.status() is False: raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['title'].replace(':','').lower()
year = data['year']
query = '%s %s' % (data['title'], data['year'])
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
url = urlparse.urljoin(self.base_link, self.post_link)
post = 'do=search&subaction=search&search_start=0&full_search=0&result_from=1&story=%s' % urllib.quote_plus(query)
r = client.request(url, post=post)
r = client.parseDOM(r, 'div', attrs={'class': 'box-out margin'})
# Keep only result boxes that mention the wanted IMDB id.
r = [(dom_parser2.parse_dom(i, 'div', attrs={'class':'news-title'})) for i in r if data['imdb'] in i]
r = [(dom_parser2.parse_dom(i[0], 'a', req='href')) for i in r if i]
r = [(i[0].attrs['href'], i[0].content) for i in r if i]
hostDict = hostprDict + hostDict
for item in r:
try:
name = item[1]
# Require an exact year match from the "(YYYY)" part of the result title.
y = re.findall('\((\d{4})\)', name)[0]
if not y == year: raise Exception()
# Grab the release size string from the title, if present.
s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', name)
s = s[0] if s else '0'
# NOTE(review): `data` (the parsed query dict) is clobbered here with
# the fetched page HTML; works only because the dict is no longer needed.
data = client.request(item[0])
data = dom_parser2.parse_dom(data, 'div', attrs={'id': 'r-content'})
data = re.findall('\s*<b><a href=.+?>(.+?)</b>.+?<u><b><a href="(.+?)".+?</a></b></u>',
data[0].content, re.DOTALL)
u = [(i[0], i[1], s) for i in data if i]
for name, url, size in u:
try:
if '4K' in name:
quality = '4K'
elif '2160p' in name:
quality = '4K'
elif '1440p' in name:
quality = '4K'
elif '1080p' in name:
quality = '1080p'
elif '720p' in name:
quality = '720p'
# NOTE(review): this iterates the CHARACTERS of `name`, so no single
# character can equal 'dvdscr'/'r5'/'r6' and the branch never fires --
# likely meant any(i in name for i in ['dvdscr', 'r5', 'r6']).
elif any(i in ['dvdscr', 'r5', 'r6'] for i in name):
quality = 'SCR'
示例6: tvshow
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    """Return an urlencoded payload identifying the show, or None when
    debrid is disabled or encoding fails."""
    if debrid.status(True) is False:
        return
    try:
        payload = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
        return urllib.urlencode(payload)
    except Exception:
        return
示例7: tvshow
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    """Return an urlencoded payload identifying the show.

    Returns None when debrid is disabled; on error the traceback is
    logged and None is returned.
    """
    if debrid.status(True) is False:
        return
    try:
        payload = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
        return urllib.urlencode(payload)
    except Exception:
        log_utils.log('TPB - Exception: \n' + str(traceback.format_exc()))
        return
示例8: movie
def movie(self, imdb, title, localtitle, aliases, year):
    """Probe the site for the movie's page URL.

    Tries "<slug>-<year>" first, then the bare "<slug>"; returns the
    resolved URL, or None when debrid is off or neither page exists.
    """
    try:
        if debrid.status() == False:
            raise Exception()
        slug = cleantitle.geturl(title)
        resolved = client.request(urlparse.urljoin(self.base_link, '%s-%s' % (slug, year)), output='geturl')
        if resolved == None:
            resolved = client.request(urlparse.urljoin(self.base_link, '%s' % slug), output='geturl')
        if resolved == None:
            raise Exception()
        return resolved
    except:
        return
示例9: sources
def sources(self, url, hostDict, hostprDict):
    """Scrape iframe embeds from the page and return hoster sources.

    Always returns a list (possibly empty) so callers can iterate the
    result safely.
    """
    sources = []
    try:
        if debrid.status() == False: raise Exception()
        html = self.scraper.get(url).content
        for link in re.findall('<iframe src="(.+?)"', html):
            valid, host = source_utils.is_host_valid(link, hostDict)
            # NOTE(review): `valid` is computed but never checked before
            # appending -- presumably invalid hosts should be skipped;
            # kept as-is to preserve behavior. TODO confirm.
            quality = source_utils.check_sd_url(link)
            sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': link, 'direct': False, 'debridonly': False})
        return sources
    except:
        # bug fix: previously returned None here (bare `return`), which is
        # inconsistent with every sibling scraper and breaks callers that
        # iterate the result; return the list built so far instead.
        return sources
示例10: sources
def sources(self, url, hostDict, hostprDict):
# Build debrid-only sources for one TV episode: look the show up in the
# cached series index, pull the episode links from each matching season
# page, then resolve any "protect-links" interstitials into the real
# hoster URL. Returns a list of source dicts; [] on failure.
try:
sources = []
if url == None: return sources
# Premium gate: these links are only usable through a debrid service.
if debrid.status() == False: raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle']
season = '%01d' % int(data['season'])
episode = '%02d' % int(data['episode'])
# Cached site index (120 minute TTL); rows are indexed as i[0]=url,
# i[1]=title, i[2]=season, i[3]=quality -- presumably; TODO confirm
# against ddlseries_tvcache.
r = cache.get(self.ddlseries_tvcache, 120)
r = [(i[0], i[3]) for i in r if cleantitle.get(title) == cleantitle.get(i[1]) and season == i[2]]
links = []
for url, quality in r:
try:
link = client.request(url)
vidlinks = client.parseDOM(link, 'span', attrs = {'class': 'overtr'})[0]
# Pair each href with its "Episode N" label, keep only the wanted episode.
match = re.compile('href="([^"]+)[^>]*>\s*Episode\s+(\d+)<').findall(vidlinks)
match = [(i[0], quality) for i in match if episode == i[1]]
links += match
except:
pass
for url, quality in links:
try:
# protect-links pages are redirect interstitials; fetch and extract
# the real target link.
if "protect-links" in url:
redirect = client.request(url)
url = re.findall('<a href="(.*?)" target="_blank">', redirect)
url = url[0]
# Reduce the netloc to its trailing "domain.tld" and require the host
# to be in the premium hoster list.
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
if not host in hostprDict: raise Exception()
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': True})
except:
pass
return sources
except:
return sources
示例11: episode
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Extend the show payload with episode details and re-encode it.

    Returns the urlencoded query string, or None when debrid is disabled,
    url is missing, or encoding fails.
    """
    if debrid.status(True) is False:
        return
    try:
        if url is None:
            return
        params = urlparse.parse_qs(url)
        params = {k: (v[0] if v else '') for k, v in params.items()}
        params.update({'title': title, 'premiered': premiered, 'season': season, 'episode': episode})
        return urllib.urlencode(params)
    except Exception:
        return
示例12: sources
def sources(self, url, hostDict, hostprDict):
    """Search the site's RSS feed for the query and harvest hoster links.

    `url` is already a dict of title metadata. Each feed <item> whose
    title contains the query is scanned for singlelink blocks; every
    valid hoster link becomes a debrid-only source. Returns a list of
    source dicts; [] on failure (tracebacks are printed).
    """
    sources = []
    try:
        if url == None:
            return sources
        if debrid.status() == False:
            raise Exception()

        data = url
        if 'tvshowtitle' in data:
            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))
        else:
            query = '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        search_url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(query))
        feed = requests.get(search_url).text
        hosters = hostprDict + hostDict

        for post in re.findall(r'(?s)<item>(.*?)</item>', feed):
            try:
                post_title = re.findall(r'<title>(.*?)</title>', post)[0]
                if query.lower() not in post_title.lower():
                    continue
                for div in re.findall(r'(?s)<singlelink></singlelink><br />(.*?)<br />.<strong>', post):
                    for link in re.findall(r'<a href="(.*?)"', div):
                        quality = source_utils.get_quality_simple(link)
                        valid, host = source_utils.is_host_valid(link, hosters)
                        if valid:
                            sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': link, 'info': '', 'direct': False, 'debridonly': True})
            except:
                traceback.print_exc()
        return sources
    except:
        traceback.print_exc()
        return sources
示例13: sources
def sources(self, url, hostDict, hostprDict):
    """Query the torrent index and convert result items to source dicts.

    Chooses the tv or movie search template based on the payload, lets
    ``self._get_items`` fetch the result rows, and emits one debrid-only
    'Torrent' source per row. Returns a list; [] on failure.
    """
    sources = []
    try:
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()

        params = urlparse.parse_qs(url)
        params = {k: (v[0] if v else '') for k, v in params.items()}

        episode_mode = 'tvshowtitle' in params
        self.title = params['tvshowtitle'] if episode_mode else params['title']
        if episode_mode:
            self.hdlr = 'S%02dE%02d' % (int(params['season']), int(params['episode']))
            query = '%s S%02dE%02d' % (params['tvshowtitle'], int(params['season']), int(params['episode']))
        else:
            self.hdlr = params['year']
            query = '%s %s' % (params['title'], params['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        template = self.tvsearch if episode_mode else self.moviesearch
        search_url = urlparse.urljoin(self.base_link, template.format(urllib.quote_plus(query)))

        for item in self._get_items(search_url):
            try:
                # item layout: (name, link, extra) -- extra is appended to
                # the info column alongside the detected release quality.
                name, link, extra = item[0], item[1], item[2]
                quality, info = source_utils.get_release_quality(name, name)
                info.append(extra)
                info = ' | '.join(info)
                link = link.split('&tr')[0]
                sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': link, 'info': info,
                                'direct': False, 'debridonly': True})
            except BaseException:
                pass
        return sources
    except BaseException:
        return sources
示例14: sources
def sources(self, url, hostDict, hostprDict):
    """Search 2DDL for the title and collect direct-download hoster links.

    Skips ouo.io / sh.st shorteners and 'linx' links (logged), and
    ignores .rar archives and .srt subtitle files. Quality is always
    reported as 'SD', matching the original behavior. Returns a list of
    source dicts; [] on any failure.
    """
    sources = []  # defined before try so the except handler can return it
    try:
        if url == None: return sources
        if debrid.status() is False: raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)
        html = client.request(url)
        url_list = re.compile('<h2><a href="([^"]+)"', re.DOTALL).findall(html)
        hostDict = hostprDict + hostDict

        for url in url_list:
            if cleantitle.get(title) not in cleantitle.get(url):
                continue
            html = client.request(url)
            links = re.compile('href="([^"]+)" rel="nofollow"', re.DOTALL).findall(html)
            for vid_url in links:
                if 'ouo.io' in vid_url or 'sh.st' in vid_url:
                    continue
                if 'linx' in vid_url:
                    log_utils.log('2DDL - sources - linx: ' + str(vid_url))
                    continue
                if '.rar' in vid_url or '.srt' in vid_url:
                    continue
                # bug fix: the original read
                #     'SD',info = source_utils.get_release_quality(url, vid_url)
                # which is a SyntaxError (assignment target contains a string
                # literal). Only the info part is used; quality stays 'SD'.
                _, info = source_utils.get_release_quality(url, vid_url)
                host = vid_url.split('//')[1].replace('www.', '')
                host = host.split('/')[0].lower()
                sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': vid_url, 'info': info, 'direct': False, 'debridonly': False})
        return sources
    except Exception:
        # was `except Exception, argument:` -- Python-2-only syntax; the
        # bound name was unused, so the modern form works on both 2 and 3.
        return sources
示例15: movie
def movie(self, imdb, title, localtitle, aliases, year):
    """Search the site's carousel results for the movie page URL.

    Matches on cleaned title containment plus the release year appearing
    in the result's date text; returns the absolute URL or None.
    """
    try:
        if debrid.status() == False:
            raise Exception()
        search_url = self.search_link % (self.base_link, urllib.quote_plus(title).replace('+', '-'))
        html = client.request(search_url, XHR=True)
        carousel = re.compile('<ul id=first-carousel1(.+?)</ul>', re.DOTALL).findall(html)
        entries = re.compile('alt="(.+?)".+?<h2><a href="(.+?)".+?</h2>.+?>(.+?)</p>', re.DOTALL).findall(str(carousel))
        wanted = cleantitle.get(title)
        for found_title, path, date in entries:
            if wanted in cleantitle.get(found_title) and year in date:
                return self.base_link + path
    except:
        return