本文整理汇总了Python中resources.lib.modules.cleantitle.geturl函数的典型用法代码示例。如果您正苦于以下问题:Python geturl函数的具体用法?Python geturl怎么用?Python geturl使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了geturl函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: tvshow
def tvshow(self, imdb, tvdb, tvshowtitle, year):
    """Resolve a TV show to a site-relative search URL.

    Tries the cleaned ``tvshowtitle`` first; when the site returns no
    <title>, falls back to the localized (es-ES) title scraped from IMDb.
    Returns the relative URL string, or None on any failure.
    """
    try:
        url = self.tvsearch_link % cleantitle.geturl(tvshowtitle)
        r = urlparse.urljoin(self.base_link, url)
        r = client.request(r, limit='1')
        r = client.parseDOM(r, 'title')
        if not r:
            # No hit for the given title: retry with IMDb's localized title.
            url = 'http://www.imdb.com/title/%s' % imdb
            url = client.request(url, headers={'Accept-Language':'es-ES'})
            url = client.parseDOM(url, 'title')[0]
            # Strip the trailing "(YYYY) ..." suffix from the IMDb page title.
            url = re.sub(r'\((?:.+?|)\d{4}.+', '', url).strip()
            url = cleantitle.normalize(url.encode("utf-8"))
            url = self.tvsearch_link % cleantitle.geturl(url)
            r = urlparse.urljoin(self.base_link, url)
            r = client.request(r, limit='1')
            r = client.parseDOM(r, 'title')
        # Guard against a wrong match: the page title must mention the year.
        if year not in r[0]:
            raise Exception()
        return url
    except Exception:
        return
示例2: movie
def movie(self, imdb, title, localtitle, aliases, year):
    """Resolve a movie to a site-relative search URL.

    Tries the cleaned ``title`` first; when the site returns no <title>,
    falls back to the localized (es-ES) title scraped from IMDb.
    Returns the relative URL string, or None on any failure.
    """
    try:
        url = self.moviesearch_link % cleantitle.geturl(title)
        r = urlparse.urljoin(self.base_link, url)
        r = client.request(r, limit='1', timeout='10')
        r = client.parseDOM(r, 'title')
        if not r:
            # No hit for the given title: retry with IMDb's localized title.
            url = 'http://www.imdb.com/title/%s' % imdb
            url = client.request(url, headers={'Accept-Language':'es-ES'}, timeout='10')
            url = client.parseDOM(url, 'title')[0]
            # Drop the "(YYYY)" / " YYYY ..." tail from the IMDb page title.
            url = re.sub(r'(?:\(|\s)\d{4}.+', '', url).strip()
            url = cleantitle.normalize(url.encode("utf-8"))
            url = self.moviesearch_link % cleantitle.geturl(url)
            r = urlparse.urljoin(self.base_link, url)
            r = client.request(r, limit='1', timeout='10')
            r = client.parseDOM(r, 'title')
        # Guard against a wrong match: the page title must mention the year.
        if year not in r[0]:
            raise Exception()
        return url
    except Exception:
        pass
示例3: sources
def sources(self, url, hostDict, hostprDict):
    """Collect playable stream sources for an episode or movie.

    ``url`` is either a query string (tvshowtitle/title/episode keys) or an
    absolute page URL. Returns a list of source dicts (possibly empty).
    """
    try:
        sources = []
        if url is None:
            return sources
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            if 'tvshowtitle' in data:
                url = '%s%s' % (self.search_link, cleantitle.getsearch(data['tvshowtitle']))
                url = urlparse.urljoin(self.base_link, url)
                r = client.request(url, timeout='10')
                t = cleantitle.query(data['tvshowtitle'])
                # First search hit whose title attribute matches the query.
                ref = client.parseDOM(r, 'a', ret='href', attrs = {'title': t })[0]
                url = '%s/%s-ep-%01d/' % (ref, cleantitle.geturl(data['tvshowtitle']), int(data['episode']))
            else:
                url = '%s/movie/%s-engsub/%s-ep-1/' % (self.base_link, cleantitle.geturl(data['title']), cleantitle.geturl(data['title']))
            # Resolve redirects to confirm the guessed URL actually exists.
            url = client.request(url, timeout='10', output='geturl')
            if url is None:
                raise Exception()
        else:
            url = urlparse.urljoin(self.base_link, url)
        # The original fetched this page twice; one request suffices.
        r = client.request(url, timeout='10')
        r = client.parseDOM(r, 'iframe', ret='src')
        for i in r:
            if 'drama4u' in i or 'k-vid' in i:
                i = client.request(i, timeout='10')
                i = re.findall(r'(https:\W.redirector\..*?)[\'\"]', i)
                for g in i:
                    g = g.replace("\\", "")
                    try:
                        sources.append({'source': 'gvideo', 'quality': directstream.googletag(g)[0]['quality'], 'language': 'ko', 'url': g, 'direct': True, 'debridonly': False})
                    except Exception:
                        pass
            elif 'ads' in i:
                pass
            else:
                host = re.findall(r'([\w]+[.][\w]+)$', urlparse.urlparse(i.strip().lower()).netloc)[0]
                # NOTE(review): an unknown host aborts the remaining iframes
                # via the outer except (original behavior, preserved).
                if host not in hostDict:
                    raise Exception()
                host = host.encode('utf-8')
                sources.append({'source': host, 'quality': 'SD', 'language': 'ko', 'url': i, 'direct': False, 'debridonly': False})
        return sources
    except Exception:
        return sources
示例4: sources
def sources(self, url, hostDict, hostprDict):
    """Scrape gvideo file/label pairs from an episode or movie page.

    ``url`` is either a query string (tvshowtitle/title keys) or an absolute
    page URL. Episode pages are validated against the premiere year.
    Returns a list of source dicts (possibly empty).
    """
    try:
        sources = []
        if url is None:
            return sources
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            if 'tvshowtitle' in data:
                url = '%s/episodes/%s-%01dx%01d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
                year = re.findall(r'(\d{4})', data['premiered'])[0]
                url = client.request(url, output='geturl')
                if url is None:
                    raise Exception()
                r = client.request(url)
                # Reject the page when its air date disagrees with the premiere year.
                y = client.parseDOM(r, 'span', attrs = {'class': 'date'})[0]
                y = re.findall(r'(\d{4})', y)[0]
                if y != year:
                    raise Exception()
            else:
                url = '%s/movies/%s-%s/' % (self.base_link, cleantitle.geturl(data['title']), data['year'])
                url = client.request(url, output='geturl')
                if url is None:
                    raise Exception()
                r = client.request(url)
        else:
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)
        r = re.findall(r'file\s*:\s*(?:\"|\')(.+?)(?:\"|\')\s*,.+?label\s*:\s*(?:\"|\')(.+?)(?:\"|\')', r)
        for i in r:
            try:
                # Only 1080/720 labels are kept; anything else is skipped.
                if '1080' in i[1]: quality = '1080p'
                elif '720' in i[1]: quality = 'HD'
                else: raise Exception()
                url = i[0].replace('\\/', '/')
                url = client.replaceHTMLCodes(url)
                # Only proxied (.php) links are usable on this site.
                if '.php' not in i[0]:
                    raise Exception()
                url = url.encode('utf-8')
                sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
            except Exception:
                pass
        return sources
    except Exception:
        return sources
示例5: sources
def sources(self, url, hostDict, hostprDict):
    """Collect hoster iframes from an episode or movie page.

    ``url`` is either a query string (tvshowtitle/title keys) or an absolute
    page URL. Episode pages are validated against the premiere year.
    Returns a list of source dicts (possibly empty).
    """
    try:
        sources = []
        if url is None:
            return sources
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            if 'tvshowtitle' in data:
                url = '%s/episode/%s-s%02de%02d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
                year = re.findall(r'(\d{4})', data['premiered'])[0]
                url = client.request(url, output='geturl')
                if url is None:
                    raise Exception()
                r = client.request(url)
                # Date may live in a 'date' span or a 'metadatac' div.
                y = client.parseDOM(r, 'span', attrs = {'class': 'date'})
                y += [i for i in client.parseDOM(r, 'div', attrs = {'class': 'metadatac'}) if 'date' in i]
                y = re.findall(r'(\d{4})', y[0])[0]
                if y != year:
                    raise Exception()
            else:
                url = '%s/movie/%s-%s/' % (self.base_link, cleantitle.geturl(data['title']), data['year'])
                url = client.request(url, output='geturl')
                if url is None:
                    raise Exception()
                r = client.request(url)
        else:
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)
        links = client.parseDOM(r, 'iframe', ret='src')
        for link in links:
            try:
                valid, hoster = source_utils.is_host_valid(link, hostDict)
                if not valid: continue
                urls, host, direct = source_utils.check_directstreams(link, hoster)
                for x in urls:
                    if x['quality'] == 'SD':
                        # Upgrade the reported quality when the URL itself hints higher.
                        try:
                            if 'HDTV' in x['url'] or '720' in x['url']: x['quality'] = 'HD'
                            if '1080' in x['url']: x['quality'] = '1080p'
                        except Exception:
                            pass
                    sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
            except Exception:
                pass
        return sources
    except Exception:
        return sources
示例6: sources
def sources(self, url, hostDict, hostprDict):
    """Extract gvideo streams from .php-proxied iframes on a page.

    ``url`` is either a query string (tvshowtitle/title keys) or an absolute
    page URL. Resolved pages are validated against the expected year.
    Returns a list of source dicts (possibly empty).
    """
    try:
        sources = []
        if url is None:
            return sources
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            if 'tvshowtitle' in data:
                url = '%s/episodes/%s-%01dx%01d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
                year = re.findall(r'(\d{4})', data['premiered'])[0]
            else:
                url = '%s/movies/%s/' % (self.base_link, cleantitle.geturl(data['title']))
                year = data['year']
            url = client.request(url, timeout='10', output='geturl')
            if url is None:
                raise Exception()
            r = client.request(url, timeout='10')
            # Reject the page when its year tag disagrees with the expected year.
            y = client.parseDOM(r, 'a', attrs={'rel': 'tag', 'href': '[^\'"]*year[^\'"]*'})[0]
            y = re.findall(r'(\d{4})', y)[0]
            if y != year:
                raise Exception()
        else:
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url, timeout='10')
        links = client.parseDOM(r, 'iframe', ret='src')
        for link in links:
            try:
                url = link.replace('\\/', '/')
                url = client.replaceHTMLCodes(url)
                url = 'http:' + url if url.startswith('//') else url
                url = url.encode('utf-8')
                # Only proxied (.php) iframes carry the gvideo file list.
                if '.php' not in url:
                    raise Exception()
                r = client.request(url, timeout='10')
                r = re.findall(r'file\s*:\s*(?:\"|\')(.+?)(?:\"|\')', r)
                for i in r:
                    try:
                        sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                    except Exception:
                        pass
            except Exception:
                pass
        return sources
    except Exception:
        return sources
示例7: movie
def movie(self, imdb, title, localtitle, aliases, year):
    """Resolve a movie page URL, trying the title-year slug, then the bare title.

    Returns the page URL, or None when both guesses 404 or anything raises.
    """
    try:
        url = '%s/movies/%s-%s/' % (self.base_link, cleantitle.geturl(title), year)
        r = self.scraper.get(url).content
        if '<h2>ERROR <span>404</span></h2>' in r:
            # Year-suffixed slug missed; retry without the year.
            url = '%s/movies/%s/' % (self.base_link, cleantitle.geturl(title))
            r = self.scraper.get(url).content
            if '<h2>ERROR <span>404</span></h2>' in r:
                return
        return url
    except Exception:
        return
示例8: movie
def movie(self, imdb, title, localtitle, aliases, year):
    """Resolve a movie page URL on a debrid-only host.

    Tries the title-year slug first, then the bare title slug.
    Returns the resolved URL, or None when debrid is inactive or no slug resolves.
    """
    try:
        # This provider is only useful with an active debrid account.
        if debrid.status() == False:
            raise Exception()
        url = urlparse.urljoin(self.base_link, '%s-%s' % (cleantitle.geturl(title), year))
        url = client.request(url, output='geturl')
        if url is None:
            # Year-suffixed slug missed; retry without the year.
            url = urlparse.urljoin(self.base_link, '%s' % (cleantitle.geturl(title)))
            url = client.request(url, output='geturl')
        if url is None:
            raise Exception()
        return url
    except Exception:
        return
示例9: movie
def movie(self, imdb, title, localtitle, aliases, year):
    """Resolve a movie page URL, trying the title-year slug, then the bare title.

    A resolved URL must still contain the cleaned title slug, otherwise it is
    treated as a miss. Returns the URL, or None on failure.
    """
    try:
        url = '%s/movies/%s-%s/' % (self.base_link, cleantitle.geturl(title), year)
        url = client.request(url, output='geturl')
        if url is None or cleantitle.geturl(title) not in url:
            # Year-suffixed slug missed; retry without the year.
            url = '%s/movies/%s/' % (self.base_link, cleantitle.geturl(title))
            url = client.request(url, output='geturl')
            # Fixed: original used `raise Exception` without calling it.
            if url is None or cleantitle.geturl(title) not in url:
                raise Exception()
        return url
    except Exception:
        return
示例10: episode
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Build the show-episode URL from the query string stored in ``url``.

    Falls back from 'tvshowtitle' to 'title'. Returns the absolute,
    utf-8-encoded URL, or None on any failure.
    """
    try:
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        # Episode id in the site's "1x2" format.
        ep_id = "%01dx%01d" % (int(season), int(episode))
        url = self.shows_link % (cleantitle.geturl(title), season, cleantitle.geturl(title), ep_id)
        url = urlparse.urljoin(self.base_link, url)
        url = url.encode('utf-8')
        # Removed leftover debug print of the generated URL.
        return url
    except Exception:
        return
示例11: sources
def sources(self, url, hostDict, hostprDict):
    """Collect gvideo/openload streams from 'vidnow' iframes on a drama page.

    ``url`` is either a query string (tvshowtitle/title keys) or an absolute
    page URL. Returns a list of source dicts (possibly empty).
    """
    try:
        sources = []
        if url is None:
            return sources
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            if 'tvshowtitle' in data:
                url = '%s/drama/%s/episode-%01d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['episode']))
            else:
                url = '%s/movie/%s/' % (self.base_link, cleantitle.geturl(data['title']))
            # Resolve redirects to confirm the guessed URL actually exists.
            url = client.request(url, timeout='10', output='geturl')
            if url is None:
                raise Exception()
        else:
            url = urlparse.urljoin(self.base_link, url)
        # The original fetched this page twice; one request suffices.
        r = client.request(url, timeout='10')
        links = client.parseDOM(r, 'iframe', ret='src')
        for link in links:
            if 'vidnow' not in link:
                continue
            r = client.request(link, timeout='10')
            # base64-encoded direct gvideo links embedded via window.atob(...).
            s = re.findall(r'window\.atob\(\"(.*?)\"\)', r)
            r = re.findall(r'(https:.*?(openload|redirector).*?)[\'\"]', r)
            for i in s:
                i = base64.b64decode(i)
                try:
                    sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'ko', 'url': i, 'direct': True, 'debridonly': False})
                except Exception:
                    pass
            for i in r:
                # i is a (full_url, matched_host) tuple from the regex groups.
                # Fixed: the original tested `'google' in i` (tuple membership,
                # never true) and passed the tuple itself to googletag().
                if 'openload' in i[0]:
                    try:
                        sources.append({'source': 'openload', 'quality': 'SD', 'language': 'ko', 'url': i[0], 'direct': False, 'debridonly': False})
                    except Exception:
                        pass
                elif 'google' in i[0]:
                    try:
                        sources.append({'source': 'gvideo', 'quality': directstream.googletag(i[0])[0]['quality'], 'language': 'ko', 'url': i[0], 'direct': True, 'debridonly': False})
                    except Exception:
                        pass
        return sources
    except Exception:
        return sources
示例12: movie
def movie(self, imdb, title, localtitle, aliases, year):
    """Search the site's movie list and return the first href matching the title.

    Returns the matching URL, or None when nothing matches or anything raises.
    """
    try:
        title = cleantitle.geturl(title).replace('-', '+')
        u = self.base_link + self.search_link % title
        u = client.request(u)
        i = client.parseDOM(u, "div", attrs={"class": "movies-list"})
        # Hoisted out of the loop: the slug is loop-invariant (assumes
        # cleantitle.geturl is idempotent on an already-cleaned title, which
        # matches the original's repeated re-cleaning).
        slug = cleantitle.geturl(title).replace("+", "-")
        href_re = re.compile('<a href="(.+?)"')
        for r in i:
            for url in href_re.findall(r):
                if slug not in url:
                    continue
                return url
    except Exception:
        return
示例13: searchMovie
def searchMovie(self, title, year, aliases, headers):
    """Probe /full-movie/ slugs for each alias, bare first, then year-suffixed.

    Returns the first resolved URL that is not the bare base link, or None.
    """
    try:
        # Fixed: initialize url so an empty `aliases` list no longer raises
        # NameError at the `if url == None` check.
        url = None
        for alias in aliases:
            url = '%s/full-movie/%s' % (self.base_link, cleantitle.geturl(alias['title']))
            url = client.request(url, headers=headers, output='geturl', timeout='10')
            # A redirect back to the base link means the slug does not exist.
            if url is not None and url != self.base_link:
                break
        if url is None:
            for alias in aliases:
                url = '%s/full-movie/%s-%s' % (self.base_link, cleantitle.geturl(alias['title']), year)
                url = client.request(url, headers=headers, output='geturl', timeout='10')
                if url is not None and url != self.base_link:
                    break
        return url
    except Exception:
        return
示例14: sources
def sources(self, url, hostDict, hostprDict):
    """Scrape hoster iframes from an xmovies8 episode page.

    ``url`` is a dict with 'tvshowtitle', 'season', and 'episode' keys.
    Returns a list of source dicts (possibly empty), even on error.
    """
    sources = []
    try:
        with requests.Session() as s:
            # NOTE(review): geturl() output is concatenated directly onto the
            # domain — confirm it yields a leading-slash path.
            episode_link = "http://xmovies8.es" + cleantitle.geturl(url['tvshowtitle']) + "-s" + url['season'] + "-e" + url[
                'episode']
            p = s.get(episode_link)
            soup = BeautifulSoup(p.text, 'html.parser')
            for i in soup.findAll('iframe'):
                # Fixed: use .get() consistently so an iframe without a src
                # attribute no longer raises mid-scrape.
                src = i.get('src') or ''
                if 'thevideo' in src:
                    sources.append(
                        {'source': "thevideo.me", 'quality': 'SD', 'language': "en", 'url': src, 'info': '',
                         'direct': False, 'debridonly': False})
                if 'openload' in src:
                    sources.append(
                        {'source': "openload.co", 'quality': 'SD', 'language': "en", 'url': src, 'info': '',
                         'direct': False, 'debridonly': False})
                if 'vshare' in src:
                    sources.append(
                        {'source': "vshare.eu", 'quality': 'SD', 'language': "en", 'url': src, 'info': '',
                         'direct': False, 'debridonly': False})
        return sources
    except Exception:
        print("Unexpected error in Beetv Script: source", sys.exc_info()[0])
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, exc_tb.tb_lineno)
        # Fixed: the original returned `url` (a dict) here; callers expect a list.
        return sources
示例15: sources
def sources(self, url, hostDict, hostprDict):
    """Scrape gvideo file/label pairs from an episode or movie page.

    ``url`` is a query string; episode URLs are rebuilt and validated against
    the premiere year. Returns a list of source dicts (possibly empty).
    """
    try:
        sources = []
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        if 'tvshowtitle' in data:
            url = '%s/episodes/%s-%01dx%01d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
            year = re.findall(r'(\d{4})', data['premiered'])[0]
            r = client.request(url)
            # Reject the page when its air date disagrees with the premiere year.
            y = client.parseDOM(r, 'span', attrs = {'class': 'date'})[0]
            y = re.findall(r'(\d{4})', y)[0]
            if y != year:
                raise Exception()
        else:
            # NOTE(review): the movie branch requests `url` as passed in —
            # confirm callers supply a full page URL in that case.
            r = client.request(url)
        result = re.findall('''['"]file['"]:['"]([^'"]+)['"],['"]label['"]:['"]([^'"]+)''', r)
        for i in result:
            url = i[0].replace('\\/', '/')
            sources.append({'source': 'gvideo', 'quality': source_utils.label_to_quality(i[1]), 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
        return sources
    except Exception:
        # Fixed: return the (possibly empty) list for consistency with the
        # other sources() providers; the original returned None here.
        return sources