本文整理汇总了Python中resources.lib.libraries.cleantitle.get函数的典型用法代码示例。如果您正苦于以下问题:Python get函数的具体用法?Python get怎么用?Python get使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了get函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_episode
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    # Resolve the site-relative URL of a single episode.
    # `url` is the show's relative slug; returns an encoded relative URL
    # or None on any failure (best-effort scraper convention).
    try:
        if url == None: return
        url = '%s/serie/%s' % (self.base_link, url)
        # token like 's2_e5' used to match the episode href directly
        myses = 's%s_e%s' %(season, episode)
        r = client.request(url)
        r = client.parseDOM(r, 'li', attrs = {'itemprop': 'episode'})
        t = cleantitle.get(title)
        # per episode <li>: (href list, name-span list, yyyy-mm-dd matches)
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'itemprop': 'name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in r]
        # normalise the name field: keep the last word, None when absent
        r = [(i[0], i[1][0].split(' ')[-1], i[2]) for i in r if i[1]] + [(i[0], None, i[2]) for i in r if not i[1]]
        # normalise the air-date field to a single string or None
        r = [(i[0], i[1], i[2][0]) for i in r if i[2]] + [(i[0], i[1], None) for i in r if not i[2]]
        r = [(i[0][0], i[1], i[2]) for i in r if i[0]]
        # match priority: title + sN_eN token, then title alone, then air date
        url = [i for i in r if t == cleantitle.get(i[1]) and myses in i[0]][:1]
        if not url: url = [i for i in r if t == cleantitle.get(i[1])]
        if len(url) > 1 or not url: url = [i for i in r if date == i[2]]
        if len(url) > 1 or not url: raise Exception()
        url = url[0][0]
        # strip scheme/host, keep only the site-relative path
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
示例2: get_movie
def get_movie(self, imdb, title, year):
    """Search the provider for a movie and return its relative URL.

    Matches results on cleaned title and exact year, restricted to
    entries whose meta field mentions 'movie'. Returns the UTF-8 encoded
    relative path, or None on any failure.
    """
    try:
        tk = cache.get(self.putlocker_token, 8)
        # renamed from 'set' so the builtin is not shadowed
        st = self.putlocker_set()
        rt = self.putlocker_rt(tk + st)
        sl = self.putlocker_sl()
        tm = int(time.time() * 1000)
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        url = self.search_link
        post = {'q': title.lower(), 'limit': '20', 'timestamp': tm, 'verifiedCheck': tk, 'set': st, 'rt': rt, 'sl': sl}
        post = urllib.urlencode(post)
        r = client.request(url, post=post, headers=headers, output='')
        r = json.loads(r)
        t = cleantitle.get(title)
        # keep only well-formed results, then flatten the fields we need
        r = [i for i in r if 'year' in i and 'meta' in i]
        r = [(i['permalink'], i['title'], str(i['year']), i['meta'].lower()) for i in r]
        r = [i for i in r if 'movie' in i[3]]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
        # strip scheme/host, keep only the site-relative path
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
示例3: get_show
def get_show(self, imdb, tvdb, tvshowtitle, year):
    # Resolve a show's relative URL on an anime-only provider.
    # Non-anime shows short-circuit with '0'; other failures return None.
    try:
        # verify via trakt that the show's genres include anime/animation
        r = 'search/tvdb/%s?type=show&extended=full' % tvdb
        r = json.loads(trakt.getTrakt(r))
        if not r: return '0'
        d = r[0]['show']['genres']
        if not ('anime' in d or 'animation' in d): return '0'
        # prefer the TVMaze title when searching this site
        tv_maze = tvmaze.tvMaze()
        tvshowtitle = tv_maze.showLookup('thetvdb', tvdb)
        tvshowtitle = tvshowtitle['name']
        t = cleantitle.get(tvshowtitle)
        q = self.search_link % (urllib.quote_plus(tvshowtitle))
        q = urlparse.urljoin(self.base_link, q)
        r = client.request(q)
        r = client.parseDOM(r, 'ol', attrs={'id': 'searchresult'})[0]
        r = client.parseDOM(r, 'h2')
        # per result heading: (href list, anchor-text list)
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        # strip residual markup from the anchor text before comparing
        r = [(i[0], re.sub('<.+?>|</.+?>', '', i[1])) for i in r]
        r = [i for i in r if t == cleantitle.get(i[1])]
        # the last title match wins
        r = r[-1][0]
        # strip scheme/host, keep only the site-relative path
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
示例4: searchShow
def searchShow(self, title, season, year):
    # Resolve a show's relative URL. First searches "<title> Season N"
    # and parses "Title - Season N" headings; when that search yields no
    # headings, falls back to "<title> <year>" and parses "Title (YYYY)"
    # headings instead. Returns None on any failure.
    try:
        title = cleantitle.normalize(title)
        t = cleantitle.get(title)
        # apostrophes become '-' to match the site's slug style
        url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.query('%s Season %01d' % (title.replace('\'', '-'), int(season)))))
        r = client.request(url, timeout='10')
        r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
        if r:
            # per heading: (href list, title-attribute list)
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
            r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            # split "Title - Season N" into (title, season number)
            r = [(i[0], re.findall('(.+?)\s+-\s+Season\s+(\d+)', i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and int(season) == int(i[2])][0]
        else:
            # fallback: search by title and release year
            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.query('%s %01d' % (title.replace('\'', '-'), int(year)))))
            r = client.request(url, timeout='10')
            r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
            r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            # split "Title (YYYY" into (title, year)
            r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
        # strip scheme/host, keep only the site-relative path
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        return url.encode('utf-8')
    except:
        return
示例5: get_episode
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    # Resolve the relative URL (plus '?episode=N' query) of an episode.
    # `url` is a query string carrying 'tvshowtitle'; each candidate
    # season page is verified against the show's year via the cached
    # muchmovies_info lookup. Returns None implicitly when nothing
    # verifies, or on error.
    try:
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        t = cleantitle.get(data['tvshowtitle'])
        year = re.findall('(\d{4})', date)[0]
        # accept the air year +/- 1 to tolerate off-by-one site listings
        years = [str(year), str(int(year)+1), str(int(year)-1)]
        season = '%01d' % int(season)
        episode = '%01d' % int(episode)
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        query = urllib.urlencode({'keyword': '%s - Season %s' % (data['tvshowtitle'], season)})
        url = urlparse.urljoin(self.base_link, self.search_link)
        r = client.request(url, post=query, headers=headers)
        r = json.loads(r)['content']
        # pair each result href with its anchor text
        r = zip(client.parseDOM(r, 'a', ret='href', attrs = {'class': 'ss-title'}), client.parseDOM(r, 'a', attrs = {'class': 'ss-title'}))
        # split "<title> - season <n>" out of the anchor text
        r = [(i[0], re.findall('(.+?) - season (\d+)$', i[1].lower())) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
        r = [i for i in r if t == cleantitle.get(i[1])]
        # at most two candidates with the right season number
        r = [i[0] for i in r if season == '%01d' % int(i[2])][:2]
        # pair each candidate href with its trailing numeric id
        r = [(i, re.findall('(\d+)', i)[-1]) for i in r]
        for i in r:
            try:
                y, q = cache.get(self.muchmovies_info, 9000, i[1])
                if not y in years: raise Exception()
                return urlparse.urlparse(i[0]).path + '?episode=%01d' % int(episode)
            except:
                pass
    except:
        return
示例6: matchAlias
def matchAlias(self, title, aliases):
    """Return True when `title` matches any alias in `aliases`.

    Each alias is a dict with a 'title' key; comparison uses the
    cleantitle-normalised forms. Returns False on no match or on any
    error (e.g. a malformed alias entry).
    """
    try:
        for alias in aliases:
            if cleantitle.get(title) == cleantitle.get(alias['title']):
                return True
        # the original fell off the end returning None here; make the
        # negative result an explicit boolean
        return False
    except:
        return False
示例7: get_episode
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    # NOTE(review): deliberately disabled -- the unconditional `return`
    # below makes the entire body dead code. The dead body also looks
    # broken (`url` is a dict when `url.split('/')` is called, and
    # `url[1]`/`url[2]` index it after urlencode turned it into a
    # string), so do not re-enable it without repairing that logic.
    return
    try:
        if url == None: return
        url = urlparse.parse_qs(url)
        print url
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        print url
        result = cache.get(self.tvshow_cache, 120)
        tvshowtitle = cleantitle.get(url['tvshowtitle'])
        for i in result:
            if cleantitle.get(tvshowtitle) in cleantitle.get(i[1]):
                print("MAM", i)
        result = [i[0] for i in result if cleantitle.get(tvshowtitle) in cleantitle.get(i[1])]
        url = [i for i in url.split('/') if not i == '']
        url['title'], url['season'], url['episode'] = title, season, episode
        url = urllib.urlencode(url)
        print("URL",url)
        #view-source:http://alltube.tv/marco-polo/odcinek-4/odcinek-4-sezon-2/62284
        url = '/%s/odcinek-%s/odcinek-%s-sezon-%s/%s' % (url[1],int(episode),int(episode),int(season), url[2])
        print("URL", url)
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
示例8: get_episode
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    """Resolve the relative URL of an episode from the cached show list.

    `url` arrives as a query string containing at least 'tvshowtitle'.
    The first cached show whose cleaned title contains the wanted title
    is fetched and scanned for an 'sNNeNN' episode entry. Returns the
    encoded episode href, or None on any failure.
    """
    try:
        if url == None: return
        url = urlparse.parse_qs(url)
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        # cached list of (href, title) pairs for every show on the site
        result = cache.get(self.tvshow_cache, 120)
        tvshowtitle = cleantitle.get(url['tvshowtitle'])
        # first show whose cleaned title contains the wanted title wins
        result = [i[0] for i in result if cleantitle.get(tvshowtitle) in cleantitle.get(i[1])][0]
        # episode marker like 's02e05' as it appears in the list markup
        txts = 's%02de%02d' % (int(season),int(episode))
        result = client.source(result)
        result = client.parseDOM(result, 'li', attrs = {'class': 'episode'})
        result = [i for i in result if txts in i][0]
        url = client.parseDOM(result, 'a', ret='href')[0]
        url = url.encode('utf-8')
        return url
    except:
        return
示例9: get_movie
def get_movie(self, imdb, title, year):
    """Search the provider for a movie and return its relative path.

    At most two results matching the cleaned title are verified against
    the release year via myesmovies_info; the first verified candidate's
    path is returned. Returns None when nothing verifies; errors are
    logged via control.log.
    """
    try:
        t = cleantitle.query2(title)
        # the search endpoint expects an md5 digest of the raw keyword;
        # renamed from 'hash' so the builtin is not shadowed
        digest = hashlib.md5(title).hexdigest()
        query = urllib.urlencode({'keyword': title, 'hash': digest})
        url = urlparse.urljoin(self.base_link, self.search_link)
        r = client.request(url, post=query, headers=self.headers)
        r = json.loads(r)['content']
        # pair each result href with its anchor text
        r = zip(client.parseDOM(r, 'a', ret='href', attrs = {'class': 'ss-title'}), client.parseDOM(r, 'a', attrs = {'class': 'ss-title'}))
        # at most two candidates matching the cleaned title
        r = [i[0] for i in r if cleantitle.get(t) == cleantitle.get(i[1])][:2]
        # pair each candidate href with its trailing numeric id
        r = [(i, re.findall('(\d+)', i)[-1]) for i in r]
        for i in r:
            try:
                # verify the candidate's release year before accepting it
                y, q = self.myesmovies_info(i[0])
                if not y == year: raise Exception()
                return urlparse.urlparse(i[0]).path
            except:
                pass
        return None
    except Exception as e:
        control.log('Error %s' % e)
        return
示例10: get_movie
def get_movie(self, imdb, title, year):
    """Search the site for a movie and return its relative URL.

    Matches on cleaned title plus the last 4-digit run found in the
    result's title attribute (taken as the year). Returns None on
    failure; errors are logged via control.log.
    """
    try:
        query = urlparse.urljoin(self.base_link, self.search_link)
        query = query % urllib.quote_plus(title)
        r = client.request(query)
        t = cleantitle.get(title)
        r = client.parseDOM(r, 'div', attrs = {'class': 'col-lg.+?'})
        # per result card: (href list, title-attribute list)
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        # take the last 4-digit run in the title as the release year
        r = [(i[0], i[1], re.findall('(\d{4})', i[1])) for i in r]
        r = [(i[0], i[1], i[2][-1]) for i in r if len(i[2]) > 0]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
        # strip scheme/host, keep only the site-relative path
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except Exception as e:
        control.log('ERROR XMOVIES GET %s' % e)
        return
示例11: get_episode
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    """Resolve the relative URL of an episode from a show's page.

    Match priority: episode title plus air date, then air date alone,
    then a 'season-N-episode-M' fragment in the href. Returns the
    encoded relative path, or None on any failure.
    """
    try:
        if url == None: return
        url = urlparse.urljoin(self.base_link, url)
        result = proxy.request(url, 'main_body')
        result = client.parseDOM(result, 'div', attrs = {'class': 'tv_episode_item'})
        title = cleantitle.get(title)
        # per episode item: (href list, name-span list, yyyy-mm-dd matches)
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'class': 'tv_episode_name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in result]
        # normalise name and date fields to a single value or None
        result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
        result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
        result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]
        # BUG FIX: the original compared against an undefined name
        # `premiered`; the air date is passed in as the `date` parameter
        url = [i for i in result if title == cleantitle.get(i[1]) and date == i[2]][:1]
        if len(url) == 0: url = [i for i in result if date == i[2]]
        if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]
        url = client.replaceHTMLCodes(url[0][0])
        url = proxy.parse(url)
        # strip scheme/host, keep only the site-relative path
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
示例12: tvrageEpisode
def tvrageEpisode(self, tvrage, title, date, season, episode):
    # Translate an (air date, episode title) pair into (season, episode)
    # number strings. Tries TVRage's episode list first, then epguides as
    # a fallback; within each source it matches on the formatted air date
    # first, then on the cleaned episode title, accepting only a single
    # unambiguous hit. Returns a ('S', 'E') tuple of unpadded number
    # strings, or None implicitly when neither source resolves.
    monthMap = {'01':'Jan', '02':'Feb', '03':'Mar', '04':'Apr', '05':'May', '06':'Jun', '07':'Jul', '08':'Aug', '09':'Sep', '10':'Oct', '11':'Nov', '12':'Dec'}
    title = cleantitle.get(title)
    try:
        url = self.tvrage_link % tvrage
        result = client.request(url, timeout='5')
        # rows carry "SxE ... dd/Mon/yyyy ... <a>episode name</a>"
        search = re.compile('<td.+?><a.+?title=.+?season.+?episode.+?>(\d+?)x(\d+?)<.+?<td.+?>(\d+?/.+?/\d+?)<.+?<td.+?>.+?href=.+?>(.+?)<').findall(result.replace('\n',''))
        # TVRage dates use a 4-digit year
        d = '%02d/%s/%s' % (int(date.split('-')[2]), monthMap[date.split('-')[1]], date.split('-')[0])
        match = [i for i in search if d == i[2]]
        if len(match) == 1: return (str('%01d' % int(match[0][0])), str('%01d' % int(match[0][1])))
        match = [i for i in search if title == cleantitle.get(i[3])]
        if len(match) == 1: return (str('%01d' % int(match[0][0])), str('%01d' % int(match[0][1])))
    except:
        pass
    try:
        url = self.epguides_link % tvrage
        result = client.request(url, timeout='5')
        # epguides CSV rows: num,season,episode,...,date,"name",...
        search = re.compile('\d+?,(\d+?),(\d+?),.+?,(\d+?/.+?/\d+?),"(.+?)",.+?,".+?"').findall(result)
        # epguides dates use a 2-digit year
        d = '%02d/%s/%s' % (int(date.split('-')[2]), monthMap[date.split('-')[1]], date.split('-')[0][-2:])
        match = [i for i in search if d == i[2]]
        if len(match) == 1: return (str('%01d' % int(match[0][0])), str('%01d' % int(match[0][1])))
        match = [i for i in search if title == cleantitle.get(i[3])]
        if len(match) == 1: return (str('%01d' % int(match[0][0])), str('%01d' % int(match[0][1])))
    except:
        pass
示例13: get_movie
def get_movie(self, imdb, title, year):
    # Resolve a movie's relative URL. At most two search results whose
    # cleaned title matches are verified against the release year via
    # the cached muchmovies_info lookup; returns None implicitly when no
    # candidate verifies, or on error.
    try:
        t = cleantitle.get(title)
        q = self.search_link_2 % (urllib.quote_plus(cleantitle.query(title)))
        # the site's search URLs use dashed slugs, not '+'
        q = q.replace('+','-')
        q = urlparse.urljoin(self.base_link, q)
        r = self.request(q)[0]
        r = client.parseDOM(r, 'div', attrs = {'class': 'ml-item'})
        # per result card: (href list, title list, data-url list)
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), client.parseDOM(i, 'a', ret='data-url')) for i in r]
        r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1]]
        #else:
        # r = zip(client.parseDOM(r, 'a', ret='href', attrs = {'class': 'ss-title'}), client.parseDOM(r, 'a', attrs = {'class': 'ss-title'}))
        # NOTE(review): t is already cleantitle.get(title); re-applying
        # cleantitle.get here relies on get() being idempotent -- confirm.
        r = [(i[0],i[2]) for i in r if cleantitle.get(t) == cleantitle.get(i[1])][:2]
        # pair each candidate with the trailing numeric id in its data-url
        r = [(i[0], re.findall('(\d+)', i[1])[-1]) for i in r]
        for i in r:
            try:
                # verify the candidate's release year before accepting it
                y, q = cache.get(self.muchmovies_info, 9000, i[1])
                if not y == year: raise Exception()
                return urlparse.urlparse(i[0]).path
            except:
                pass
    except:
        return
示例14: get_show
def get_show(self, imdb, tvdb, tvshowtitle, year):
    """Search the provider for a TV show and return its relative URL.

    Matches results on cleaned title and exact year, restricted to
    entries whose meta field mentions 'tv'. Returns the UTF-8 encoded
    relative path, or None on any failure.
    """
    try:
        tk = cache.get(self.putlocker_token, 8)
        st = self.putlocker_set() ; rt = self.putlocker_rt(tk + st)
        tm = int(time.time() * 1000)
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        url = urlparse.urljoin(self.base_link, self.search_link)
        # NOTE(review): the session tokens are posted under 'sl'/'rt'
        # here -- confirm these key names against the site's current API
        post = {'q': tvshowtitle.lower(), 'limit': '100', 'timestamp': tm, 'verifiedCheck': tk, 'sl': st, 'rt': rt}
        post = urllib.urlencode(post)
        r = client.request(url, post=post, headers=headers)
        r = json.loads(r)
        t = cleantitle.get(tvshowtitle)
        # keep only well-formed results, then flatten the fields we need
        r = [i for i in r if 'year' in i and 'meta' in i]
        r = [(i['permalink'], i['title'], str(i['year']), i['meta'].lower()) for i in r]
        r = [i for i in r if 'tv' in i[3]]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
        # strip scheme/host, keep only the site-relative path
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
示例15: get_show
def get_show(self, imdb, tvdb, tvshowtitle, year):
    """Look up a show on the site's TV index and return its relative path.

    Requires stored credentials; matches on cleaned title and release
    year against the index listing. Returns None on any failure.
    """
    try:
        # credentials are mandatory for this provider
        if (self.user == '' or self.password == ''): raise Exception()

        # sign in, then append the language preference to the session cookie
        session = client.source(self.sign, post=self.post, headers=self.headers, cookie=self.lang, output='cookie')
        session = '%s; %s' % (session, self.lang)

        index_url = urlparse.urljoin(self.base_link, self.tvsearch_link)
        html = client.source(index_url, cookie=session)

        wanted = cleantitle.get(tvshowtitle)
        wanted_years = ['%s' % str(year)]

        # per index entry: (href, display name, year value)
        shows = client.parseDOM(html, 'div', attrs={'class': 'index show'})
        shows = [(client.parseDOM(block, 'a', ret='href'),
                  client.parseDOM(block, 'a', attrs={'class': 'name'}),
                  client.parseDOM(block, 'span', attrs={'class': 'value'})) for block in shows]
        shows = [(i[0][0], i[1][0], i[2][0]) for i in shows if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]

        # first entry matching both title and year wins
        matches = [i for i in shows if wanted == cleantitle.get(i[1])]
        href = [i[0] for i in matches if any(x in i[2] for x in wanted_years)][0]

        path = urlparse.urljoin(self.base_link, href)
        path = urlparse.urlparse(path).path
        path = client.replaceHTMLCodes(path)
        return path.encode('utf-8')
    except:
        return