本文整理汇总了Python中resources.lib.modules.cleantitle.query函数的典型用法代码示例。如果您正苦于以下问题:Python query函数的具体用法?Python query怎么用?Python query使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了query函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __search
def __search(self, search_link, imdb, title):
    """Search the site for *title* and return its URL path, but only when the
    candidate page links to the requested IMDB id; otherwise return None.

    Any scraping/network failure is silently swallowed (bare except) and
    yields None, matching the surrounding scraper convention.
    """
    try:
        # Build the absolute search URL from the cleaned, URL-quoted title.
        query = search_link % (urllib.quote_plus(cleantitle.query(title)))
        query = urlparse.urljoin(self.base_link, query)
        t = cleantitle.get(title)    # normalized form for exact title matching
        tq = cleantitle.query(title)  # query-style form used as fallback match
        # Drill down through the result markup to the candidate <a> tags.
        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'big-list'})
        r = dom_parser.parse_dom(r, 'table', attrs={'class': 'row'})
        r = dom_parser.parse_dom(r, 'td', attrs={'class': 'list-name'})
        r = dom_parser.parse_dom(r, 'a', req='href')
        r = [(i.attrs['href'], i.content) for i in r if i]
        # Prefer an exact normalized-title match; fall back to the query-style
        # match (the [0] fallback raises IndexError into the except if empty).
        url = [i[0] for i in r if t == cleantitle.get(i[1])]
        url = url[0] if len(url) > 0 else [i[0] for i in r if tq == cleantitle.query(i[1])][0]
        url = source_utils.strip_domain(url)
        # Confirm the candidate page actually references the requested IMDB id.
        r = client.request(urlparse.urljoin(self.base_link, url))
        r = dom_parser.parse_dom(r, 'a', attrs={'href': re.compile('.*/tt\d+.*')}, req='href')
        r = [re.findall('.+?(tt\d+).*?', i.attrs['href']) for i in r]
        r = [i[0] for i in r if i]
        return url if imdb in r else None
    except:
        return
示例2: search
def search(self, title, localtitle, year, search_type):
    """Look the show up by its cleaned title, falling back to the cleaned
    localized title when the first lookup yields nothing.

    Returns whatever do_search produces (a URL, or a falsy value when both
    lookups fail); any exception results in None.
    """
    try:
        url = None
        for candidate in (title, localtitle):
            url = self.do_search(cleantitle.query(candidate), title, localtitle, year, search_type)
            if url:
                break
        return url
    except:
        return
示例3: __search
def __search(self, titles, year, imdb):
    """Search the site for any of *titles* released around *year* and return
    the matching URL path (domain stripped), disambiguating multiple hits via
    the IMDB id found on each candidate page. Returns None on failure.
    """
    try:
        # Search with the first title; match against all normalized titles.
        query = self.search_link % urllib.quote_plus(cleantitle.query(titles[0]))
        query = urlparse.urljoin(self.base_link, query)
        t = [cleantitle.get(i) for i in set(titles) if i]
        # Accept year +/- 1, plus '0' for entries with no year.
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'movie_cell'})
        r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'bottom'}), dom_parser.parse_dom(i, 'div', attrs={'class': 'year'})) for i in r]
        r = [(dom_parser.parse_dom(i[0], 'a', req=['href', 'title']), re.findall('[(](\d{4})[)]', i[1][0].content)) for i in r if i[0] and i[1]]
        r = [(i[0][0].attrs['href'], i[0][0].content, i[1][0]) for i in r if i[0] and i[1]]
        r = [(i[0], i[1].lower(), i[2]) for i in r if i[2] in y]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
        r = [i[0] for i in r if cleantitle.get(i[1]) in t]
        if len(r) > 1:
            # Multiple title/year hits: pick the one whose page carries the
            # requested IMDB id (scanned from anchor 'name' attributes).
            for i in r:
                data = client.request(urlparse.urljoin(self.base_link, i))
                data = dom_parser.parse_dom(data, 'a', attrs={'name': re.compile('.*/tt\d+.*')}, req='name')
                data = [re.findall('.+?(tt\d+).*?', d.attrs['name']) for d in data]
                data = [d[0] for d in data if len(d) > 0 and d[0] == imdb]
                if len(data) >= 1:
                    url = i
        else:
            url = r[0]
        # NOTE(review): if no candidate matched the IMDB id above, `url` is
        # never bound and the resulting NameError is swallowed by the bare
        # except, returning None — presumably intentional, but worth confirming.
        if url:
            return source_utils.strip_domain(url)
    except:
        return
示例4: tvshow
def tvshow(self, imdb, tvdb, tvshowtitle, year):
    """Resolve *tvshowtitle*/*year* to the site's show slug via the AJAX
    search endpoint and return it UTF-8 encoded, or None on any failure.

    Fix: removed leftover "WATCHSERIES RESULT" print-debugging statements
    that spammed the log on every lookup.
    """
    try:
        t = cleantitle.get(tvshowtitle)  # normalized form for exact matching
        q = urllib.quote_plus(cleantitle.query(tvshowtitle))
        p = urllib.urlencode({'term': q})
        # The endpoint only answers AJAX-style requests.
        h = {'X-Requested-With': 'XMLHttpRequest'}
        r = client.request(self.search_link, post=p, headers=h)
        try: r = json.loads(r)
        except: r = None
        # Keep only complete entries, then extract the last 4-digit year
        # from the label and require an exact title + year match.
        r = [(i['seo_url'], i['value'], i['label']) for i in r if 'value' in i and 'label' in i and 'seo_url' in i]
        r = [(i[0], i[1], re.findall('(\d{4})', i[2])) for i in r]
        r = [(i[0], i[1], i[2][-1]) for i in r if i[2]]
        r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
        url = r[0][0]
        # Unwrap common redirect-wrapper query parameters, if present.
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
        except: pass
        # Reduce the URL to its final path segment (the show slug).
        url = url.strip('/').split('/')[-1]
        url = url.encode('utf-8')
        return url
    except:
        return
示例5: __search
def __search(self, titles, year, content):
    """Search the site (cookie-based, POSTed form) for any of *titles* with
    an exact *year* match and return the first hit's URL path, or None.
    """
    try:
        t = [cleantitle.get(i) for i in set(titles) if i]
        # The site requires a session cookie primed by the year listing page
        # before the search POST is accepted.
        c = client.request(urlparse.urljoin(self.base_link, self.year_link % int(year)), output='cookie')
        p = urllib.urlencode({'search': cleantitle.query(titles[0])})
        c = client.request(urlparse.urljoin(self.base_link, self.search_link), cookie=c, post=p, output='cookie')
        r = client.request(urlparse.urljoin(self.base_link, self.type_link % content), cookie=c, post=p)
        # Result rows: first <a> is the link/title cell, second is the year.
        r = dom_parser.parse_dom(r, 'div', attrs={'id': 'content'})
        r = dom_parser.parse_dom(r, 'tr')
        r = [dom_parser.parse_dom(i, 'td') for i in r]
        r = [dom_parser.parse_dom(i, 'a', req='href') for i in r]
        r = [(i[0].attrs['href'], i[0].content, i[1].content) for i in r if i]
        x = []
        for i in r:
            # Some titles carry an alternate name in "<i>(...)" markup;
            # keep both the display title and the alternate for matching.
            if re.search('(?<=<i>\().*$', i[1]):
                x.append((i[0], re.search('(.*?)(?=\s<)', i[1]).group(), re.search('(?<=<i>\().*$', i[1]).group(), i[2]))
            else:
                x.append((i[0], i[1], i[1], i[2]))
        # First entry matching either title form and the exact year
        # ([0] raises IndexError into the except when nothing matches).
        r = [i[0] for i in x if (cleantitle.get(i[1]) in t or cleantitle.get(i[2]) in t) and i[3] == year][0]
        return source_utils.strip_domain(r)
    except:
        return
示例6: movie
def movie(self, imdb, title, localtitle, aliases, year):
    """Resolve *title*/*year* to the site's movie URL path.

    Tries the direct slug URL first (verified through the proxy); on a miss,
    falls back to the site search and matches title + year exactly.
    Returns the UTF-8 encoded path, or None on any failure.

    Fix: replaced the non-idiomatic ``not r == None`` comparison with
    ``r is not None`` (identity check is the correct None test).
    """
    try:
        # Fast path: guess the slug URL directly and verify it resolves.
        url = self.search_link % (cleantitle.geturl(title), year)
        q = urlparse.urljoin(self.base_link, url)
        r = proxy.geturl(q)
        if r is not None: return url
        # Slow path: search the site and match "<title>(<year>" pairs
        # scraped from the quoted anchor text.
        t = cleantitle.get(title)
        q = self.search_link_2 % urllib.quote_plus(cleantitle.query(title))
        q = urlparse.urljoin(self.base_link, q)
        r = client.request(q)
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
        r = [(i[0], re.findall('(?:\'|\")(.+?)(?:\'|\")', i[1])) for i in r]
        r = [(i[0], [re.findall('(.+?)\((\d{4})', x) for x in i[1]]) for i in r]
        r = [(i[0], [x[0] for x in i[1] if x]) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]]
        # Strip any scheme/domain prefix down to the bare path.
        url = re.findall('(?://.+?|)(/.+)', r[0])[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
示例7: __search
def __search(self, titles, year):
    """Search the site for the first of *titles* plus *year* and return the
    first result whose title contains the normalized title and whose year
    matches exactly; None otherwise.
    """
    try:
        # Query uses "<title> <year>" as the search term.
        query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])+' '+year))
        query = urlparse.urljoin(self.base_link, query)
        # Only the first normalized title is used for matching here.
        t = [cleantitle.get(i) for i in set(titles) if i][0]
        r = client.request(query)
        r = client.parseDOM(r, 'div', attrs={'class': 'karatula'})
        for i in r:
            title = client.parseDOM(i, 'a', ret='title')[0]
            # First 4-digit run in the anchor title is taken as the year.
            y = re.findall('(\d{4})',title)[0]
            title = cleantitle.get_simple(title)
            # Substring match on title (not exact), exact match on year.
            if t in title and y == year :
                x = dom_parser.parse_dom(i, 'a', req='href')
                return source_utils.strip_domain(x[0][0]['href'])
        return
    except:
        return
示例8: __search
def __search(self, titles, year, season='0'):
    """Search the site for any of *titles* (year +/- 1, optional season
    number parsed out of the entry title) and return the first matching
    URL path, or None.

    :param season: season number as a string; '0' means "not a series /
        no season filtering beyond the default".
    """
    try:
        query = self.search_link % urllib.quote_plus(cleantitle.query(titles[0]))
        query = urlparse.urljoin(self.base_link, query)
        t = [cleantitle.get(i) for i in set(titles) if i]
        # Accept year +/- 1, plus '0' for entries with no year.
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
        r = client.request(query)
        r = dom_parser.parse_dom(r, 'article', attrs={'class': 'shortstory'})
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 's_info'})
        r = dom_parser.parse_dom(r, 'h2')
        r = dom_parser.parse_dom(r, 'a', req='href')
        r = [(i.attrs['href'], i.content.lower()) for i in r if i]
        # Strip markup, then pull "title (year)" out of the anchor text.
        r = [(i[0], re.sub('<.+?>|</.+?>', '', i[1]), re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
        r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
        # Extract a trailing "staffel/s <n>" (German "season") from the title.
        r = [(i[0], i[1], i[2], re.findall('(.+?)(\d+)\s+(?:staf+el|s)', i[1])) for i in r]
        r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
        # When a season was requested but none was parsed, assume season 1.
        r = [(i[0], i[1], i[2], '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
        # First entry matching title, year window, and season
        # ([0] raises IndexError into the except when nothing matches).
        r = [i[0]for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(season)][0]
        return source_utils.strip_domain(r)
    except:
        return
示例9: __search
def __search(self, title, year):
    """Search the site's JSON AJAX endpoint (which requires a nonce scraped
    from the homepage) for *title* around *year*; return the URL path of the
    best match, or None.
    """
    try:
        # Scrape the WordPress-style nonce out of the sL10n JS object on
        # the homepage; the search endpoint rejects requests without it.
        r = client.request(self.base_link)
        r = re.findall('sL10n\s*=\s*({.*?});', r)[0]
        r = json.loads(r)['nonce']
        query = self.search_link % (urllib.quote_plus(cleantitle.query(title)), r)
        query = urlparse.urljoin(self.base_link, query)
        t = cleantitle.get(title)
        # Accept year +/- 1, plus '0' for entries with no date.
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
        r = client.request(query)
        r = json.loads(r)
        # Each JSON entry: (key, url, title, alternate names, date).
        r = [(i, r[i].get('url', ''), r[i].get('title', ''), r[i].get('extra', {}).get('names', ''), r[i].get('extra', {}).get('date', '0')) for i in r]
        r = [(i[0], i[1], client.replaceHTMLCodes(i[2]), client.replaceHTMLCodes(i[3]), i[4]) for i in r]
        r = sorted(r, key=lambda i: int(i[4]), reverse=True)  # with year > no year
        # Match on either the main title or the alternate names
        # ([0] raises IndexError into the except when nothing matches).
        r = [i[1] for i in r if (t == cleantitle.get(i[2]) or t == cleantitle.get(i[3])) and i[4] in y][0]
        # Strip any scheme/domain prefix down to the bare path.
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
示例10: tvshow
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    """Resolve *tvshowtitle*/*year* to the site's show slug, preferring the
    JSON AJAX search and falling back to scraping the HTML search page.
    Returns the UTF-8 encoded slug, or None on any failure.
    """
    try:
        t = cleantitle.get(tvshowtitle)  # normalized form for exact matching
        q = urllib.quote_plus(cleantitle.query(tvshowtitle))
        p = urllib.urlencode({'term': q})
        r = client.request(self.search_link, post=p, XHR=True)
        try: r = json.loads(r)
        except: r = None
        if r:
            # JSON path: keep only complete entries.
            r = [(i['seo_url'], i['value'], i['label']) for i in r if 'value' in i and 'label' in i and 'seo_url' in i]
        else:
            # HTML fallback path: scrape anchors out of the result table.
            # NOTE(review): the second positional argument to requests.get
            # is `params`; passing the string 'tv shows' here looks dubious —
            # confirm against the target site's search URL.
            r = requests.get(self.search_link_2 % q, 'tv shows').text
            r = client.parseDOM(r, 'div', attrs = {'valign': '.+?'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), client.parseDOM(i, 'a')) for i in r]
            r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]
        # Take the last 4-digit run in the label as the year, then require
        # an exact title + year match.
        r = [(i[0], i[1], re.findall('(\d{4})', i[2])) for i in r]
        r = [(i[0], i[1], i[2][-1]) for i in r if i[2]]
        r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
        url = r[0][0]
        # Unwrap any proxy indirection and keep only the final path segment.
        url = proxy.parse(url)
        url = url.strip('/').split('/')[-1]
        url = url.encode('utf-8')
        return url
    except:
        return
示例11: __search
def __search(self, title, season):
    """Search the site for *title*, season *season* (titles look like
    "<name> saison <n>", optionally with a parenthesized alternate name)
    and return the UTF-8 encoded URL path of the first exact match, or None.
    """
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.query(title)))
        query = urlparse.urljoin(self.base_link, query)
        t = cleantitle.get(title)
        r = client.request(query)
        r = client.parseDOM(r, 'div', attrs={'class': 'moviefilm'})
        r = client.parseDOM(r, 'div', attrs={'class': 'movief'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
        r = [(i[0][0], i[1][0].lower()) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        # Split "title saison N" into (title, season); default season '0'.
        r = [(i[0], i[1], re.findall('(.+?)\s+(?:saison)\s+(\d+)', i[1])) for i in r]
        r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
        # Prefer a trailing parenthesized alternate title when present.
        r = [(i[0], i[1], re.findall('\((.+?)\)$', i[1]), i[2]) for i in r]
        r = [(i[0], i[2][0] if len(i[2]) > 0 else i[1], i[3]) for i in r]
        # Exact title + season match ([0] raises IndexError into the
        # except when nothing matches).
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and int(i[2]) == int(season)][0]
        # Strip any scheme/domain prefix down to the bare path.
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
示例12: movie
def movie(self, imdb, title, year):
    """Resolve *title*/*year* to the site's movie URL path.

    Searches the site (retrying the request up to 3 times), keeps the first
    two exact title matches, then confirms the release year of each via the
    cached ymovies_info lookup. Returns the URL path, or None on failure.

    Fix: replaced the non-idiomatic ``not r == None`` comparison with
    ``r is not None`` (identity check is the correct None test).
    """
    try:
        t = cleantitle.get(title)  # normalized form for exact matching
        q = '/search/%s.html' % (urllib.quote_plus(cleantitle.query(title)))
        q = urlparse.urljoin(self.base_link, q)
        # The site is flaky: retry the search request up to 3 times.
        for i in range(3):
            r = client.request(q)
            if r is not None: break
        r = client.parseDOM(r, 'div', attrs = {'class': 'ml-item'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if i[0] and i[1]]
        # Keep at most the first two exact title matches, paired with the
        # numeric id embedded at the end of each href.
        r = [i[0] for i in r if t == cleantitle.get(i[1])][:2]
        r = [(i, re.findall('(\d+)', i)[-1]) for i in r]
        for i in r:
            try:
                # Verify the candidate's year via the (cached) info lookup.
                y, q = cache.get(self.ymovies_info, 9000, i[1])
                if not y == year: raise Exception()
                return urlparse.urlparse(i[0]).path
            except:
                pass
    except:
        return
示例13: __search
def __search(self, titles):
    """Search the site for any of *titles* and return the URL path of the
    first matching result, or None.
    """
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
        query = urlparse.urljoin(self.base_link, query)
        t = [cleantitle.get(i) for i in set(titles) if i]
        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'nag'})
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'item-video'})
        r = dom_parser.parse_dom(r, 'h2', attrs={'class': 'entry-title'})
        r = dom_parser.parse_dom(r, 'a', req='href')
        for i in r:
            title = i[1]
            # Strip any "*...*" decoration from the entry title.
            if re.search('\*(?:.*?)\*', title) is not None:
                title = re.sub('\*(?:.*?)\*', '', title)
            title = cleantitle.get(title)
            if title in t:
                return source_utils.strip_domain(i[0]['href'])
            else:
                # NOTE(review): this bails out after the FIRST non-matching
                # result instead of continuing the loop — likely a bug, but
                # preserved as written; confirm against the site's behavior.
                return
    except:
        return
示例14: __search
def __search(self, titles, type, year, season=0, episode=False):
    """Search the site for any of *titles* of the given *type*, constrained
    to *year* +/- 1, optionally drilling down to a specific *episode* of
    *season*. Returns the matching URL path, or None.

    :param episode: False for a show/movie lookup; an episode number to
        resolve a specific episode link from the season list.
    """
    try:
        # The site filters by release year via repeated query parameters.
        years = [str(year), str(int(year) + 1), str(int(year) - 1)]
        years = ['&veroeffentlichung[]=%s' % i for i in years]
        query = self.search_link % (type, urllib.quote_plus(cleantitle.query(titles[0])))
        query += ''.join(years)
        query = urlparse.urljoin(self.base_link, query)
        t = [cleantitle.get(i) for i in set(titles) if i]
        # __proceed_search yields (url, title, season) tuples; take the first
        # exact title + season match ([0] raises IndexError into the except).
        r = self.__proceed_search(query)
        r = [i[0] for i in r if cleantitle.get(i[1]) in t and int(i[2]) == int(season)][0]
        url = source_utils.strip_domain(r)
        if episode:
            # Resolve the requested episode link from the season list page.
            r = client.request(urlparse.urljoin(self.base_link, url))
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'season-list'})
            r = dom_parser.parse_dom(r, 'li')
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [i.attrs['href'] for i in r if i and int(i.content) == int(episode)][0]
            url = source_utils.strip_domain(r)
        return url
    except:
        return
示例15: tvshow
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, year):
    """Resolve *tvshowtitle*/*year* to the site's JSON-API show path via the
    (base64-obfuscated) search link, verifying ambiguous candidates by IMDB
    id. Returns the UTF-8 encoded '/...' path, or None on failure.

    Fixes: removed a dead assignment to ``match`` (it was immediately
    overwritten by the year-filtered version on the next line), and
    initialized ``url`` so a no-match outcome returns None explicitly
    instead of relying on a NameError swallowed by the bare except.
    """
    try:
        query = base64.b64decode(self.search_link) % urllib.quote_plus(cleantitle.query(tvshowtitle))
        result = self.request(query)
        tvshowtitle = cleantitle.get(tvshowtitle)
        # Accept year +/- 1.
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
        result = [i for i in result if any(x in str(i['year']) for x in years)]
        # Exact title + exact year matches are authoritative.
        match = [i['href'] for i in result if tvshowtitle == cleantitle.get(i['name']) and str(year) == str(i['year'])]
        # De-duplicated candidate list (order-preserving).
        match2 = [i['href'] for i in result]
        match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]
        if match2 == []: return
        url = None
        for i in match2[:5]:
            try:
                if len(match) > 0: url = match[0] ; break
                # No exact match: probe each candidate page for the IMDB id.
                if imdb in str(self.request(i)[0]['imdb']): url = i ; break
            except:
                pass
        if url is None: return
        # Normalize to the JSON-API relative path.
        url = '/' + url.split('/json/')[-1]
        url = url.encode('utf-8')
        return url
    except:
        return