This article collects typical usage examples of the height_get_quality function from the Python module salts_lib.scraper_utils. If you are wondering exactly what height_get_quality does and how to use it, the curated examples below should help.
The following presents 15 code examples of height_get_quality, sorted by popularity by default.
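
None of the examples include the function itself, so it helps to know its contract: height_get_quality takes a pixel height (an int, a numeric string, or a label such as '720p') and returns one of the QUALITIES constants, which the companion Q_ORDER dict ranks so scrapers can compare qualities numerically. Below is a minimal sketch of that mapping; the thresholds, constant values, and Q_ORDER ranks are assumptions for illustration, not the actual salts_lib implementation.

# Hypothetical sketch of height_get_quality -- the real salts_lib version
# may use different thresholds and handle more label formats.
import re

class QUALITIES(object):
    # constant names assumed from their use in the examples below
    LOW, MEDIUM, HIGH, HD720, HD1080 = 'Low', 'Medium', 'High', 'HD720', 'HD1080'

# Q_ORDER ranks the constants so a higher number means better quality
Q_ORDER = {QUALITIES.LOW: 1, QUALITIES.MEDIUM: 2, QUALITIES.HIGH: 3,
           QUALITIES.HD720: 4, QUALITIES.HD1080: 5}

def height_get_quality(height):
    # accept ints, numeric strings, or labels like '720p'; fall back to HIGH on junk
    match = re.search(r'(\d+)', str(height))
    if not match:
        return QUALITIES.HIGH
    height = int(match.group(1))
    if height >= 1000:
        return QUALITIES.HD1080
    elif height >= 720:
        return QUALITIES.HD720
    elif height >= 480:
        return QUALITIES.HIGH
    elif height >= 360:
        return QUALITIES.MEDIUM
    else:
        return QUALITIES.LOW
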
Example 1: resolve_link
def resolve_link(self, link):
    try:
        # split piped headers off the link: 'url|key=val&key2=val2'
        headers = dict([item.split('=') for item in (link.split('|')[1]).split('&')])
        for key in headers: headers[key] = urllib.unquote(headers[key])
        link = link.split('|')[0]
    except:
        headers = {}

    if not link.startswith('http'):
        link = urlparse.urljoin(self.base_url, link)
    html = self._http_get(link, headers=headers, cache_limit=0)
    fragment = dom_parser.parse_dom(html, 'div', {'class': 'player'})
    if fragment:
        iframe_url = dom_parser.parse_dom(fragment[0], 'iframe', ret='src')
        if iframe_url:
            iframe_url = iframe_url[0]
            headers = {'Referer': link}
            html = self._http_get(iframe_url, headers=headers, cache_limit=0)
            # solve the reCAPTCHA gate if the player page presents one
            sitekey = dom_parser.parse_dom(html, 'div', {'class': 'g-recaptcha'}, ret='data-sitekey')
            if sitekey:
                token = recaptcha_v2.UnCaptchaReCaptcha().processCaptcha(sitekey[0], lang='en')
                if token:
                    data = {'g-recaptcha-response': token}
                    html = self._http_get(iframe_url, data=data, cache_limit=0)
                    log_utils.log(html)

            match = re.search("\.replace\(\s*'([^']+)'\s*,\s*'([^']*)'\s*\)", html, re.I)
            if match:
                html = html.replace(match.group(1), match.group(2))

            # undo chained window.atob() obfuscation, one base64 decode per call
            match = re.search("window\.atob[\([]+'([^']+)", html)
            if match:
                func_count = len(re.findall('window\.atob', html))
                html = match.group(1)
                for _i in xrange(func_count):
                    html = base64.decodestring(html)

            streams = []
            for match in re.finditer('''<source[^>]+src=["']([^'"]+)[^>]+label=['"]([^'"]+)''', html):
                streams.append(match.groups())

            if len(streams) > 1:
                if not self.auto_pick:
                    result = xbmcgui.Dialog().select(i18n('choose_stream'), [e[1] for e in streams])
                    if result > -1:
                        return streams[result][0] + '|User-Agent=%s' % (scraper_utils.get_ua())
                else:
                    # keep the stream whose label ranks highest in Q_ORDER
                    best_stream = ''
                    best_q = 0
                    for stream in streams:
                        stream_url, label = stream
                        if Q_ORDER[scraper_utils.height_get_quality(label)] > best_q:
                            best_q = Q_ORDER[scraper_utils.height_get_quality(label)]
                            best_stream = stream_url

                    if best_stream:
                        return best_stream + '|User-Agent=%s' % (scraper_utils.get_ua())
            elif streams:
                return streams[0][0] + '|User-Agent=%s' % (scraper_utils.get_ua())

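The auto-pick branch of Example 1 shows a pattern that recurs throughout these examples: convert each stream's label to a quality constant, rank it via Q_ORDER, and keep the best. Using the sketch above, the same logic as a standalone helper (names and values here are illustrative):

def pick_best_stream(streams):
    # streams: list of (stream_url, label) tuples, e.g. [('http://host/a.mp4', '720p')]
    best_stream, best_q = '', 0
    for stream_url, label in streams:
        rank = Q_ORDER[height_get_quality(label)]
        if rank > best_q:
            best_q, best_stream = rank, stream_url
    return best_stream

# pick_best_stream([('http://host/sd.mp4', '360p'), ('http://host/hd.mp4', '1080p')])
# would return 'http://host/hd.mp4'
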
Example 2: get_sources
def get_sources(self, video):
    source_url = self.get_url(video)
    hosters = []
    if source_url and source_url != FORCE_NO_MATCH:
        url = urlparse.urljoin(self.base_url, source_url)
        html = self._http_get(url, require_debrid=True, cache_limit=.5)
        title = dom_parser.parse_dom(html, 'title')
        if title:
            title = re.sub('^\[ST\]\s*–\s*', '', title[0])
            meta = scraper_utils.parse_episode_link(title)
            page_quality = scraper_utils.height_get_quality(meta['height'])
        else:
            page_quality = QUALITIES.HIGH

        fragment = dom_parser.parse_dom(html, 'section', {'class': '[^"]*entry-content[^"]*'})
        if fragment:
            for section in dom_parser.parse_dom(fragment[0], 'p'):
                match = re.search('([^<]*)', section)
                meta = scraper_utils.parse_episode_link(match.group(1))
                if meta['episode'] != '-1' or meta['airdate']:
                    section_quality = scraper_utils.height_get_quality(meta['height'])
                else:
                    section_quality = page_quality

                if Q_ORDER[section_quality] < Q_ORDER[page_quality]:
                    quality = section_quality
                else:
                    quality = page_quality

                for stream_url in dom_parser.parse_dom(section, 'a', ret='href'):
                    host = urlparse.urlparse(stream_url).hostname
                    hoster = {'multi-part': False, 'host': host, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'quality': quality, 'direct': False}
                    hosters.append(hoster)

    return hosters

Example 3: __get_links_from_json2
def __get_links_from_json2(self, url, page_url, video_type):
    sources = {}
    headers = {'Referer': page_url}
    headers.update(XHR)
    html = self._http_get(url, headers=headers, cache_limit=0)
    js_data = scraper_utils.parse_json(html, url)
    try:
        playlist = js_data.get('playlist', [])
        for source in playlist[0].get('sources', []):
            stream_url = source['file']
            if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
                quality = scraper_utils.gv_get_quality(stream_url)
            elif 'label' in source:
                quality = scraper_utils.height_get_quality(source['label'])
            else:
                if video_type == VIDEO_TYPES.MOVIE:
                    meta = scraper_utils.parse_movie_link(stream_url)
                else:
                    meta = scraper_utils.parse_episode_link(stream_url)
                quality = scraper_utils.height_get_quality(meta['height'])
            sources[stream_url] = {'quality': quality, 'direct': True}
            logger.log('Adding stream: %s Quality: %s' % (stream_url, quality), log_utils.LOGDEBUG)
    except Exception as e:
        logger.log('Exception during yesmovies extract: %s' % (e), log_utils.LOGDEBUG)
    return sources

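Example 3 expects a JWPlayer-style JSON payload. For reference, a minimal response that would exercise both the 'label' branch and the parse-the-filename fallback might look like this (values are illustrative, not from any real site):

# Illustrative payload shape assumed by __get_links_from_json2
js_data = {
    'playlist': [{
        'sources': [
            {'file': 'http://cdn.example.com/stream.mp4', 'label': '720p'},  # quality from 'label'
            {'file': 'http://cdn.example.com/Show.S01E02.480p.mp4'},         # quality parsed from filename
        ]
    }]
}
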
Example 4: get_sources
def get_sources(self, video):
    source_url = self.get_url(video)
    hosters = []
    sources = []
    if source_url and source_url != FORCE_NO_MATCH:
        page_url = urlparse.urljoin(self.base_url, source_url)
        html = self._http_get(page_url, cache_limit=.25)
        match = re.search('var\s+view_id\s*=\s*"([^"]+)', html)
        if match:
            view_data = {'id': match.group(1), 'tip': 'view', 'dil': 'or'}
            html = self._http_get(self.ajax_url, data=view_data, headers=XHR, cache_limit=.25)
            html = html.strip()
            html = re.sub(r'\\n|\\t', '', html)
            # the ajax response is either JS with a sources array or raw JSON
            match = re.search('var\s+sources\s*=\s*(\[.*?\])', html)
            if match:
                raw_data = match.group(1)
                raw_data = raw_data.replace('\\', '')
            else:
                raw_data = html

            js_data = scraper_utils.parse_json(raw_data, self.ajax_url)
            if 'data' in js_data:
                src = dom_parser.parse_dom(js_data['data'], 'iframe', ret='src')
                if src:
                    html = self._http_get(src[0], cache_limit=.25)
                    match = re.search('url=([^"]+)', html)
                    if match:
                        stream_url = match.group(1).replace('>', '')
                        sources.append({'label': '720p', 'file': stream_url})
                        direct = False
                    else:
                        src = dom_parser.parse_dom(html, 'iframe', ret='src')
                        if src:
                            sources.append({'label': '720p', 'file': src[0]})
                            direct = False
                        else:
                            for match in re.finditer('"file"\s*:\s*"([^"]+)"\s*,\s*"label"\s*:\s*"([^"]+)', html):
                                sources.append({'label': match.group(2), 'file': match.group(1)})
                            direct = True
            else:
                sources = js_data
                direct = True

            for source in sources:
                stream_url = source['file'] + '|User-Agent=%s' % (scraper_utils.get_ua())
                if direct:
                    host = self._get_direct_hostname(stream_url)
                    if host == 'gvideo':
                        quality = scraper_utils.gv_get_quality(stream_url)
                    else:
                        quality = scraper_utils.height_get_quality(source['label'])
                else:
                    host = urlparse.urlparse(stream_url).hostname
                    quality = scraper_utils.height_get_quality(source['label'])

                hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': direct}
                hosters.append(hoster)

    return hosters

Example 5: get_sources
def get_sources(self, video):
    source_url = self.get_url(video)
    hosters = []
    if not source_url or source_url == FORCE_NO_MATCH: return hosters
    page_url = scraper_utils.urljoin(self.base_url, source_url)
    headers = {'Referer': page_url}
    html = self._http_get(page_url, headers=headers, cache_limit=.5)
    if video.video_type == VIDEO_TYPES.MOVIE:
        fragment = dom_parser2.parse_dom(html, 'div', {'class': 'poster'})
        if fragment:
            movie_url = dom_parser2.parse_dom(fragment[0].content, 'a', req='href')
            if movie_url:
                page_url = scraper_utils.urljoin(self.base_url, movie_url[0].attrs['href'])
                html = self._http_get(page_url, cache_limit=.5)
                episodes = self.__get_episodes(html)
                page_url = self.__get_best_page(episodes)
                if not page_url:
                    return hosters
                else:
                    page_url = scraper_utils.urljoin(self.base_url, page_url)
                    html = self._http_get(page_url, cache_limit=.5)

    streams = dom_parser2.parse_dom(html, 'iframe', req='src')
    if streams:
        streams = [(attrs['src'], 480) for attrs, _content in streams]
        direct = False
    else:
        streams = [(attrs['src'], attrs.get('data-res', 480)) for attrs, _content in dom_parser2.parse_dom(html, 'source', req=['src'])]
        direct = True

    headers = {'User-Agent': scraper_utils.get_ua(), 'Referer': page_url}
    for stream_url, height in streams:
        if 'video.php' in stream_url or 'moviexk.php' in stream_url:
            if 'title=' in stream_url:
                title = stream_url.split('title=')[-1]
                stream_url = stream_url.replace(title, urllib.quote(title))
            redir_url = self._http_get(stream_url, headers=headers, allow_redirect=False, method='HEAD', cache_limit=0)
            if redir_url.startswith('http'):
                redir_url = redir_url.replace(' ', '').split(';codec')[0]
                stream_url = redir_url
            else:
                continue

        if direct:
            host = scraper_utils.get_direct_hostname(self, stream_url)
            if host == 'gvideo':
                quality = scraper_utils.gv_get_quality(stream_url)
            else:
                quality = scraper_utils.height_get_quality(height)
            stream_url += scraper_utils.append_headers(headers)
        else:
            host = urlparse.urlparse(stream_url).hostname
            quality = scraper_utils.height_get_quality(height)

        source = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'direct': direct}
        hosters.append(source)

    return hosters

Example 6: __get_links
def __get_links(self, url, video):
    hosters = []
    search_url = urlparse.urljoin(self.base_url, SEARCH_URL)
    query = self.__translate_search(url)
    result = self._http_get(search_url, data=query, allow_redirect=False, cache_limit=.5)
    if 'files' in result:
        for item in result['files']:
            # each check flags one reason to exclude this search result
            checks = [False] * 6
            if 'type' not in item or item['type'].upper() != 'VIDEO': checks[0] = True
            if 'is_ready' in item and item['is_ready'] != '1': checks[1] = True
            if 'av_result' in item and item['av_result'] in ['warning', 'infected']: checks[2] = True
            if 'video_info' not in item: checks[3] = True
            if 'video_info' in item and item['video_info'] and not re.search('#0:(?:0|1)(?:\(eng\)|\(und\))?:\s*Audio:', item['video_info']): checks[4] = True
            if video.video_type == VIDEO_TYPES.EPISODE:
                sxe = '[. ][Ss]%02d[Ee]%02d[. ]' % (int(video.season), int(video.episode))
                if not re.search(sxe, item['name']):
                    if video.ep_airdate:
                        airdate_pattern = '[. ]%s[. ]%02d[. ]%02d[. ]' % (video.ep_airdate.year, video.ep_airdate.month, video.ep_airdate.day)
                        if not re.search(airdate_pattern, item['name']): checks[5] = True
            if any(checks):
                log_utils.log('Furk.net result excluded: %s - |%s|' % (checks, item['name']), log_utils.LOGDEBUG)
                continue

            # prefer the resolution from video_info; fall back to parsing the name
            match = re.search('(\d{3,})\s?x\s?(\d{3,})', item['video_info'])
            if match:
                width, _ = match.groups()
                quality = scraper_utils.width_get_quality(width)
            else:
                if video.video_type == VIDEO_TYPES.MOVIE:
                    _, _, height, _ = scraper_utils.parse_movie_link(item['name'])
                    quality = scraper_utils.height_get_quality(height)
                elif video.video_type == VIDEO_TYPES.EPISODE:
                    _, _, _, height, _ = scraper_utils.parse_episode_link(item['name'])
                    if int(height) > -1:
                        quality = scraper_utils.height_get_quality(height)
                    else:
                        quality = QUALITIES.HIGH
                else:
                    quality = QUALITIES.HIGH

            if 'url_pls' in item:
                size_gb = scraper_utils.format_size(int(item['size']), 'B')
                if self.max_bytes and int(item['size']) > self.max_bytes:
                    log_utils.log('Result skipped, Too big: |%s| - %s (%s) > %s (%sGB)' % (item['name'], item['size'], size_gb, self.max_bytes, self.max_gb))
                    continue

                stream_url = item['url_pls']
                host = self._get_direct_hostname(stream_url)
                hoster = {'multi-part': False, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'host': host, 'quality': quality, 'direct': True}
                hoster['size'] = size_gb
                hoster['extra'] = item['name']
                hosters.append(hoster)
            else:
                log_utils.log('Furk.net result skipped - no playlist: |%s|' % (json.dumps(item)), log_utils.LOGDEBUG)

    return hosters

Example 7: __get_quality
def __get_quality(self, item, video):
    if 'width' in item:
        return scraper_utils.width_get_quality(item['width'])
    elif 'height' in item:
        return scraper_utils.height_get_quality(item['height'])
    else:
        if video.video_type == VIDEO_TYPES.MOVIE:
            _title, _year, height, _extra = scraper_utils.parse_movie_link(item['name'])
        else:
            _title, _season, _episode, height, _extra = scraper_utils.parse_episode_link(item['name'])
        return scraper_utils.height_get_quality(height)

Example 8: __get_quality
def __get_quality(self, item, video):
    if 'width' in item and item['width']:
        return scraper_utils.width_get_quality(item['width'])
    elif 'height' in item and item['height']:
        return scraper_utils.height_get_quality(item['height'])
    elif 'name' in item:
        if video.video_type == VIDEO_TYPES.MOVIE:
            meta = scraper_utils.parse_movie_link(item['name'])
        else:
            meta = scraper_utils.parse_episode_link(item['name'])
        return scraper_utils.height_get_quality(meta['height'])
    else:
        return QUALITIES.HIGH

Example 9: get_sources
def get_sources(self, video):
    source_url = self.get_url(video)
    hosters = []
    if source_url and source_url != FORCE_NO_MATCH:
        url = urlparse.urljoin(self.base_url, source_url)
        html = self._http_get(url, cache_limit=.5)
        pattern = '\$\.post\("([^"]+)"\s*,\s*\{(.*?)\}'
        match = re.search(pattern, html)
        if match:
            post_url, post_data = match.groups()
            data = self.__get_data(post_data)
            html = self._http_get(post_url, data=data, cache_limit=.5)
            js_result = scraper_utils.parse_json(html, post_url)
            for key in js_result:
                stream_url = js_result[key]
                host = self._get_direct_hostname(stream_url)
                if host == 'gvideo':
                    quality = scraper_utils.gv_get_quality(stream_url)
                else:
                    quality = scraper_utils.height_get_quality(key)
                hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
                hosters.append(hoster)
    return hosters

Example 10: get_sources
def get_sources(self, video):
    source_url = self.get_url(video)
    sources = []
    if source_url and source_url != FORCE_NO_MATCH:
        url = urlparse.urljoin(self.base_url, source_url)
        html = self._http_get(url, cache_limit=.5)
        is_3d = False
        page_quality = QUALITIES.HD720
        title = dom_parser.parse_dom(html, 'title')
        if title:
            title = title[0]
            match = re.search('(\d{3,})p', title)
            if match:
                page_quality = scraper_utils.height_get_quality(match.group(1))

            is_3d = True if re.search('\s+3D\s+', title) else False

        fragments = dom_parser.parse_dom(html, 'div', {'class': 'txt-block'}) + dom_parser.parse_dom(html, 'li', {'class': 'elemento'})
        for fragment in fragments:
            for match in re.finditer('href="([^"]+)', fragment):
                stream_url = match.group(1)
                host = urlparse.urlparse(stream_url).hostname
                q_str = dom_parser.parse_dom(fragment, 'span', {'class': 'd'})
                q_str = q_str[0].upper() if q_str else ''
                base_quality = QUALITY_MAP.get(q_str, page_quality)
                quality = scraper_utils.get_quality(video, host, base_quality)
                source = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'direct': False}
                source['format'] = 'x265'
                source['3D'] = is_3d
                sources.append(source)

    return sources

Example 11: __get_gk_links
def __get_gk_links(self, html, page_url, video_type, episode):
    sources = {}
    for link in dom_parser.parse_dom(html, 'div', {'class': '[^"]*server_line[^"]*'}):
        film_id = dom_parser.parse_dom(link, 'a', ret='data-film')
        name_id = dom_parser.parse_dom(link, 'a', ret='data-name')
        server_id = dom_parser.parse_dom(link, 'a', ret='data-server')
        if film_id and name_id and server_id:
            data = {'ipplugins': 1, 'ip_film': film_id[0], 'ip_server': server_id[0], 'ip_name': name_id[0]}
            headers = {'Referer': page_url}
            headers.update(XHR)
            url = urlparse.urljoin(self.base_url, LINK_URL)
            html = self._http_get(url, data=data, headers=headers, cache_limit=.25)
            js_data = scraper_utils.parse_json(html, url)
            if 's' in js_data and isinstance(js_data['s'], basestring):
                url = urlparse.urljoin(self.base_url, LINK_URL3)
                params = {'u': js_data['s'], 'w': '100%', 'h': 450, 's': js_data['v']}
                html = self._http_get(url, params=params, headers=headers, cache_limit=.25)
                js_data = scraper_utils.parse_json(html, url)

            if 'data' in js_data and js_data['data']:
                if isinstance(js_data['data'], basestring):
                    sources[js_data['data']] = QUALITIES.HIGH
                else:
                    for link in js_data['data']:
                        stream_url = link['files']
                        if self._get_direct_hostname(stream_url) == 'gvideo':
                            quality = scraper_utils.gv_get_quality(stream_url)
                        elif 'quality' in link:
                            quality = scraper_utils.height_get_quality(link['quality'])
                        else:
                            quality = QUALITIES.HIGH
                        sources[stream_url] = quality

    return sources

Example 12: get_sources
def get_sources(self, video):
    source_url = self.get_url(video)
    hosters = []
    if not source_url or source_url == FORCE_NO_MATCH: return hosters
    page_url = scraper_utils.urljoin(self.base_url, source_url)
    html = self._http_get(page_url, cache_limit=.5)
    match = re.search('var\s*video_id="([^"]+)', html)
    if not match: return hosters

    video_id = match.group(1)
    data = {'v': video_id}
    headers = {'Referer': page_url}
    headers.update(XHR)
    html = self._http_get(INFO_URL, data=data, headers=headers, cache_limit=.5)
    sources = scraper_utils.parse_json(html, INFO_URL)
    for source in sources:
        match = re.search('url=(.*)', sources[source])
        if not match: continue

        stream_url = urllib.unquote(match.group(1))
        host = scraper_utils.get_direct_hostname(self, stream_url)
        if host == 'gvideo':
            quality = scraper_utils.gv_get_quality(stream_url)
        else:
            quality = scraper_utils.height_get_quality(source)
        stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
        hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
        hosters.append(hoster)
    return hosters

Example 13: get_sources
def get_sources(self, video):
    source_url = self.get_url(video)
    hosters = []
    norm_title = scraper_utils.normalize_title(video.title)
    if source_url and source_url != FORCE_NO_MATCH:
        source_url = urlparse.urljoin(self.base_url, source_url)
        for line in self._get_files(source_url, cache_limit=24):
            if not line['directory']:
                match = {}
                if video.video_type == VIDEO_TYPES.MOVIE:
                    meta = scraper_utils.parse_movie_link(line['link'])
                    if norm_title in scraper_utils.normalize_title(meta['title']):
                        match = line
                elif self.__episode_match(line, video):
                    match = line
                    meta = scraper_utils.parse_episode_link(line['link'])

                if match:
                    if meta['dubbed']: continue
                    stream_url = match['url'] + '|User-Agent=%s' % (scraper_utils.get_ua())
                    stream_url = stream_url.replace(self.base_url, '')
                    quality = scraper_utils.height_get_quality(meta['height'])
                    hoster = {'multi-part': False, 'host': self._get_direct_hostname(stream_url), 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
                    if 'format' in meta: hoster['format'] = meta['format']
                    if 'size' in match: hoster['size'] = scraper_utils.format_size(int(match['size']))
                    hosters.append(hoster)

    return hosters

Example 14: __get_gk_links
def __get_gk_links(self, html, page_url, video_type, episode):
    sources = {}
    phimid = dom_parser.parse_dom(html, 'input', {'name': 'phimid'}, ret='value')
    if phimid and video_type == VIDEO_TYPES.EPISODE:
        url = urlparse.urljoin(self.tv_base_url, '/ajax.php')
        data = {'ipos_server': 1, 'phimid': phimid[0], 'keyurl': episode}
        headers = XHR
        headers['Referer'] = page_url
        html = self._http_get(url, data=data, headers=headers, cache_limit=.5)

    for link in dom_parser.parse_dom(html, 'div', {'class': '[^"]*server_line[^"]*'}):
        film_id = dom_parser.parse_dom(link, 'a', ret='data-film')
        name_id = dom_parser.parse_dom(link, 'a', ret='data-name')
        server_id = dom_parser.parse_dom(link, 'a', ret='data-server')
        if film_id and name_id and server_id:
            data = {'ipplugins': 1, 'ip_film': film_id[0], 'ip_server': server_id[0], 'ip_name': name_id[0]}
            headers = XHR
            headers['Referer'] = page_url
            url = urlparse.urljoin(self.__get_base_url(video_type), LINK_URL)
            html = self._http_get(url, data=data, headers=headers, cache_limit=.25)
            js_data = scraper_utils.parse_json(html, url)
            if 's' in js_data:
                if isinstance(js_data['s'], basestring):
                    sources[js_data['s']] = QUALITIES.HIGH
                else:
                    for link in js_data['s']:
                        stream_url = link['file']
                        if self._get_direct_hostname(stream_url) == 'gvideo':
                            quality = scraper_utils.gv_get_quality(stream_url)
                        elif 'label' in link:
                            quality = scraper_utils.height_get_quality(link['label'])
                        else:
                            quality = QUALITIES.HIGH
                        sources[stream_url] = quality

    return sources

Example 15: __get_cloud_links
def __get_cloud_links(self, html, page_url, sub):
    hosters = []
    html = html.replace('\\"', '"').replace('\\/', '/')
    match = re.search("dizi_kapak_getir\('([^']+)", html)
    if match:
        ep_id = match.group(1)
        for script_url in dom_parser.parse_dom(html, 'script', {'data-cfasync': 'false'}, ret='src'):
            html = self._http_get(script_url, cache_limit=24)
            match1 = re.search("var\s+kapak_url\s*=\s*'([^']+)", html)
            match2 = re.search("var\s+aCtkp\s*=\s*'([^']+)", html)
            if match1 and match2:
                link_url = '%s?fileid=%s&access_token=%s' % (match1.group(1), ep_id, match2.group(1))
                headers = {'Referer': page_url}
                html = self._http_get(link_url, headers=headers, cache_limit=.5)
                js_data = scraper_utils.parse_json(html, link_url)
                for variant in js_data.get('variants', {}):
                    stream_host = random.choice(variant.get('hosts', []))
                    if stream_host:
                        stream_url = STREAM_URL % (stream_host, variant['path'], scraper_utils.get_ua(), urllib.quote(page_url))
                        if not stream_url.startswith('http'):
                            stream_url = 'http://' + stream_url
                        host = self._get_direct_hostname(stream_url)
                        if 'width' in variant:
                            quality = scraper_utils.width_get_quality(variant['width'])
                        elif 'height' in variant:
                            quality = scraper_utils.height_get_quality(variant['height'])
                        else:
                            quality = QUALITIES.HIGH
                        hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
                        hoster['subs'] = sub
                        hosters.append(hoster)

    return hosters