本文整理汇总了Python中salts_lib.scraper_utils.gv_get_quality函数的典型用法代码示例。如果您正苦于以下问题:Python gv_get_quality函数的具体用法?Python gv_get_quality怎么用?Python gv_get_quality使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了gv_get_quality函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_sources
def get_sources(self, video):
    """Return a list of direct hoster dicts scraped from the video's page.

    Looks for a "screen" div, optionally follows its player JS file, then
    pulls every <source src="..."> stream out of the resulting markup.
    """
    hosters = []
    page_link = self.get_url(video)
    if not page_link or page_link == FORCE_NO_MATCH:
        return hosters

    html = self._http_get(urlparse.urljoin(self.base_url, page_link), cache_limit=.5)
    fragment = dom_parser.parse_dom(html, 'div', {'class': '[^"]*screen[^"]*'})
    if fragment:
        js_src = dom_parser.parse_dom(fragment[0], 'script', ret='src')
        if js_src:
            # player markup is generated by an external script; fetch it instead
            html = self._http_get(urlparse.urljoin(self.base_url, js_src[0]), cache_limit=.5)
        else:
            html = fragment[0]

    for source_match in re.finditer('<source[^>]+src="([^"]+)', html):
        stream_url = source_match.group(1)
        host = self._get_direct_hostname(stream_url)
        # gvideo and blogspot links both encode quality in the url itself
        if host == 'gvideo' or 'blogspot' in stream_url:
            quality = scraper_utils.gv_get_quality(stream_url)
        else:
            _, _, height, _ = scraper_utils.parse_movie_link(stream_url)
            quality = scraper_utils.height_get_quality(height)
        stream_url += '|User-Agent=%s' % (scraper_utils.get_ua())
        hosters.append({'multi-part': False, 'host': host, 'class': self, 'quality': quality,
                        'views': None, 'rating': None, 'url': stream_url, 'direct': True})
    return hosters
示例2: __get_gk_links
def __get_gk_links(self, link, iframe_url):
    """Resolve a GK player token into {stream_url: {'quality': ..., 'direct': ...}}.

    :param link: opaque link token POSTed to GK_URL
    :param iframe_url: embedding iframe url, sent as the Referer header
    :return: dict mapping stream urls to quality/direct metadata
    """
    sources = {}
    data = {'link': link}
    # BUG FIX: the original did `headers = XHR` and then mutated it, polluting
    # the shared module-level XHR constant for every other request. Copy it.
    headers = dict(XHR)
    headers.update({'Referer': iframe_url, 'User-Agent': USER_AGENT})
    html = self._http_get(GK_URL, data=data, headers=headers, cache_limit=.25)
    js_data = scraper_utils.parse_json(html, GK_URL)
    if 'link' in js_data:
        if isinstance(js_data['link'], basestring):
            # single link: either a gvideo bundle or one indirect hoster link
            stream_url = js_data['link']
            if self._get_direct_hostname(stream_url) == 'gvideo':
                for source in self._parse_google(stream_url):
                    sources[source] = {'quality': scraper_utils.gv_get_quality(source), 'direct': True}
            else:
                sources[stream_url] = {'quality': QUALITIES.HIGH, 'direct': False}
        else:
            # list of {'link': ..., 'label': ...} entries
            # (renamed loop var: the original shadowed the `link` parameter)
            for entry in js_data['link']:
                stream_url = entry['link']
                if self._get_direct_hostname(stream_url) == 'gvideo':
                    quality = scraper_utils.gv_get_quality(stream_url)
                elif 'label' in entry:
                    quality = scraper_utils.height_get_quality(entry['label'])
                else:
                    quality = QUALITIES.HIGH
                sources[stream_url] = {'quality': quality, 'direct': True}
    return sources
示例3: get_sources
def get_sources(self, video):
    """Return hoster dicts for *video*, following embeds[] iframes.

    Handles two iframe flavours: proxied picasa links (play-en.php) and
    generic player pages whose sources list may contain download.php
    redirectors that are resolved via a HEAD request.
    """
    source_url = self.get_url(video)
    sources = {}
    hosters = []
    if source_url and source_url != FORCE_NO_MATCH:
        url = urlparse.urljoin(self.base_url, source_url)
        html = self._http_get(url, cache_limit=0.5)
        for match in re.finditer("embeds\[(\d+)\]\s*=\s*'([^']+)", html):
            match = re.search('src="([^"]+)', match.group(2))
            if match:
                iframe_url = match.group(1)
                if "play-en.php" in iframe_url:
                    match = re.search('id=([^"&]+)', iframe_url)
                    if match:
                        proxy_link = match.group(1)
                        proxy_link = proxy_link.split("*", 1)[-1]
                        picasa_url = scraper_utils.gk_decrypt(self.get_name(), GK_KEY, proxy_link)
                        for stream_url in self._parse_google(picasa_url):
                            sources[stream_url] = {
                                "quality": scraper_utils.gv_get_quality(stream_url),
                                "direct": True,
                            }
                else:
                    html = self._http_get(iframe_url, cache_limit=0.25)
                    temp_sources = self._parse_sources_list(html)
                    # BUG FIX: iterate a snapshot — the original added/deleted keys of
                    # temp_sources while iterating it, which is undefined behavior and
                    # can raise "dictionary changed size during iteration".
                    for source in list(temp_sources):
                        if "download.php" in source:
                            # resolve the redirector to the real stream url
                            redir_html = self._http_get(source, allow_redirect=False, method="HEAD", cache_limit=0)
                            if redir_html.startswith("http"):
                                temp_sources[redir_html] = temp_sources[source]
                                del temp_sources[source]
                    sources.update(temp_sources)
                    for source in dom_parser.parse_dom(html, "source", {"type": "video/mp4"}, ret="src"):
                        sources[source] = {"quality": QUALITIES.HD720, "direct": True}
        for source in sources:
            host = self._get_direct_hostname(source)
            stream_url = source + "|User-Agent=%s" % (scraper_utils.get_ua())
            if host == "gvideo":
                quality = scraper_utils.gv_get_quality(source)
            else:
                quality = sources[source]["quality"]
                # normalize non-canonical quality values to the known ladder
                if quality not in Q_ORDER:
                    quality = QUALITY_MAP.get(sources[source]["quality"], QUALITIES.HIGH)
            hoster = {
                "multi-part": False,
                "url": stream_url,
                "host": host,
                "class": self,
                "quality": quality,
                "views": None,
                "rating": None,
                "direct": True,
            }
            hosters.append(hoster)
    return hosters
示例4: get_sources
def get_sources(self, video):
    """Return hoster dicts for *video* (dom_parser2 variant of the embeds[] scraper).

    Same flow as its sibling: decrypt play-en.php picasa proxies, otherwise
    parse the iframe's sources list, resolving download.php redirectors.
    """
    source_url = self.get_url(video)
    sources = {}
    hosters = []
    if not source_url or source_url == FORCE_NO_MATCH: return hosters
    url = scraper_utils.urljoin(self.base_url, source_url)
    html = self._http_get(url, cache_limit=0)
    for match in re.finditer("embeds\[(\d+)\]\s*=\s*'([^']+)", html):
        match = re.search('src="([^"]+)', match.group(2))
        if match:
            iframe_url = match.group(1)
            if 'play-en.php' in iframe_url:
                match = re.search('id=([^"&]+)', iframe_url)
                if match:
                    proxy_link = match.group(1)
                    proxy_link = proxy_link.split('*', 1)[-1]
                    picasa_url = scraper_utils.gk_decrypt(self.get_name(), GK_KEY, proxy_link)
                    for stream_url in scraper_utils.parse_google(self, picasa_url):
                        sources[stream_url] = {'quality': scraper_utils.gv_get_quality(stream_url), 'direct': True}
            else:
                html = self._http_get(iframe_url, cache_limit=0)
                temp_sources = scraper_utils.parse_sources_list(self, html)
                # BUG FIX: iterate a snapshot — the original mutated temp_sources
                # (add + delete) while iterating it, which is undefined behavior
                # and can raise "dictionary changed size during iteration".
                for source in list(temp_sources):
                    if 'download.php' in source:
                        # resolve the redirector to the real stream url
                        redir_html = self._http_get(source, allow_redirect=False, method='HEAD', cache_limit=0)
                        if redir_html.startswith('http'):
                            temp_sources[redir_html] = temp_sources[source]
                            del temp_sources[source]
                sources.update(temp_sources)
                for source in dom_parser2.parse_dom(html, 'source', {'type': 'video/mp4'}, req='src'):
                    sources[source.attrs['src']] = {'quality': QUALITIES.HD720, 'direct': True, 'referer': iframe_url}
    for source, values in sources.iteritems():
        host = scraper_utils.get_direct_hostname(self, source)
        headers = {'User-Agent': scraper_utils.get_ua()}
        if 'referer' in values: headers['Referer'] = values['referer']
        stream_url = source + scraper_utils.append_headers(headers)
        if host == 'gvideo':
            quality = scraper_utils.gv_get_quality(source)
        else:
            quality = values['quality']
            # normalize non-canonical quality values to the known ladder
            if quality not in Q_ORDER:
                quality = QUALITY_MAP.get(values['quality'], QUALITIES.HIGH)
        hoster = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'direct': True}
        hosters.append(hoster)
    return hosters
示例5: __get_links
def __get_links(self, url):
    """Extract direct stream hoster dicts from an embedded player url."""
    sources = []
    embed = re.search('src="([^"]+)', url)
    if not embed:
        return sources

    url = embed.group(1).replace('\\/', '/')
    html = self._http_get(url, cache_limit=0)
    script = re.search('<script\s+src="([^\']+)\'\+(\d+)\+\'([^\']+)', html)
    if script:
        # the player script url is assembled from three concatenated parts,
        # plus a random cache-buster suffix
        page_url = ''.join(script.groups()) + str(random.random())
        html = self._http_get(page_url, cache_limit=0)

    for stream_match in re.finditer('"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"', html):
        stream_url, height = stream_match.groups()
        stream_url = stream_url.replace('\\&', '&').replace('\\/', '/')
        if 'v.asp' in stream_url and 'ok.ru' not in url:
            # v.asp links are redirectors; follow them manually
            redirect = self._http_get(stream_url, allow_redirect=False, cache_limit=0)
            if redirect:
                stream_url = redirect
        host = self._get_direct_hostname(stream_url)
        if host == 'gvideo':
            quality = scraper_utils.gv_get_quality(stream_url)
        else:
            quality = scraper_utils.height_get_quality(height)
        stream_url += '|User-Agent=%s&Referer=%s' % (scraper_utils.get_ua(), urllib.quote(url))
        sources.append({'multi-part': False, 'host': host, 'class': self, 'quality': quality,
                        'views': None, 'rating': None, 'url': stream_url, 'direct': True})
    return sources
示例6: get_sources
def get_sources(self, video):
    """Return hoster dicts by POSTing the page's video_id to the VIDEO_URL ajax endpoint.

    The endpoint returns JSON mapping a quality key to a redirect containing
    ``url=<escaped stream url>``.
    """
    source_url = self.get_url(video)
    hosters = []
    if source_url and source_url != FORCE_NO_MATCH:
        page_url = urlparse.urljoin(self.base_url, source_url)
        html = self._http_get(page_url, cache_limit=.5)
        match = re.search('var\s*video_id="([^"]+)', html)
        if match:
            video_id = match.group(1)
            url = urlparse.urljoin(self.base_url, VIDEO_URL)
            data = {'v': video_id}
            # BUG FIX: the original did `headers = XHR` then assigned into it,
            # mutating the shared module-level XHR constant. Use a copy.
            headers = dict(XHR)
            headers['Referer'] = page_url
            html = self._http_get(url, data=data, headers=headers, cache_limit=.5)
            sources = scraper_utils.parse_json(html, url)
            for source in sources:
                match = re.search('url=(.*)', sources[source])
                if match:
                    stream_url = urllib.unquote(match.group(1))
                    host = self._get_direct_hostname(stream_url)
                    if host == 'gvideo':
                        quality = scraper_utils.gv_get_quality(stream_url)
                    else:
                        # the JSON key is the height label
                        quality = scraper_utils.height_get_quality(source)
                    stream_url += '|User-Agent=%s' % (scraper_utils.get_ua())
                    hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
                    hosters.append(hoster)
    return hosters
示例7: get_sources
def get_sources(self, video):
    """Return hoster dicts; quality comes from the page's 'poster-qulabel' div."""
    sources = []
    source_url = self.get_url(video)
    if not source_url or source_url == FORCE_NO_MATCH:
        return sources

    page_url = urlparse.urljoin(self.base_url, source_url)
    html = self._http_get(page_url, cache_limit=8)
    q_str = dom_parser.parse_dom(html, 'div', {'class': 'poster-qulabel'})
    if q_str:
        label = q_str[0].replace(' ', '').upper()
        page_quality = Q_MAP.get(label, QUALITIES.HIGH)
    else:
        page_quality = QUALITIES.HIGH

    for fragment in dom_parser.parse_dom(html, 'div', {'class': 'tab_box'}):
        file_match = re.search('file\s*:\s*"([^"]+)', fragment)
        # inline file: entry if present, otherwise fall back to the ajax lookup
        stream_url = file_match.group(1) if file_match else self.__get_ajax_sources(fragment, page_url)
        if not stream_url:
            continue
        host = self._get_direct_hostname(stream_url)
        quality = scraper_utils.gv_get_quality(stream_url) if host == 'gvideo' else page_quality
        stream_url += '|User-Agent=%s&Referer=%s' % (scraper_utils.get_ua(), urllib.quote(page_url))
        sources.append({'multi-part': False, 'url': stream_url, 'host': host, 'class': self,
                        'quality': quality, 'views': None, 'rating': None, 'direct': True})
    return sources
示例8: get_sources
def get_sources(self, video):
    """Collect gvideo hoster dicts from the page's player iframe.

    Fetches the player iframe, gathers embedded and linked source groups,
    and emits a hoster entry for each gvideo stream found.
    """
    source_url = self.get_url(video)
    hosters = []
    sources = []
    # no scrapeable page for this video
    if not source_url or source_url == FORCE_NO_MATCH: return hosters
    page_url = scraper_utils.urljoin(self.base_url, source_url)
    html = self._http_get(page_url, cache_limit=1)
    fragment = dom_parser2.parse_dom(html, 'div', {'class': 'player'})
    if not fragment: return hosters
    iframe_url = dom_parser2.parse_dom(fragment[0].content, 'iframe', req='src')
    if not iframe_url: return hosters
    html = self._http_get(iframe_url[0].attrs['src'], cache_limit=.25)
    # each helper returns a group dict with a 'sources' list and optional 'subs' flag
    sources.append(self.__get_embedded_sources(html))
    sources.append(self.__get_linked_sources(html))
    for source in sources:
        for stream_url in source['sources']:
            host = scraper_utils.get_direct_hostname(self, stream_url)
            # NOTE(review): only gvideo streams appear to produce hosters here;
            # quality is derived after the UA header is appended (gv_get_quality
            # reads the itag from the url, so the suffix does not affect it) —
            # confirm against the original indentation if available
            if host == 'gvideo':
                stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
                quality = scraper_utils.gv_get_quality(stream_url)
                hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
                # propagate the group's subtitle flag (default True)
                hoster['subs'] = source.get('subs', True)
                hosters.append(hoster)
    return hosters
示例9: get_sources
def get_sources(self, video):
    """Return hoster dicts from window.location.href redirects inside the meta-media iframe.

    Fixes in this revision:
    - the non-gvideo branch keyed ``sources`` on ``source_url`` (the page path)
      instead of the actual stream url;
    - the final loop computed the hostname from a stale ``stream_url`` variable
      and never attached the User-Agent to the emitted url.
    """
    source_url = self.get_url(video)
    hosters = []
    sources = {}
    if source_url and source_url != FORCE_NO_MATCH:
        url = urlparse.urljoin(self.base_url, source_url)
        html = self._http_get(url, cache_limit=.5)
        fragment = dom_parser.parse_dom(html, 'div', {'class': 'meta-media'})
        if fragment:
            iframe_url = dom_parser.parse_dom(fragment[0], 'iframe', ret='src')
            if iframe_url:
                iframe_url = urlparse.urljoin(self.base_url, iframe_url[0])
                html = self._http_get(iframe_url, cache_limit=.5)
                for match in re.finditer('window.location.href\s*=\s*"([^"]+)', html):
                    stream_url = match.group(1)
                    host = self._get_direct_hostname(stream_url)
                    if host == 'gvideo':
                        sources[stream_url] = scraper_utils.gv_get_quality(stream_url)
                    else:
                        # key on the stream url (was: source_url, the page path)
                        sources[stream_url] = QUALITIES.HIGH
        for source in sources:
            # derive host and headers from this source, not a stale loop variable
            host = self._get_direct_hostname(source)
            stream_url = source + '|User-Agent=%s' % (scraper_utils.get_ua())
            hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': sources[source], 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
            hosters.append(hoster)
    return hosters
示例10: get_sources
def get_sources(self, video):
    """Return hoster dicts by locating the play() call for this title in movies.js."""
    hosters = []
    source_url = self.get_url(video)
    if not source_url or source_url == FORCE_NO_MATCH:
        return hosters

    js_url = scraper_utils.urljoin(self.base_url, '/javascript/movies.js')
    html = self._http_get(js_url, cache_limit=48)
    if source_url.startswith('/'):
        source_url = source_url[1:]
    pattern = '''getElementById\(\s*"%s".*?play\(\s*'([^']+)''' % (source_url)
    match = re.search(pattern, html, re.I)
    if not match:
        return hosters

    stream_url = match.group(1)
    # google drive/docs links expand into multiple gvideo streams
    if 'drive.google' in stream_url or 'docs.google' in stream_url:
        sources = scraper_utils.parse_google(self, stream_url)
    else:
        sources = [stream_url]

    for source in sources:
        stream_url = source + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
        host = scraper_utils.get_direct_hostname(self, source)
        if host == 'gvideo':
            quality, direct = scraper_utils.gv_get_quality(source), True
        elif 'youtube' in stream_url:
            quality, direct, host = QUALITIES.HD720, False, 'youtube.com'
        else:
            quality, direct = QUALITIES.HIGH, True
        hosters.append({'multi-part': False, 'host': host, 'class': self, 'quality': quality,
                        'views': None, 'rating': None, 'url': stream_url, 'direct': direct})
    return hosters
示例11: __get_links_from_playlist
def __get_links_from_playlist(self, grab_url, headers):
    """Parse a jwplayer-style playlist JSON into {stream_url: {'quality', 'direct'}}.

    :param grab_url: playlist url (backslash-escaped slashes are stripped)
    :param headers: request headers forwarded to every fetch
    """
    sources = {}
    grab_url = grab_url.replace('\\', '')
    grab_html = self._http_get(grab_url, headers=headers, cache_limit=.5)
    js_data = scraper_utils.parse_json(grab_html, grab_url)
    # BUG FIX: narrowed the original bare `except:` (which swallowed everything,
    # including KeyboardInterrupt/SystemExit) to the errors a missing/odd
    # playlist shape can actually raise.
    try:
        playlist = js_data['playlist'][0]['sources']
    except (KeyError, IndexError, TypeError):
        playlist = []
    for item in playlist:
        stream_url = item.get('file')
        if stream_url:
            if stream_url.startswith('/'):
                # relative urls are redirectors; resolve via a HEAD request
                stream_url = scraper_utils.urljoin(self.base_url, stream_url)
                redir_url = self._http_get(stream_url, headers=headers, allow_redirect=False, method='HEAD')
                if redir_url.startswith('http'):
                    stream_url = redir_url
            if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
                quality = scraper_utils.gv_get_quality(stream_url)
            elif 'label' in item:
                quality = scraper_utils.height_get_quality(item['label'])
            else:
                quality = QUALITIES.HIGH
            logger.log('Adding stream: %s Quality: %s' % (stream_url, quality), log_utils.LOGDEBUG)
            sources[stream_url] = {'quality': quality, 'direct': True}
            # without a scraper proxy url configured, keep only the first stream
            if not kodi.get_setting('scraper_url'): break
    return sources
示例12: get_sources
def get_sources(self, video):
    """Return hoster dicts via the site's ajax endpoint (videolink/videokalite JSON)."""
    hosters = []
    source_url = self.get_url(video)
    if not source_url or source_url == FORCE_NO_MATCH:
        return hosters

    page_url = urlparse.urljoin(self.base_url, source_url)
    html = self._http_get(page_url, cache_limit=.5)
    # trailer-only pages carry this marker; nothing to scrape
    if re.search('Şu an fragman*', html, re.I):
        return hosters

    ajax_match = re.search('''url\s*:\s*"([^"]+)"\s*,\s*data:\s*["'](id=\d+)''', html)
    if not ajax_match:
        return hosters

    ajax_url, data = ajax_match.groups()
    ajax_url = urlparse.urljoin(self.base_url, ajax_url)
    result = self._http_get(ajax_url, data=data, headers=XHR, cache_limit=.5)
    for stream_match in re.finditer('"videolink\d*"\s*:\s*"([^"]+)","videokalite\d*"\s*:\s*"?(\d+)p?', result):
        stream_url, height = stream_match.groups()
        stream_url = stream_url.replace('\\/', '/')
        host = self._get_direct_hostname(stream_url)
        if host == 'gvideo':
            quality = scraper_utils.gv_get_quality(stream_url)
        else:
            quality = scraper_utils.height_get_quality(height)
        stream_url += '|User-Agent=%s&Referer=%s' % (scraper_utils.get_ua(), urllib.quote(page_url))
        hosters.append({'multi-part': False, 'host': host, 'class': self, 'quality': quality,
                        'views': None, 'rating': None, 'url': stream_url, 'direct': True})
    return hosters
示例13: get_sources
def get_sources(self, video):
    """Return hoster dicts by POSTing the iframe's watch id to the GK endpoint.

    Robustness fix: entries without a ``label`` previously raised KeyError;
    they now fall back to QUALITIES.HIGH, consistent with the other GK-based
    scrapers in this file.
    """
    source_url = self.get_url(video)
    hosters = []
    if source_url and source_url != FORCE_NO_MATCH:
        url = urlparse.urljoin(self.base_url, source_url)
        html = self._http_get(url, cache_limit=.5)
        match = re.search('<iframe[^>]+src="([^"]+watch=([^"]+))', html)
        if match:
            iframe_url, link_id = match.groups()
            data = {'link': link_id}
            headers = {'Referer': iframe_url}
            gk_url = urlparse.urljoin(self.base_url, GK_URL)
            html = self._http_get(gk_url, data=data, headers=headers, cache_limit=.5)
            js_data = scraper_utils.parse_json(html, gk_url)
            for link in js_data.get('link', []):
                stream_url = link['link']
                host = self._get_direct_hostname(stream_url)
                if host == 'gvideo':
                    quality = scraper_utils.gv_get_quality(stream_url)
                elif 'label' in link:
                    quality = scraper_utils.height_get_quality(link['label'])
                else:
                    # no quality info supplied; assume HIGH like the sibling scrapers
                    quality = QUALITIES.HIGH
                hoster = {'multi-part': False, 'url': stream_url, 'class': self, 'quality': quality, 'host': host, 'rating': None, 'views': None, 'direct': True}
                hosters.append(hoster)
    return hosters
示例14: get_sources
def get_sources(self, video):
    """Return hoster dicts from every 'movieplay' iframe on the page."""
    hosters = []
    source_url = self.get_url(video)
    if not source_url or source_url == FORCE_NO_MATCH:
        return hosters

    url = scraper_utils.urljoin(self.base_url, source_url)
    html = self._http_get(url, cache_limit=8)
    for _attrs, fragment in dom_parser2.parse_dom(html, 'div', {'class': 'movieplay'}):
        iframe_src = dom_parser2.parse_dom(fragment, 'iframe', req='src')
        if not iframe_src:
            continue
        iframe_src = iframe_src[0].attrs['src']
        if re.search('o(pen)?load', iframe_src, re.I):
            # openload links: quality comes from the filename embedded in the url
            meta = scraper_utils.parse_movie_link(iframe_src)
            links = {iframe_src: {'quality': scraper_utils.height_get_quality(meta['height']), 'direct': False}}
        else:
            links = self.__get_links(iframe_src, url)

        for link, info in links.items():
            direct = info['direct']
            quality = info['quality']
            if direct:
                host = scraper_utils.get_direct_hostname(self, link)
                if host == 'gvideo':
                    quality = scraper_utils.gv_get_quality(link)
                stream_url = link + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua(), 'Referer': url})
            else:
                host = urlparse.urlparse(link).hostname
                stream_url = link
            hosters.append({'multi-part': False, 'url': stream_url, 'host': host, 'class': self,
                            'quality': quality, 'views': None, 'rating': None, 'direct': direct})
    return hosters
示例15: get_sources
def get_sources(self, video):
    """Return hoster dicts from <source> tags inside the meta-media iframe.

    Each source is fetched without following redirects; a redirect target
    starting with http is treated as a gvideo stream, anything else is
    kept as a generic HIGH-quality link.
    """
    hosters = []
    sources = {}
    source_url = self.get_url(video)
    if not source_url or source_url == FORCE_NO_MATCH:
        return hosters

    url = urlparse.urljoin(self.base_url, source_url)
    html = self._http_get(url, cache_limit=.5)
    fragment = dom_parser.parse_dom(html, 'div', {'class': 'meta-media'})
    if fragment:
        iframe_url = dom_parser.parse_dom(fragment[0], 'iframe', ret='src')
        if iframe_url:
            iframe_url = urlparse.urljoin(self.base_url, iframe_url[0])
            html = self._http_get(iframe_url, cache_limit=.5)
            for source in dom_parser.parse_dom(html, 'source', ret='src'):
                resolved = urlparse.urljoin(self.base_url, source)
                redir_url = self._http_get(resolved, allow_redirect=False, cache_limit=.5)
                if redir_url.startswith('http'):
                    sources[redir_url] = scraper_utils.gv_get_quality(redir_url)
                else:
                    sources[resolved] = QUALITIES.HIGH

    for stream in sources:
        hosters.append({'multi-part': False, 'host': self._get_direct_hostname(stream), 'class': self,
                        'quality': sources[stream], 'views': None, 'rating': None, 'url': stream, 'direct': True})
    return hosters