本文整理汇总了Python中dudehere.routines.scrapers.ScraperResult.extension方法的典型用法代码示例。如果您正苦于以下问题:Python ScraperResult.extension方法的具体用法?Python ScraperResult.extension怎么用?Python ScraperResult.extension使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类dudehere.routines.scrapers.ScraperResult
的用法示例。
在下文中一共展示了ScraperResult.extension方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: process_results
# 需要导入模块: from dudehere.routines.scrapers import ScraperResult [as 别名]
# 或者: from dudehere.routines.scrapers.ScraperResult import extension [as 别名]
def process_results(self, path):
    """Wrap a single local filesystem path as a one-item ScraperResult list.

    Args:
        path: absolute path of a local video file.

    Returns:
        list containing exactly one ScraperResult describing *path*.
    """
    scheme_url = "%s://%s" % (self.service, path)
    item = ScraperResult(self.service, 'VideoLibrary', scheme_url)
    # Local files need no probing: quality and size come straight from vfs.
    item.quality = QUALITY.LOCAL
    item.size = vfs.get_size(path)
    item.text = path
    item.extension = vfs.path_parts(path)['extension']
    return [item]
示例2: process_results
# 需要导入模块: from dudehere.routines.scrapers import ScraperResult [as 别名]
# 或者: from dudehere.routines.scrapers.ScraperResult import extension [as 别名]
def process_results(self, data):
    """Convert a furk.net API payload into ScraperResult objects.

    Args:
        data: decoded furk.net JSON response; expected to carry a 'files'
            list of dicts with 'type', 'id', 'name', 'size' and
            'video_info' keys.

    Returns:
        list of ScraperResult, one per entry whose type is 'video'.
    """
    results = []
    # Payload without a file listing yields no results.
    if 'files' not in data:
        return results
    # Raw string: '\.' in a non-raw literal triggers invalid-escape
    # warnings on modern Python. Compiled once, reused per file.
    extension = re.compile(r'(\.MKV)|(\.AVI)|(\.MP4)', re.IGNORECASE)
    for f in data['files']:
        if f['type'] != 'video':
            continue
        url = "furk://%s" % f['id']
        title = f['name']
        result = ScraperResult(self.service, 'furk.net', url, title)
        result.quality = self.test_quality(title)
        result.size = int(f['size'])
        bitrate = re.search('bitrate: (.+?) kb/s', f['video_info'])
        if bitrate:
            result.bitrate = bitrate.group(1)
        ext = extension.search(title)
        if ext:
            # Normalize whichever alternative matched to upper-case.
            if ext.group(1): result.extension = 'MKV'
            if ext.group(2): result.extension = 'AVI'
            if ext.group(3): result.extension = 'MP4'
        results.append(result)
    return results
示例3: process_results
# 需要导入模块: from dudehere.routines.scrapers import ScraperResult [as 别名]
# 或者: from dudehere.routines.scrapers.ScraperResult import extension [as 别名]
def process_results(self, html):
    """Scrape <source> tags from an ororo.tv player page.

    Args:
        html: raw HTML of the player page.

    Returns:
        list of ScraperResult, one per video source found; every result
        is tagged QUALITY.HD720 and carries the type suffix (e.g. 'mp4')
        as its extension.
    """
    results = []
    pattern = r"source src='([^']+)'\s+type='video/([^']+)"
    for match in re.finditer(pattern, html):
        url = match.group(1)
        # Decode the HTML entity so the player receives a literal '&'.
        # NOTE(review): the original read url.replace('&', '&') — a no-op
        # almost certainly mangled from '&amp;' during extraction; confirm
        # against live page markup.
        url = url.replace('&amp;', '&')
        parsed = urlparse(url)
        # Last path segment doubles as the display title.
        title = parsed.path.split('/')[-1]
        url += '|User-Agent=%s' % (self.get_user_agent())
        url = "%s://%s" % (self.service, url)
        result = ScraperResult(self.service, 'ororo.tv', url, title)
        result.quality = QUALITY.HD720
        result.extension = match.group(2)
        results.append(result)
    return results
示例4: process_results
# 需要导入模块: from dudehere.routines.scrapers import ScraperResult [as 别名]
# 或者: from dudehere.routines.scrapers.ScraperResult import extension [as 别名]
def process_results(self, data):
    """Build ScraperResult objects from a search-API payload.

    Args:
        data: decoded JSON with a 'result' list; each entry carries
            'title', 'sourcetitle', 'extension', 'sizeinternal',
            'hostername' and a 'hosterurls' list of {'url': ...} dicts.

    Returns:
        list of ScraperResult, one per hoster URL of entries whose host
        is in self.domains.
    """
    results = []
    # 'entry' (not 'result') avoids shadowing the ScraperResult binding
    # below; the original reused one name for both roles.
    for entry in data['result']:
        title = self.normalize(entry['title'])
        sourcetitle = self.normalize(entry['sourcetitle'])
        extension = entry['extension']  # original fetched this twice
        size = entry['sizeinternal']
        host_name = entry['hostername']
        # Host filter is invariant per entry — check once, not per URL.
        if host_name not in self.domains:
            continue
        for host in entry['hosterurls']:
            url = "%s://%s" % (self.service, host['url'])
            quality = self.test_quality(title + sourcetitle + self.normalize(url))
            result = ScraperResult(self.service, host_name, url, title)
            result.quality = quality
            result.size = size
            result.extension = extension
            results.append(result)
    return results
示例5: process_results
# 需要导入模块: from dudehere.routines.scrapers import ScraperResult [as 别名]
# 或者: from dudehere.routines.scrapers.ScraperResult import extension [as 别名]
def process_results(self, data, re_test=None):
    """Build ScraperResult objects from a search-API payload, optionally
    filtering entries by title.

    Args:
        data: decoded JSON with a 'result' list; each entry carries
            'title', 'sourcetitle', 'extension', 'sizeinternal',
            'hostername' and a 'hosterurls' list of {'url': ...} dicts.
        re_test: optional compiled regex; entries whose normalized title
            does not match are skipped.

    Returns:
        list of ScraperResult, one per hoster URL of entries that pass
        self.filter_host().
    """
    results = []
    # 'entry' (not 'result') avoids shadowing the ScraperResult binding
    # below; the original reused one name for both roles.
    for entry in data["result"]:
        title = self.normalize(entry["title"])
        if re_test and re_test.search(title) is None:
            continue
        sourcetitle = self.normalize(entry["sourcetitle"])
        extension = entry["extension"]  # original fetched this twice
        size = entry["sizeinternal"]
        host_name = entry["hostername"]
        # Host filter depends only on the entry — check once, not per URL.
        if not self.filter_host(host_name):
            continue
        for host in entry["hosterurls"]:
            url = "%s://%s" % (self.service, host["url"])
            quality = self.test_quality(title + sourcetitle + self.normalize(url))
            result = ScraperResult(self.service, host_name, url, title)
            result.quality = quality
            result.size = int(size)
            result.extension = extension
            results.append(result)
    return results