This article collects typical usage examples of the Python method dudehere.routines.scrapers.ScraperResult.size. If you have been wondering what ScraperResult.size does, how to call it, or what real code using it looks like, the curated examples below should help. You can also look further into usage examples of the containing class, dudehere.routines.scrapers.ScraperResult.
Six code examples of the ScraperResult.size method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
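All six examples construct a ScraperResult and then assign attributes on it, but the class itself is not reproduced on this page. For orientation, a minimal stand-in consistent with how the examples use it might look like the sketch below; the constructor signature, defaults, and attribute meanings are assumptions inferred from the calls, not the actual dudehere.routines.scrapers implementation.

# Hypothetical stand-in for dudehere.routines.scrapers.ScraperResult,
# inferred only from how the examples on this page use it.
class ScraperResult(object):
    def __init__(self, service, host, url, title=None):
        self.service = service    # name of the scraper service
        self.host = host          # hosting site, e.g. 'furk.net' or 'VideoLibrary'
        self.url = url            # stream or download URL
        self.title = title        # display title (example 1 omits it)
        self.quality = None       # e.g. QUALITY.LOCAL, QUALITY.HIGH
        self.size = None          # file size, typically in bytes
        self.bitrate = None       # bitrate in kb/s where the source exposes it
        self.extension = None     # container extension, e.g. 'MKV'
        self.text = None          # free-form display text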
Example 1: process_results
# Required import: from dudehere.routines.scrapers import ScraperResult [as alias]
# Or: from dudehere.routines.scrapers.ScraperResult import size [as alias]
def process_results(self, path):
    results = []
    url = "%s://%s" % (self.service, path)
    result = ScraperResult(self.service, 'VideoLibrary', url)
    result.quality = QUALITY.LOCAL
    result.size = vfs.get_size(path)
    result.text = path
    parts = vfs.path_parts(path)
    result.extension = parts['extension']
    results.append(result)
    return results
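vfs here is the addon's virtual-filesystem helper; get_size and path_parts themselves are not shown on this page. For plain local paths they behave roughly like the standard-library sketch below; the function names, return keys, and fallback behaviour are assumptions based only on how the example uses them.

import os

# Rough stand-ins for the vfs helpers used in example 1 (assumed behaviour).
def get_size(path):
    # Size of the file in bytes, or 0 if the path is not a regular file.
    return os.path.getsize(path) if os.path.isfile(path) else 0

def path_parts(path):
    # Split a path into directory, bare name and extension (without the dot).
    dirname, filename = os.path.split(path)
    name, ext = os.path.splitext(filename)
    return {'dir': dirname, 'name': name, 'extension': ext.lstrip('.')}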
Example 2: get_resolved_url
# Required import: from dudehere.routines.scrapers import ScraperResult [as alias]
# Or: from dudehere.routines.scrapers.ScraperResult import size [as alias]
def get_resolved_url(self, id):
    self.search_results = []
    resolved_url = ''
    api_key = self._login()
    params = {"type": "video", "id": id, "api_key": api_key, 't_files': 1}
    results = self.request("/file/get", params)
    if results == '':
        return False
    files = results['files'][0]['t_files']
    for f in files:
        # keep only entries whose content type looks like video/*
        if re.search('^video/', f['ct']):
            title = f['name']
            result = ScraperResult(self.service, 'furk.net', f['url_dl'], title)
            result.size = self.format_size(f['size'])
            result.bitrate = f['bitrate']
            self.search_results.append(result)
    resolved_url = self.select_stream()
    return resolved_url
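Unlike the other examples, this one routes the raw f['size'] value through self.format_size before storing it, so result.size may end up as a display string rather than a byte count. format_size is not shown; a plausible sketch, assuming it simply renders bytes as a short human-readable string, is:

def format_size(num_bytes):
    # Render a byte count as a short human-readable string, e.g. '1.4 GB'
    # (decimal units are an assumption; the real helper may differ).
    size = float(num_bytes)
    for unit in ('B', 'KB', 'MB', 'GB', 'TB'):
        if size < 1000 or unit == 'TB':
            return '%.1f %s' % (size, unit)
        size /= 1000.0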
Example 3: process_results
# Required import: from dudehere.routines.scrapers import ScraperResult [as alias]
# Or: from dudehere.routines.scrapers.ScraperResult import size [as alias]
def process_results(self, data):
    results = []
    for result in data['result']:
        title = self.normalize(result['title'])
        sourcetitle = self.normalize(result['sourcetitle'])
        extension = result['extension']
        size = result['sizeinternal']
        host_name = result['hostername']
        hosts = result['hosterurls']
        for host in hosts:
            if host_name in self.domains:
                url = "%s://%s" % (self.service, host['url'])
                quality = self.test_quality(title + sourcetitle + self.normalize(url))
                result = ScraperResult(self.service, host_name, url, title)
                result.quality = quality
                result.size = size
                result.extension = extension
                results.append(result)
    return results
Example 4: process_results
# Required import: from dudehere.routines.scrapers import ScraperResult [as alias]
# Or: from dudehere.routines.scrapers.ScraperResult import size [as alias]
def process_results(self, data):
    results = []
    if 'files' not in data:
        return results
    files = data['files']
    extension = re.compile(r'(\.MKV)|(\.AVI)|(\.MP4)', re.IGNORECASE)
    for f in files:
        if f['type'] == 'video':
            url = "furk://%s" % f['id']
            title = f['name']
            result = ScraperResult(self.service, 'furk.net', url, title)
            result.quality = self.test_quality(title)
            result.size = int(f['size'])
            bitrate = re.search(r'bitrate: (.+?) kb/s', f['video_info'])
            if bitrate:
                result.bitrate = bitrate.group(1)
            ext = extension.search(title)
            if ext:
                if ext.group(1): result.extension = 'MKV'
                if ext.group(2): result.extension = 'AVI'
                if ext.group(3): result.extension = 'MP4'
            results.append(result)
    return results
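To see the fields the loop relies on, here is a hypothetical call with a minimal response shaped the way the code expects; the field names are taken from the example, and every value is made up for illustration.

sample = {
    'files': [{
        'type': 'video',
        'id': 'abc123',                                   # made-up file id
        'name': 'Some.Movie.2012.720p.BluRay.x264.MKV',   # made-up title
        'size': '1499999999',
        'video_info': 'Duration: 01:52:10, bitrate: 1800 kb/s',
    }]
}
results = scraper.process_results(sample)  # scraper: an instance of this scraper class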
Example 5: process_movie_results
# Required import: from dudehere.routines.scrapers import ScraperResult [as alias]
# Or: from dudehere.routines.scrapers.ScraperResult import size [as alias]
def process_movie_results(self, soup):
    results = []
    block = soup.find('div', {"class": "postContent"})
    block_string = str(block)
    filename = re.search('Release Name:</strong> (.+?)<br />', block_string)
    if filename:
        filename = filename.group(1)
    else:
        filename = self.service
    # Convert a '<number> MB' or '<number> GB' match into bytes (decimal units).
    size = re.search(r'\s(\d+\.?\d*)\s?(MB|GB)', block_string)
    if size:
        if size.group(2) == 'MB':
            size = float(size.group(1)) * 1000 * 1000
        else:
            size = float(size.group(1)) * 1000 * 1000 * 1000
    # Find the paragraph that holds the download links.
    blocks = block.findAll('p')
    for block in blocks:
        test = block.findChildren()
        if test[0].text == 'Download:':
            break
    files = block.findAll('a')
    for file in files:
        url = file['href']
        if file.text.lower() not in self.table or 'nfo.rlsbb.com' in url:
            continue
        quality = self.test_quality(url, default=QUALITY.HIGH)
        url = "%s://%s" % (self.service, url)
        host_name = self.table[file.text.lower()]
        result = ScraperResult(self.service, host_name, url, filename)
        result.quality = quality
        if size:
            result.size = size
        results.append(result)
    return results
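The size handling above mixes the regex match and the MB/GB conversion into the main method; pulled out on its own it is easier to follow and test. This is only a sketch of the same logic; the decimal (1000-based) units mirror the original code, and whether the site actually means decimal or binary units is an assumption.

import re

def parse_release_size(text):
    # Return the release size in bytes, or None if no '<number> MB|GB' is found.
    match = re.search(r'\s(\d+\.?\d*)\s?(MB|GB)', text)
    if match is None:
        return None
    value = float(match.group(1))
    factor = 1000 ** 2 if match.group(2) == 'MB' else 1000 ** 3
    return int(value * factor)

print(parse_release_size('Size: 1.4 GB'))  # 1400000000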
Example 6: process_results
# Required import: from dudehere.routines.scrapers import ScraperResult [as alias]
# Or: from dudehere.routines.scrapers.ScraperResult import size [as alias]
def process_results(self, data, re_test=None):
    results = []
    for result in data["result"]:
        title = self.normalize(result["title"])
        if re_test:
            if re_test.search(title) is None:
                continue
        sourcetitle = self.normalize(result["sourcetitle"])
        extension = result["extension"]
        size = result["sizeinternal"]
        host_name = result["hostername"]
        hosts = result["hosterurls"]
        for host in hosts:
            if self.filter_host(host_name):
                url = "%s://%s" % (self.service, host["url"])
                quality = self.test_quality(title + sourcetitle + self.normalize(url))
                result = ScraperResult(self.service, host_name, url, title)
                result.quality = quality
                result.size = int(size)
                result.extension = extension
                results.append(result)
    return results
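re_test is an optional pre-compiled pattern used to discard results whose title does not match. A hypothetical call that keeps only a specific episode could look like this; the pattern and variable names are illustrative only.

import re

episode_filter = re.compile(r'S01E03', re.IGNORECASE)
# scraper: an instance of this scraper class; data: a parsed API response
# with the 'result' list that the loop above iterates over.
results = scraper.process_results(data, re_test=episode_filter)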