本文整理汇总了Python中sickbeard.helpers.download_file函数的典型用法代码示例。如果您正苦于以下问题:Python download_file函数的具体用法?Python download_file怎么用?Python download_file使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了download_file函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: cache_image
def cache_image(self, image_url):
    """Download image_url into the imdb_popular image cache unless already present."""
    cache_dir = ek.ek(os.path.abspath,
                      ek.ek(os.path.join, sickbeard.CACHE_DIR, 'images', 'imdb_popular'))
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)

    dest = os.path.join(cache_dir, os.path.basename(image_url))
    if not os.path.isfile(dest):
        helpers.download_file(image_url, dest, session=self.session)
示例2: cache_image
def cache_image(self, image_url):
    """
    Store cache of image in cache dir

    :param image_url: Source URL
    """
    cache_dir = ek(os.path.abspath, ek(os.path.join, sickbeard.CACHE_DIR, 'images', 'imdb_popular'))
    if not ek(os.path.exists, cache_dir):
        ek(os.makedirs, cache_dir)

    target = ek(os.path.join, cache_dir, ek(os.path.basename, image_url))
    if not ek(os.path.isfile, target):
        helpers.download_file(image_url, target, session=self.session)
示例3: downloadResult
def downloadResult(self, result):
    """
    Save the result to disk.

    :param result: search result whose urls/filename come from self._makeURL
    :return: True when a file was downloaded and verified, False otherwise
    """
    # check for auth
    if not self._doLogin():
        return False

    urls, filename = self._makeURL(result)

    for url in urls:
        logger.log(u"Downloading a result from " + self.name + " at " + url)
        if helpers.download_file(url, filename, session=self.session):
            if self._verify_download(filename):
                logger.log(u"Saved result to " + filename, logger.INFO)
                return True
            else:
                logger.log(u"Could not download %s" % url, logger.WARNING)
                # drop the partial/corrupt file before trying the next url
                helpers._remove_file_failed(filename)

    # idiomatic truthiness test instead of len(urls)
    if urls:
        logger.log(u"Failed to download any results", logger.WARNING)

    return False
示例4: download_result
def download_result(self, result):
    """Download the given search result; return True once a saved file verifies."""
    if not self.login():
        return False

    urls, filename = self._make_url(result)

    for url in urls:
        if 'NO_DOWNLOAD_NAME' in url:
            continue

        if url.startswith('http'):
            # set a same-origin referer for the download request
            self.headers.update({
                'Referer': '/'.join(url.split('/')[:3]) + '/'
            })

        logger.log(u'Downloading a result from {0} at {1}'.format(self.name, url))

        # Support for Jackett/TorzNab: torrent link behind an nzb-named file
        if url.endswith(GenericProvider.TORRENT) and filename.endswith(GenericProvider.NZB):
            filename = replace_extension(filename, GenericProvider.TORRENT)

        fetched = download_file(url, filename, session=self.session,
                                headers=self.headers,
                                hooks={'response': self.get_url_hook})
        if fetched and self._verify_download(filename):
            logger.log(u'Saved result to {0}'.format(filename), logger.INFO)
            return True

        logger.log(u'Could not download {0}'.format(url), logger.WARNING)
        remove_file_failed(filename)

    if urls:
        logger.log(u'Failed to download any results', logger.WARNING)

    return False
示例5: download_result
def download_result(self, result):
    """
    Save the result to disk.

    Search results don't return torrent files directly: each result url is a
    show sheet that must be parsed to find the actual .torrent link.

    :param result: search result whose urls/filename come from self._make_url
    :return: True when a file was downloaded and verified, False otherwise
    """
    # check for auth
    if not self.login():
        return False

    urls, filename = self._make_url(result)

    for url in urls:
        data = self.get_url(url, returns='text')
        if not data:
            # request failed or returned nothing; try the next url
            continue

        match = re.search(r'http://tumejorserie.com/descargar/.+\.torrent', data, re.DOTALL)
        if not match:
            # sheet has no torrent link; previously this crashed with AttributeError
            logger.log('Could not find a torrent link at {}'.format(url), logger.WARNING)
            continue
        url_torrent = match.group()

        if url_torrent.startswith('http'):
            self.headers.update({'Referer': '/'.join(url_torrent.split('/')[:3]) + '/'})

        logger.log('Downloading a result from {}'.format(url))
        if helpers.download_file(url_torrent, filename, session=self.session, headers=self.headers):
            if self._verify_download(filename):
                logger.log('Saved result to {}'.format(filename), logger.INFO)
                return True
            else:
                logger.log('Could not download {}'.format(url), logger.WARNING)
                helpers.remove_file_failed(filename)

    if urls:
        logger.log('Failed to download any results', logger.WARNING)

    return False
示例6: download_result
def download_result(self, result):
    """Fetch a search result to disk; True only when the saved file verifies."""
    if not self.login():
        return False

    urls, filename = self._make_url(result)

    for url in urls:
        if "NO_DOWNLOAD_NAME" in url:
            continue

        if url.startswith("http"):
            # same-origin referer for the download request
            self.headers.update({"Referer": "/".join(url.split("/")[:3]) + "/"})

        logger.log(u"Downloading a result from {0} at {1}".format(self.name, url))

        # Support for Jackett/TorzNab: torrent url with an nzb filename
        if url.endswith(GenericProvider.TORRENT) and filename.endswith(GenericProvider.NZB):
            filename = replace_extension(filename, GenericProvider.TORRENT)

        ok = download_file(
            url, filename, session=self.session, headers=self.headers, hooks={"response": self.get_url_hook}
        )
        if ok and self._verify_download(filename):
            logger.log(u"Saved result to {0}".format(filename), logger.INFO)
            return True

        logger.log(u"Could not download {0}".format(url), logger.WARNING)
        remove_file_failed(filename)

    if urls:
        logger.log(u"Failed to download any results", logger.WARNING)

    return False
示例7: downloadResult
def downloadResult(self, result):
    """
    Save the result to disk.

    :param result: search result whose urls/filename come from self._makeURL
    :return: True when a file was downloaded and verified, False otherwise
    """
    # check for auth
    if not self._doLogin():
        return False

    urls, filename = self._makeURL(result)

    # With a proxy enabled every request carries the proxy url as referer;
    # otherwise make sure no stale Referer header is left behind.
    if self.proxy.isEnabled():
        self.headers.update({"Referer": self.proxy.getProxyURL()})
    elif "Referer" in self.headers:
        self.headers.pop("Referer")

    for url in urls:
        if "NO_DOWNLOAD_NAME" in url:
            continue

        logger.log(u"Downloading a result from " + self.name + " at " + url)
        if helpers.download_file(self.proxy._buildURL(url), filename, session=self.session, headers=self.headers):
            if self._verify_download(filename):
                logger.log(u"Saved result to " + filename, logger.INFO)
                return True
            else:
                logger.log(u"Could not download %s" % url, logger.WARNING)
                # drop the partial/corrupt file before trying the next url
                helpers._remove_file_failed(filename)

    # idiomatic truthiness test instead of len(urls)
    if urls:
        logger.log(u"Failed to download any results", logger.WARNING)

    return False
示例8: _update_zoneinfo
def _update_zoneinfo():
    """
    Refresh the bundled dateutil zoneinfo tarball (best effort).

    Fetches zoneinfo.txt ("<tarball-name> <md5>") from the sb_network_timezones
    repo; when the listed tarball differs from the loaded one, downloads it,
    verifies its md5, swaps it in place of the old file and reloads
    lib.dateutil.zoneinfo. Any failure is logged and the update is skipped.
    """
    # now check if the zoneinfo needs update
    url_zv = 'http://github.com/Prinz23/sb_network_timezones/raw/master/zoneinfo.txt'

    url_data = helpers.getURL(url_zv)
    if url_data is None:
        # When url_data is None, trouble connecting to github
        logger.log(u"Loading zoneinfo.txt failed. Unable to get URL: " + url_zv, logger.DEBUG)
        return

    # use "is not None" instead of "!= None" (PEP 8)
    if lib.dateutil.zoneinfo.ZONEINFOFILE is not None:
        cur_zoneinfo = ek.ek(basename, lib.dateutil.zoneinfo.ZONEINFOFILE)
    else:
        cur_zoneinfo = None

    (new_zoneinfo, zoneinfo_md5) = url_data.decode('utf-8').strip().rsplit(u' ')

    if cur_zoneinfo is not None and new_zoneinfo == cur_zoneinfo:
        # already up to date
        return

    # now load the new zoneinfo
    url_tar = u'http://github.com/Prinz23/sb_network_timezones/raw/master/' + new_zoneinfo
    zonefile = ek.ek(realpath, u'lib/dateutil/zoneinfo/' + new_zoneinfo)
    zonefile_tmp = re.sub(r"\.tar\.gz$", '.tmp', zonefile)

    if os.path.exists(zonefile_tmp):
        try:
            os.remove(zonefile_tmp)
        except OSError:  # was a bare except; os.remove raises OSError
            logger.log(u"Unable to delete: " + zonefile_tmp, logger.ERROR)
            return

    if not helpers.download_file(url_tar, zonefile_tmp):
        return

    new_hash = str(helpers.md5_for_file(zonefile_tmp))

    if zoneinfo_md5.upper() == new_hash.upper():
        logger.log(u"Updating timezone info with new one: " + new_zoneinfo, logger.MESSAGE)
        try:
            # remove the old zoneinfo file
            if cur_zoneinfo is not None:
                old_file = ek.ek(realpath, u'lib/dateutil/zoneinfo/' + cur_zoneinfo)
                if os.path.exists(old_file):
                    os.remove(old_file)
            # rename downloaded file
            os.rename(zonefile_tmp, zonefile)
            # load the new zoneinfo
            reload(lib.dateutil.zoneinfo)
        except Exception:  # was a bare except; don't swallow SystemExit/KeyboardInterrupt
            _remove_zoneinfo_failed(zonefile_tmp)
            return
    else:
        _remove_zoneinfo_failed(zonefile_tmp)
        logger.log(u"MD5 HASH doesn't match: " + zoneinfo_md5.upper() + ' File: ' + new_hash.upper(), logger.ERROR)
        return
示例9: downloadResult
def downloadResult(self, result):
    """
    Save the result to disk.

    For torrent results the info-hash (and display name) is pulled from the
    magnet link and a list of public .torrent cache mirrors is tried; nzb
    results are fetched directly from the result url.

    :param result: search result to download
    :return: True on a verified download, False on failure
             (None for unknown provider types, as before)
    """
    # check for auth
    if not self._doLogin():
        return False

    if self.providerType == GenericProvider.TORRENT:
        try:
            # raw strings for regexes; "[\w]" simplified to "\w"
            torrent_hash = re.findall(r'urn:btih:(\w{32,40})', result.url)[0].upper()
            torrent_name = re.findall(r'dn=([^&]+)', result.url)[0]

            # 32-char hashes are base32; convert to hex
            if len(torrent_hash) == 32:
                torrent_hash = b16encode(b32decode(torrent_hash)).upper()

            if not torrent_hash:
                logger.log("Unable to extract torrent hash from link: " + ex(result.url), logger.ERROR)
                return False

            urls = [
                'http://torcache.net/torrent/' + torrent_hash + '.torrent',
                'http://zoink.ch/torrent/' + torrent_name + '.torrent',
                'http://torrage.com/torrent/' + torrent_hash + '.torrent',
            ]
        except Exception:  # was a bare except; fall back to the raw result url
            urls = [result.url]

        filename = ek.ek(os.path.join, sickbeard.TORRENT_DIR,
                         helpers.sanitizeFileName(result.name) + '.' + self.providerType)
    elif self.providerType == GenericProvider.NZB:
        urls = [result.url]
        filename = ek.ek(os.path.join, sickbeard.NZB_DIR,
                         helpers.sanitizeFileName(result.name) + '.' + self.providerType)
    else:
        return

    for url in urls:
        logger.log(u"Downloading a result from " + self.name + " at " + url)
        if helpers.download_file(url, filename, session=self.session):
            if self._verify_download(filename):
                if self.providerType == GenericProvider.TORRENT:
                    logger.log(u"Saved magnet link to " + filename, logger.INFO)
                else:
                    logger.log(u"Saved result to " + filename, logger.INFO)
                return True
            else:
                logger.log(u"Could not download %s" % url, logger.WARNING)
                helpers._remove_file_failed(filename)

    # idiomatic truthiness test instead of len(urls)
    if urls:
        logger.log(u"Failed to download any results", logger.WARNING)

    return False
示例10: cache_image
def cache_image(self, image_url):
    """
    Store cache of image in cache dir

    :param image_url: Source URL
    """
    if not self.cache_subfolder:
        return

    image_name = ek(os.path.basename, image_url)
    self.image_src = ek(posixpath.join, 'images', self.cache_subfolder, image_name)

    cache_dir = ek(os.path.abspath, ek(os.path.join, sickbeard.CACHE_DIR, 'images', self.cache_subfolder))
    if not ek(os.path.exists, cache_dir):
        ek(os.makedirs, cache_dir)

    target = ek(posixpath.join, cache_dir, image_name)
    if not ek(os.path.isfile, target):
        helpers.download_file(image_url, target, session=self.session)
示例11: download_result
def download_result(self, result):
    """
    Save the result to disk.

    :param result: search result to download
    :return: True on a verified download, False on failure
             (None for unknown provider types, as before)
    """
    # check for auth
    if not self._do_login():
        return False

    if GenericProvider.TORRENT == self.providerType:
        try:
            # raw string for the regex pattern
            torrent_hash = re.findall(r'urn:btih:([0-9a-f]{32,40})', result.url)[0].upper()

            # 32-char hashes are base32; convert to hex
            if 32 == len(torrent_hash):
                torrent_hash = b16encode(b32decode(torrent_hash)).lower()

            if not torrent_hash:
                logger.log('Unable to extract torrent hash from link: ' + ex(result.url), logger.ERROR)
                return False

            urls = ['https://%s/%s.torrent' % (u, torrent_hash)
                    for u in ('torcache.net/torrent', 'torrage.com/torrent', 'getstrike.net/torrents/api/download')]
        except Exception:  # was a bare except; fall back to the raw result url
            urls = [result.url]

        filename = ek.ek(os.path.join, sickbeard.TORRENT_DIR,
                         helpers.sanitizeFileName(result.name) + '.' + self.providerType)
    elif GenericProvider.NZB == self.providerType:
        urls = [result.url]
        filename = ek.ek(os.path.join, sickbeard.NZB_DIR,
                         helpers.sanitizeFileName(result.name) + '.' + self.providerType)
    else:
        return

    for url in urls:
        if helpers.download_file(url, filename, session=self.session):
            logger.log(u'Downloading a result from ' + self.name + ' at ' + url)
            if GenericProvider.TORRENT == self.providerType:
                logger.log(u'Saved magnet link to ' + filename, logger.MESSAGE)
            else:
                logger.log(u'Saved result to ' + filename, logger.MESSAGE)
            if self._verify_download(filename):
                return True
            elif ek.ek(os.path.isfile, filename):
                # failed verification: remove the bad file and try the next url
                ek.ek(os.remove, filename)

    logger.log(u'Failed to download result', logger.ERROR)
    return False
示例12: downloadResult
def downloadResult(self, result):
    """
    Save the result to disk.

    :param result: search result whose urls/filename come from self._makeURL
    :return: True when a file was downloaded and verified, False otherwise
    """
    # check for auth
    if not self._doLogin():
        return False

    urls, filename = self._makeURL(result)

    if self.proxy.isEnabled():
        self.headers.update({'Referer': self.proxy.getProxyURL()})
    elif 'Referer' in self.headers:
        self.headers.pop('Referer')

    for url in urls:
        if 'NO_DOWNLOAD_NAME' in url:
            continue

        if not self.proxy.isEnabled() and url.startswith('http'):
            # Let's just set a referer for every .torrent/.nzb, should work as a cover-all without side-effects
            self.headers.update({'Referer': '/'.join(url.split('/')[:3]) + '/'})

        logger.log(u"Downloading a result from " + self.name + " at " + url)

        # Support for Jackett/TorzNab
        if url.endswith(GenericProvider.TORRENT) and filename.endswith(GenericProvider.NZB):
            filename = filename.rsplit('.', 1)[0] + '.' + GenericProvider.TORRENT

        if helpers.download_file(self.proxy._buildURL(url), filename, session=self.session, headers=self.headers):
            if self._verify_download(filename):
                logger.log(u"Saved result to " + filename, logger.INFO)
                return True
            else:
                logger.log(u"Could not download %s" % url, logger.WARNING)
                helpers._remove_file_failed(filename)

    # idiomatic truthiness test instead of len(urls)
    if urls:
        logger.log(u"Failed to download any results", logger.WARNING)

    return False
示例13: downloadResult
def downloadResult(self, result):
    """
    Save the result to disk.

    :param result: search result whose urls/filename come from self._makeURL
    :return: True when a file was downloaded and verified, False otherwise
    """
    # check for auth
    if not self._doLogin():
        return False

    urls, filename = self._makeURL(result)

    for url in urls:
        if 'NO_DOWNLOAD_NAME' in url:
            continue

        if url.startswith('http'):
            self.headers.update({'Referer': '/'.join(url.split('/')[:3]) + '/'})

        logger.log(u"Downloading a result from " + self.name + " at " + url)

        # Support for Jackett/TorzNab
        if url.endswith(GenericProvider.TORRENT) and filename.endswith(GenericProvider.NZB):
            filename = filename.rsplit('.', 1)[0] + '.' + GenericProvider.TORRENT

        if helpers.download_file(url, filename, session=self.session, headers=self.headers):
            if self._verify_download(filename):
                logger.log(u"Saved result to " + filename, logger.INFO)
                return True
            else:
                logger.log(u"Could not download %s" % url, logger.WARNING)
                helpers.remove_file_failed(filename)

    # idiomatic truthiness test instead of len(urls)
    if urls:
        logger.log(u"Failed to download any results", logger.WARNING)

    return False
示例14: cache_image
def cache_image(self, image_url, image_path):
    """Fetch image_url to image_path, skipping the download when a cached copy exists."""
    # Only cache if the file does not exist yet
    if ek(os.path.isfile, image_path):
        return
    helpers.download_file(image_url, image_path, session=self.session)
示例15: search_providers
#.........这里部分代码省略.........
'#%s, removing the single episode results from the list' % ep_num, logger.DEBUG)
del found_results[provider_id][ep_num]
# of all the single ep results narrow it down to the best one for each episode
final_results += set(multi_results.values())
quality_list = use_quality_list and (None, best_qualities)[any(best_qualities)] or None
for cur_ep in found_results[provider_id]:
if cur_ep in (MULTI_EP_RESULT, SEASON_RESULT):
continue
if 0 == len(found_results[provider_id][cur_ep]):
continue
best_result = pick_best_result(found_results[provider_id][cur_ep], show, quality_list,
filter_rls=orig_thread_name)
# if all results were rejected move on to the next episode
if not best_result:
continue
# filter out possible bad torrents from providers
if 'torrent' == best_result.resultType:
if not best_result.url.startswith('magnet') and None is not best_result.get_data_func:
best_result.url = best_result.get_data_func(best_result.url)
best_result.get_data_func = None # consume only once
if not best_result.url:
continue
if best_result.url.startswith('magnet'):
if 'blackhole' != sickbeard.TORRENT_METHOD:
best_result.content = None
else:
cache_file = ek.ek(os.path.join, sickbeard.CACHE_DIR or helpers._getTempDir(),
'%s.torrent' % (helpers.sanitizeFileName(best_result.name)))
if not helpers.download_file(best_result.url, cache_file, session=best_result.provider.session):
continue
try:
with open(cache_file, 'rb') as fh:
td = fh.read()
setattr(best_result, 'cache_file', cache_file)
except (StandardError, Exception):
continue
if getattr(best_result.provider, 'chk_td', None):
name = None
try:
hdr = re.findall('(\w+(\d+):)', td[0:6])[0]
x, v = len(hdr[0]), int(hdr[1])
while x < len(td):
y = x + v
name = 'name' == td[x: y]
w = re.findall('((?:i-?\d+e|e+|d|l+)*(\d+):)', td[y: y + 32])[0]
x, v = y + len(w[0]), int(w[1])
if name:
name = td[x: x + v]
break
except (StandardError, Exception):
continue
if name:
if not pass_show_wordlist_checks(name, show):
continue
if not show_name_helpers.pass_wordlist_checks(name, indexer_lookup=False):
logger.log('Ignored: %s (debug log has detail)' % name)
continue
best_result.name = name