本文整理汇总了Python中sickbeard.helpers.make_session函数的典型用法代码示例。如果您正苦于以下问题:Python make_session函数的具体用法?Python make_session怎么用?Python make_session使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了make_session函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
def __init__(self, name):
    """Initialise the state shared by every search provider."""
    self.name = name

    # Mirrors that can serve a .torrent for a known info-hash; shuffled
    # at the end of __init__ so load is spread across them.
    self.bt_cache_urls = [
        #'http://torcache.net/torrent/{torrent_hash}.torrent',
        'http://torrentproject.se/torrent/{torrent_hash}.torrent',
        'http://thetorrent.org/torrent/{torrent_hash}.torrent',
        'http://btdig.com/torrent/{torrent_hash}.torrent',
        # 'http://torrage.com/torrent/{torrent_hash}.torrent',
        'http://itorrents.org/torrent/{torrent_hash}.torrent',
    ]

    # Capability / configuration flags -- concrete providers override these.
    self.anime_only = False
    self.enable_backlog = False
    self.enable_daily = False
    self.enabled = False
    self.public = False
    self.search_fallback = False
    self.search_mode = None
    self.supports_absolute_numbering = False
    self.supports_backlog = True
    self.proper_strings = ['PROPER|REPACK|REAL']
    self.provider_type = None

    # Network plumbing and per-provider cache.
    self.cache = TVCache(self)
    self.headers = {'User-Agent': UA_POOL.random}
    self.session = make_session()
    self.show = None
    self.url = ''
    self.urls = {}

    # Use and configure the attribute enable_cookies to show or hide the
    # cookies input field per provider
    self.enable_cookies = False
    self.cookies = ''
    self.rss_cookies = ''

    shuffle(self.bt_cache_urls)
示例2: __init__
def __init__(self, show_id, title, indexer, indexer_id, cache_subfolder='recommended',
             rating=None, votes=None, image_href=None, image_src=None):
    """
    Create a show recommendation.

    :param show_id: as provided by the list provider
    :param title: of the show as displayed in the recommended show page
    :param indexer: used to map the show to
    :param indexer_id: a mapped indexer_id for indexer
    :param cache_subfolder: to store images
    :param rating: of the show in percent
    :param votes: number of votes
    :param image_href: the href when clicked on the show image (poster)
    :param image_src: the url to the "cached" image (poster)
    """
    self.show_id = show_id
    self.title = title
    self.indexer = indexer
    self.indexer_id = indexer_id
    self.cache_subfolder = cache_subfolder
    self.rating = rating
    self.votes = votes
    self.image_href = image_href
    self.image_src = image_src

    # Flag the recommendation when the show already exists in the local db.
    known_ids = {show.indexerid for show in sickbeard.showList if show.indexerid}
    self.show_in_list = self.indexer_id in known_ids

    self.session = helpers.make_session()
示例3: __init__
def __init__(self):
    """Prepare the Plex API headers and a persistent HTTP session."""
    self.headers = {
        'X-Plex-Client-Identifier': sickbeard.common.USER_AGENT,
        'X-Plex-Device-Name': 'SickRage',
        'X-Plex-Product': 'SickRage Notifier',
        'X-Plex-Version': '2016.02.10',
    }
    self.session = make_session()
示例4: __init__
def __init__(self):
    """Snapshot the current git/update state and prepare an HTTP session."""
    # Fall back to probing the checkout when no branch is configured.
    self.branch = self._find_installed_branch() if sickbeard.BRANCH == '' else sickbeard.BRANCH

    self._cur_commit_hash = sickbeard.CUR_COMMIT_HASH
    self._newest_commit_hash = None
    self._num_commits_behind = 0

    self.session = helpers.make_session()
示例5: __init__
def __init__(self):
    """Get a list of most popular TV series from imdb."""
    # Use akas.imdb.com, just like the imdb lib.
    self.url = 'http://akas.imdb.com/search/title'

    # Hoist the date lookup so both endpoints of the year window come from
    # the same "today" -- the original called date.today() twice, which
    # could straddle a year boundary at midnight on New Year's Eve.
    this_year = date.today().year
    self.params = {
        'at': 0,
        'sort': 'moviemeter',
        'title_type': 'tv_series',
        'year': '%s,%s' % (this_year - 1, this_year + 1),
    }

    self.session = helpers.make_session()
示例6: __init__
def __init__(self, name, host=None, username=None, password=None):
    """Set up a torrent-client connection.

    Credentials and host fall back to the globally configured torrent
    settings when the caller passes None.
    """
    self.name = name
    self.username = username if username is not None else sickbeard.TORRENT_USERNAME
    self.password = password if password is not None else sickbeard.TORRENT_PASSWORD
    self.host = host if host is not None else sickbeard.TORRENT_HOST
    self.rpcurl = sickbeard.TORRENT_RPCURL

    # Request/response bookkeeping for the client session.
    self.url = None
    self.response = None
    self.auth = None
    self.last_time = time.time()

    self.session = helpers.make_session()
    self.session.auth = (self.username, self.password)
    self.session.cookies = cookielib.CookieJar()
示例7: update_network_dict
def update_network_dict():
    """Update timezone information from SR repositories.

    Downloads the network_timezones.txt list, parses it into a dict and
    syncs the ``network_timezones`` table in cache.db: insert new entries,
    update changed ones, and purge networks no longer listed upstream.
    """
    url = 'http://sickrage.github.io/sb_network_timezones/network_timezones.txt'
    data = helpers.getURL(url, session=helpers.make_session(), returns='text')
    if not data:
        logger.log(u'Updating network timezones failed, this can happen from time to time. URL: {0}'.format(url), logger.WARNING)
        load_network_dict()
        return

    d = {}
    # Each line is "Network Name:TZ"; the network name may itself contain
    # colons, so split from the right.  A line without a colon makes the
    # tuple unpack raise ValueError -- skip that line instead of aborting
    # the whole parse (the old ``except (IOError, OSError)`` could never
    # fire for a string split, so one malformed line crashed the update).
    for line in data.splitlines():
        try:
            key, val = line.strip().rsplit(u':', 1)
        except ValueError:
            continue
        if key and val:
            d[key] = val

    if not d:
        logger.log(u'Parsing network timezones failed, not going to touch the db', logger.WARNING)
        load_network_dict()
        return

    cache_db_con = db.DBConnection('cache.db')
    network_list = dict(cache_db_con.select('SELECT * FROM network_timezones;'))

    queries = []
    for network, timezone in d.iteritems():
        existing = network in network_list
        if not existing:
            queries.append(['INSERT OR IGNORE INTO network_timezones VALUES (?,?);', [network, timezone]])
        elif network_list[network] != timezone:
            queries.append(['UPDATE OR IGNORE network_timezones SET timezone = ? WHERE network_name = ?;', [timezone, network]])

        if existing:
            del network_list[network]

    # Whatever is left in network_list no longer appears upstream -- purge it.
    if network_list:
        purged = [x for x in network_list]
        queries.append(['DELETE FROM network_timezones WHERE network_name IN ({0});'.format(','.join(['?'] * len(purged))), purged])

    if queries:
        cache_db_con.mass_action(queries)
        load_network_dict()
示例8: update_network_dict
def update_network_dict():
    """Update timezone information from Medusa repositories.

    Downloads the network_timezones.txt list, parses it into a dict and
    syncs the ``network_timezones`` table in cache.db: insert new entries,
    update changed ones, and purge networks no longer listed upstream.
    """
    url = 'https://cdn.pymedusa.com/sb_network_timezones/network_timezones.txt'
    url_data = helpers.getURL(url, session=helpers.make_session(), returns='text')
    if not url_data:
        logger.log(u'Updating network timezones failed, this can happen from time to time. URL: %s' % url, logger.WARNING)
        load_network_dict()
        return

    d = {}
    # Each line is "Network Name:TZ"; split from the right because names may
    # contain colons.  A line without a colon makes the unpack raise
    # ValueError -- skip it instead of crashing (rsplit never returns None,
    # so the original ``if key is None or val is None`` check was dead code,
    # and the ValueError escaped the IOError/OSError handler).
    for line in url_data.splitlines():
        try:
            key, val = line.strip().rsplit(u':', 1)
        except ValueError:
            continue
        if key and val:
            d[key] = val

    # Bail out before touching the db when nothing parsed -- otherwise the
    # purge step below would wipe the entire network_timezones table.
    if not d:
        logger.log(u'Parsing network timezones failed, not going to touch the db', logger.WARNING)
        load_network_dict()
        return

    cache_db_con = db.DBConnection('cache.db')
    network_list = dict(cache_db_con.select('SELECT * FROM network_timezones;'))

    queries = []
    for network, timezone in d.iteritems():
        existing = network in network_list
        if not existing:
            queries.append(['INSERT OR IGNORE INTO network_timezones VALUES (?,?);', [network, timezone]])
        elif network_list[network] != timezone:
            queries.append(['UPDATE OR IGNORE network_timezones SET timezone = ? WHERE network_name = ?;', [timezone, network]])

        if existing:
            del network_list[network]

    # Whatever is left in network_list no longer appears upstream -- purge it.
    if network_list:
        purged = [x for x in network_list]
        queries.append(['DELETE FROM network_timezones WHERE network_name IN (%s);' % ','.join(['?'] * len(purged)), purged])

    if queries:
        cache_db_con.mass_action(queries)
        load_network_dict()
示例9: __init__
def __init__(self, name, host=None, username=None, password=None):
    """
    Initializes the client.

    :name: str:name of the client
    :host: str:url or ip of the client
    :username: str: username for authenticating with the client
    :password: str: password for authentication with the client
    """
    self.name = name

    # Any falsy value falls back to the globally configured torrent settings.
    self.username = username or sickbeard.TORRENT_USERNAME
    self.password = password or sickbeard.TORRENT_PASSWORD
    self.host = host or sickbeard.TORRENT_HOST

    # Request/response bookkeeping for the client session.
    self.url = None
    self.response = None
    self.auth = None
    self.last_time = time.time()

    self.session = helpers.make_session()
    self.session.auth = (self.username, self.password)
示例10: test_search
def test_search():  # pylint: disable=too-many-locals
    """
    Test searching: scrape one KAT search-results page and parse its rows.
    """
    base_url = 'http://kickass.to/'
    search_url = 'http://kickass.to/usearch/American%20Dad%21%20S08%20-S08E%20category%3Atv/?field=seeders&sorder=desc'

    html = getURL(search_url, session=make_session(), returns='text')
    if not html:
        return

    soup = BeautifulSoup(html, 'html5lib')

    torrent_table = soup.find('table', attrs={'class': 'data'})
    torrent_rows = torrent_table('tr') if torrent_table else []

    # cleanup memory
    soup.clear(True)

    # Continue only if one Release is found
    if len(torrent_rows) < 2:
        print("The data returned does not contain any torrents")
        return

    title = None
    for row in torrent_rows[1:]:
        try:
            # Join against the site root, not a previously parsed magnet
            # link -- the original clobbered ``url`` inside the loop, so
            # every row after the first resolved against a magnet URI.
            link = urllib.parse.urljoin(base_url, (row.find('div', {'class': 'torrentname'})('a')[1])['href'])
            _id = row.get('id')[-7:]
            title = (row.find('div', {'class': 'torrentname'})('a')[1]).text \
                or (row.find('div', {'class': 'torrentname'})('a')[2]).text
            magnet = row.find('a', 'imagnet')['href']
            verified = bool(row.find('a', 'iverify'))
            trusted = bool(row.find('img', {'alt': 'verified'}))
            seeders = int(row('td')[-2].text)
            leechers = int(row('td')[-1].text)
            _ = link, _id, magnet, verified, trusted, seeders, leechers
        except (AttributeError, TypeError):
            continue

    # Guard: if every row failed to parse, the original print(title) raised
    # NameError on the unbound name.
    if title is not None:
        print(title)
示例11: sendNZB
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from requests.compat import urljoin
import datetime
import sickbeard
from sickbeard import helpers, logger
session = helpers.make_session()
def sendNZB(nzb): # pylint:disable=too-many-return-statements, too-many-branches, too-many-statements
'''
Sends an NZB to SABnzbd via the API.
:param nzb: The NZBSearchResult object to send to SAB
'''
category = sickbeard.SAB_CATEGORY
if nzb.show.is_anime:
category = sickbeard.SAB_CATEGORY_ANIME
# if it aired more than 7 days ago, override with the backlog category IDs
for curEp in nzb.episodes:
示例12: __init__
def __init__(self):
    """Prepare the Pushbullet API endpoint and a persistent HTTP session."""
    self.url = 'https://api.pushbullet.com/v2/'
    self.session = helpers.make_session()
示例13: change_unrar_tool
def change_unrar_tool(unrar_tool, alt_unrar_tool):
    """Validate (and on Windows, locate or download) an unrar executable.

    Sets ``sickbeard.UNRAR_TOOL`` / ``sickbeard.ALT_UNRAR_TOOL`` and the
    corresponding ``rarfile`` module globals, then returns the result of
    rarfile's own tool check (truthy when a working unrar was configured).

    :param unrar_tool: candidate path/command for the primary unrar tool
    :param alt_unrar_tool: candidate for the alternative tool
    :return: truthy when rarfile accepted a tool, False otherwise
    """
    # Check for failed unrar attempt, and remove it
    # Must be done before unrar is ever called or the self-extractor opens and locks startup
    bad_unrar = os.path.join(sickbeard.DATA_DIR, 'unrar.exe')
    # 447440 bytes is the size of the known-bad self-extracting download.
    if os.path.exists(bad_unrar) and os.path.getsize(bad_unrar) == 447440:
        try:
            os.remove(bad_unrar)
        except OSError as e:
            logger.log("Unable to delete bad unrar.exe file {0}: {1}. You should delete it manually".format(bad_unrar, e.strerror), logger.WARNING)

    # If the supplied tool already works, fall through to the assignment at
    # the bottom; only on failure do we probe defaults / search / download.
    try:
        rarfile.custom_check(unrar_tool)
    except (rarfile.RarCannotExec, rarfile.RarExecError, OSError, IOError):
        # Let's just return right now if the defaults work
        try:
            # noinspection PyProtectedMember
            test = rarfile._check_unrar_tool()
            if test:
                # These must always be set to something before returning
                sickbeard.UNRAR_TOOL = rarfile.UNRAR_TOOL
                sickbeard.ALT_UNRAR_TOOL = rarfile.ALT_TOOL
                return True
        except (rarfile.RarCannotExec, rarfile.RarExecError, OSError, IOError):
            pass

        if platform.system() == 'Windows':
            # Look for WinRAR installations
            found = False
            winrar_path = 'WinRAR\\UnRAR.exe'
            # Make a set of unique paths to check from existing environment variables
            check_locations = {
                os.path.join(location, winrar_path) for location in (
                    os.environ.get("ProgramW6432"), os.environ.get("ProgramFiles(x86)"),
                    os.environ.get("ProgramFiles"), re.sub(r'\s?\(x86\)', '', os.environ["ProgramFiles"])
                ) if location
            }
            check_locations.add(os.path.join(sickbeard.PROG_DIR, 'unrar\\unrar.exe'))

            for check in check_locations:
                if ek(os.path.isfile, check):
                    # Can use it?
                    try:
                        rarfile.custom_check(check)
                        unrar_tool = check
                        found = True
                        break
                    except (rarfile.RarCannotExec, rarfile.RarExecError, OSError, IOError):
                        found = False

            # Download
            if not found:
                logger.log('Trying to download unrar.exe and set the path')
                unrar_store = ek(os.path.join, sickbeard.PROG_DIR, 'unrar')  # ./unrar (folder)
                unrar_zip = ek(os.path.join, sickbeard.PROG_DIR, 'unrar_win.zip')  # file download

                if (helpers.download_file(
                    "http://sickrage.github.io/unrar/unrar_win.zip", filename=unrar_zip, session=helpers.make_session()
                ) and helpers.extractZip(archive=unrar_zip, targetDir=unrar_store)):
                    try:
                        ek(os.remove, unrar_zip)
                    except OSError as e:
                        logger.log("Unable to delete downloaded file {0}: {1}. You may delete it manually".format(unrar_zip, e.strerror))

                    check = os.path.join(unrar_store, "unrar.exe")
                    try:
                        rarfile.custom_check(check)
                        unrar_tool = check
                        logger.log('Successfully downloaded unrar.exe and set as unrar tool', )
                    except (rarfile.RarCannotExec, rarfile.RarExecError, OSError, IOError):
                        logger.log('Sorry, unrar was not set up correctly. Try installing WinRAR and make sure it is on the system PATH')
                else:
                    logger.log('Unable to download unrar.exe')

    # These must always be set to something before returning
    sickbeard.UNRAR_TOOL = rarfile.UNRAR_TOOL = rarfile.ORIG_UNRAR_TOOL = unrar_tool
    sickbeard.ALT_UNRAR_TOOL = rarfile.ALT_TOOL = alt_unrar_tool

    # Final sanity check with whatever tool ended up configured.
    try:
        # noinspection PyProtectedMember
        test = rarfile._check_unrar_tool()
    except (rarfile.RarCannotExec, rarfile.RarExecError, OSError, IOError):
        if sickbeard.UNPACK == 1:
            logger.log('Disabling UNPACK setting because no unrar is installed.')
            sickbeard.UNPACK = 0
        test = False

    return test
示例14: split_result
def split_result(obj):
    """
    Split obj into separate episodes.

    :param obj: to search for results
    :return: a list of episode objects or an empty list
    """
    # Fetch the season NZB content so it can be busted up per episode.
    url_data = helpers.getURL(obj.url, session=helpers.make_session(), returns='content')
    if url_data is None:
        logger.log(u"Unable to load url " + obj.url + ", can't download season NZB", logger.ERROR)
        return []

    # parse the season ep name
    try:
        parsed_obj = NameParser(False, showObj=obj.show).parse(obj.name)
    except (InvalidNameException, InvalidShowException) as error:
        logger.log(u"{}".format(error), logger.DEBUG)
        return []

    # bust it up
    # Default to season 1 when the season result's name carries no season number.
    season = 1 if parsed_obj.season_number is None else parsed_obj.season_number

    separate_nzbs, xmlns = get_season_nzbs(obj.name, url_data, season)

    result_list = []

    # TODO: Re-evaluate this whole section
    # If we have valid results and hit an exception, we ignore the results found so far.
    # Maybe we should return the results found or possibly continue with the next iteration of the loop
    # Also maybe turn this into a function and generate the results_list with a list comprehension instead
    for new_nzb in separate_nzbs:
        logger.log(u"Split out " + new_nzb + " from " + obj.name, logger.DEBUG)  # pylint: disable=no-member

        # parse the name
        try:
            parsed_obj = NameParser(False, showObj=obj.show).parse(new_nzb)
        except (InvalidNameException, InvalidShowException) as error:
            logger.log(u"{}".format(error), logger.DEBUG)
            # NOTE: this discards every result gathered so far (see TODO above).
            return []

        # make sure the result is sane
        if (parsed_obj.season_number != season) or (parsed_obj.season_number is None and season != 1):
            # pylint: disable=no-member
            logger.log(u"Found " + new_nzb + " inside " + obj.name + " but it doesn't seem to belong to the same season, ignoring it",
                       logger.WARNING)
            continue
        elif len(parsed_obj.episode_numbers) == 0:
            # pylint: disable=no-member
            logger.log(u"Found " + new_nzb + " inside " + obj.name + " but it doesn't seem to be a valid episode NZB, ignoring it",
                       logger.WARNING)
            continue

        # Skip the split NZB when any of its episodes is unwanted at this quality.
        want_ep = True
        for ep_num in parsed_obj.episode_numbers:
            if not obj.extraInfo[0].wantEpisode(season, ep_num, obj.quality):
                logger.log(u"Ignoring result: " + new_nzb, logger.DEBUG)
                want_ep = False
                break
        if not want_ep:
            continue

        # get all the associated episode objects
        ep_obj_list = [obj.extraInfo[0].getEpisode(season, ep) for ep in parsed_obj.episode_numbers]

        # make a result
        cur_obj = classes.NZBDataSearchResult(ep_obj_list)
        cur_obj.name = new_nzb
        cur_obj.provider = obj.provider
        cur_obj.quality = obj.quality
        cur_obj.extraInfo = [create_nzb_string(separate_nzbs[new_nzb], xmlns)]

        result_list.append(cur_obj)

    return result_list
示例15: index
def index(self):
    """Render the changelog page from the remote CHANGES.md.

    Falls back to a link when the changelog cannot be fetched, then renders
    the markdown through the generic markdown.mako template.
    """
    try:
        changes = helpers.getURL('https://cdn.pymedusa.com/sickrage-news/CHANGES.md', session=helpers.make_session(), returns='text')
    except Exception:
        logger.log('Could not load changes from repo, giving a link!', logger.DEBUG)
        changes = 'Could not load changes from the repo. [Click here for CHANGES.md](https://cdn.pymedusa.com/sickrage-news/CHANGES.md)'

    t = PageTemplate(rh=self, filename='markdown.mako')
    # getURL returns None on failure, so keep the falsy fallback message.
    # (Typo fixed: "The was a problem" -> "There was a problem".)
    data = markdown2.markdown(changes if changes else 'There was a problem connecting to github, please refresh and try again', extras=['header-ids'])

    return t.render(title='Changelog', header='Changelog', topmenu='system', data=data, controller='changes', action='index')