This article collects typical usage examples of the get_setting function from the Python module salts_lib.kodi. If you have been wondering what get_setting does, how to use it, or where to find real-world examples, the curated code samples below should help.
The following presents 15 code examples of get_setting, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
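Before diving into the examples, it helps to know what get_setting likely is: in the SALTS add-on, salts_lib.kodi is a thin wrapper around the standard Kodi add-on API. Below is a minimal sketch of the relevant helpers, assuming they simply delegate to xbmcaddon; the exact bodies in salts_lib.kodi may differ.

# Minimal sketch of salts_lib.kodi's setting helpers, assuming they are thin
# wrappers over the standard xbmcaddon API (the real module may differ).
import xbmcaddon

addon = xbmcaddon.Addon()

def get_setting(setting):
    # Kodi stores every setting as a string; booleans come back as the
    # literal strings 'true'/'false', which is why the examples below
    # compare against 'true' rather than True.
    return addon.getSetting(setting)

def set_setting(setting, value):
    if not isinstance(value, basestring): value = str(value)
    addon.setSetting(setting, value)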
Example 1: _get_episode_url
def _get_episode_url(self, show_url, video):
    params = urlparse.parse_qs(show_url)
    cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"tvshowid": %s, "season": %s, "filter": {"field": "%s", "operator": "is", "value": "%s"}, \
    "limits": { "start" : 0, "end": 25 }, "properties" : ["title", "season", "episode", "file", "streamdetails"], "sort": { "order": "ascending", "method": "label", "ignorearticle": true }}, "id": "libTvShows"}'
    base_url = 'video_type=%s&id=%s'
    episodes = []
    force_title = self._force_title(video)
    if not force_title:
        run = cmd % (params['id'][0], video.season, 'episode', video.episode)
        meta = xbmc.executeJSONRPC(run)
        meta = json.loads(meta)
        log_utils.log('Episode Meta: %s' % (meta), log_utils.LOGDEBUG)
        if 'result' in meta and 'episodes' in meta['result']:
            episodes = meta['result']['episodes']
    else:
        log_utils.log('Skipping S&E matching as title search is forced on: %s' % (video.trakt_id), log_utils.LOGDEBUG)

    if (force_title or kodi.get_setting('title-fallback') == 'true') and video.ep_title and not episodes:
        run = cmd % (params['id'][0], video.season, 'title', video.ep_title)
        meta = xbmc.executeJSONRPC(run)
        meta = json.loads(meta)
        log_utils.log('Episode Title Meta: %s' % (meta), log_utils.LOGDEBUG)
        if 'result' in meta and 'episodes' in meta['result']:
            episodes = meta['result']['episodes']

    for episode in episodes:
        if episode['file'].endswith('.strm'):
            continue
        return base_url % (video.video_type, episode['episodeid'])
Example 2: _default_get_episode_url
def _default_get_episode_url(self, show_url, video, episode_pattern, title_pattern='', airdate_pattern='', data=None, headers=None):
    log_utils.log('Default Episode Url: |%s|%s|%s|%s|' % (self.base_url, show_url, str(video).decode('utf-8', 'replace'), data), log_utils.LOGDEBUG)
    url = urlparse.urljoin(self.base_url, show_url)
    html = self._http_get(url, data=data, headers=headers, cache_limit=2)
    if html:
        force_title = self._force_title(video)
        if not force_title:
            match = re.search(episode_pattern, html, re.DOTALL)
            if match:
                return self._pathify_url(match.group(1))

            if kodi.get_setting('airdate-fallback') == 'true' and airdate_pattern and video.ep_airdate:
                airdate_pattern = airdate_pattern.replace('{year}', str(video.ep_airdate.year))
                airdate_pattern = airdate_pattern.replace('{month}', str(video.ep_airdate.month))
                airdate_pattern = airdate_pattern.replace('{p_month}', '%02d' % (video.ep_airdate.month))
                airdate_pattern = airdate_pattern.replace('{month_name}', MONTHS[video.ep_airdate.month - 1])
                airdate_pattern = airdate_pattern.replace('{short_month}', SHORT_MONS[video.ep_airdate.month - 1])
                airdate_pattern = airdate_pattern.replace('{day}', str(video.ep_airdate.day))
                airdate_pattern = airdate_pattern.replace('{p_day}', '%02d' % (video.ep_airdate.day))
                log_utils.log('Air Date Pattern: %s' % (airdate_pattern), log_utils.LOGDEBUG)

                match = re.search(airdate_pattern, html, re.DOTALL | re.I)
                if match:
                    return self._pathify_url(match.group(1))
        else:
            log_utils.log('Skipping S&E matching as title search is forced on: %s' % (video.trakt_id), log_utils.LOGDEBUG)

        if (force_title or kodi.get_setting('title-fallback') == 'true') and video.ep_title and title_pattern:
            norm_title = self._normalize_title(video.ep_title)
            for match in re.finditer(title_pattern, html, re.DOTALL | re.I):
                episode = match.groupdict()
                if norm_title == self._normalize_title(episode['title']):
                    return self._pathify_url(episode['url'])
Example 3: __update_scraper_py
def __update_scraper_py(self):
    try:
        py_path = os.path.join(kodi.get_path(), 'scrapers', 'iflix_scraper.py')
        self.exists = os.path.exists(py_path)
        scraper_url = kodi.get_setting('%s-scraper_url' % (self.get_name()))
        scraper_password = kodi.get_setting('%s-scraper_password' % (self.get_name()))
        if scraper_url and scraper_password and (not self.exists or os.path.getmtime(py_path) < time.time() - (24 * 60 * 60)):
            try:
                req = urllib2.urlopen(scraper_url)
                cipher_text = req.read()
            except Exception as e:
                log_utils.log('Failure during %s scraper get: %s' % (self.get_name(), e), log_utils.LOGWARNING)
                return

            if cipher_text:
                scraper_key = hashlib.sha256(scraper_password).digest()
                decrypter = pyaes.Decrypter(pyaes.AESModeOfOperationCBC(scraper_key, IV))
                new_py = decrypter.feed(cipher_text)
                new_py += decrypter.feed()

                old_py = ''
                if os.path.exists(py_path):
                    with open(py_path, 'r') as f:
                        old_py = f.read()

                log_utils.log('%s path: %s, new_py: %s, match: %s' % (self.get_name(), py_path, bool(new_py), new_py == old_py), log_utils.LOGDEBUG)
                if old_py != new_py:
                    with open(py_path, 'w') as f:
                        f.write(new_py)
    except Exception as e:
        log_utils.log('Failure during %s scraper update: %s' % (self.get_name(), e), log_utils.LOGWARNING)
    finally:
        self.exists = os.path.exists(py_path)
Example 4: update_all_scrapers
def update_all_scrapers():
    try: last_check = int(kodi.get_setting('last_list_check'))
    except: last_check = 0
    now = int(time.time())
    list_url = kodi.get_setting('scraper_url')
    scraper_password = kodi.get_setting('scraper_password')
    list_path = os.path.join(kodi.translate_path(kodi.get_profile()), 'scraper_list.txt')
    exists = os.path.exists(list_path)
    if list_url and scraper_password and (not exists or last_check < (now - (24 * 60 * 60))):
        scraper_list = utils2.get_and_decrypt(list_url, scraper_password)
        if scraper_list:
            try:
                with open(list_path, 'w') as f:
                    f.write(scraper_list)

                kodi.set_setting('last_list_check', str(now))
                kodi.set_setting('scraper_last_update', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(now)))
                for line in scraper_list.split('\n'):
                    line = line.replace(' ', '')
                    if line:
                        scraper_url, filename = line.split(',')
                        if scraper_url.startswith('http'):
                            update_scraper(filename, scraper_url)
            except Exception as e:
                log_utils.log('Exception during scraper update: %s' % (e), log_utils.LOGWARNING)
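Example 4 relies on utils2.get_and_decrypt, which is not shown here. Judging from the pyaes logic in Example 3, it plausibly fetches the ciphertext and decrypts it with an AES-CBC key derived from the password. The following is a hypothetical sketch only; the real helper lives in salts_lib.utils2 and may differ.

# Hypothetical sketch of utils2.get_and_decrypt, modeled on Example 3's
# decryption code; the IV constant and error handling are assumptions.
import hashlib
import urllib2
import pyaes

IV = '\0' * 16  # assumed fixed initialization vector

def get_and_decrypt(url, password):
    try:
        cipher_text = urllib2.urlopen(url).read()
    except Exception:
        return None

    key = hashlib.sha256(password).digest()
    decrypter = pyaes.Decrypter(pyaes.AESModeOfOperationCBC(key, IV))
    plain_text = decrypter.feed(cipher_text)
    plain_text += decrypter.feed()  # flush the final padded block
    return plain_text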
Example 5: _get_episode_url
def _get_episode_url(self, show_url, video):
    url = urlparse.urljoin(self.base_url, show_url)
    html = self._http_get(url, cache_limit=2)
    if html:
        force_title = scraper_utils.force_title(video)
        episodes = dom_parser.parse_dom(html, 'div', {'class': '\s*el-item\s*'})
        if not force_title:
            episode_pattern = 'href="([^"]*-[sS]%02d[eE]%02d(?!\d)[^"]*)' % (int(video.season), int(video.episode))
            match = re.search(episode_pattern, html)
            if match:
                return scraper_utils.pathify_url(match.group(1))

            if kodi.get_setting('airdate-fallback') == 'true' and video.ep_airdate:
                airdate_pattern = '%02d-%02d-%d' % (video.ep_airdate.day, video.ep_airdate.month, video.ep_airdate.year)
                for episode in episodes:
                    ep_url = dom_parser.parse_dom(episode, 'a', ret='href')
                    ep_airdate = dom_parser.parse_dom(episode, 'div', {'class': 'date'})
                    if ep_url and ep_airdate:
                        ep_airdate = ep_airdate[0].strip()
                        if airdate_pattern == ep_airdate:
                            return scraper_utils.pathify_url(ep_url[0])

        if (force_title or kodi.get_setting('title-fallback') == 'true') and video.ep_title:
            norm_title = scraper_utils.normalize_title(video.ep_title)
            for episode in episodes:
                ep_url = dom_parser.parse_dom(episode, 'a', ret='href')
                ep_title = dom_parser.parse_dom(episode, 'div', {'class': 'e-name'})
                if ep_url and ep_title and norm_title == scraper_utils.normalize_title(ep_title[0]):
                    return scraper_utils.pathify_url(ep_url[0])
Example 6: _set_cookies
def _set_cookies(self, base_url, cookies):
    cookie_file = os.path.join(COOKIEPATH, "%s_cookies.lwp" % (self.get_name()))
    cj = cookielib.LWPCookieJar(cookie_file)
    try:
        cj.load(ignore_discard=True)
    except:
        pass
    if kodi.get_setting("cookie_debug") == "true":
        log_utils.log("Before Cookies: %s - %s" % (self, scraper_utils.cookies_as_str(cj)), log_utils.LOGDEBUG)
    domain = urlparse.urlsplit(base_url).hostname
    for key in cookies:
        c = cookielib.Cookie(
            0,
            key,
            str(cookies[key]),
            port=None,
            port_specified=False,
            domain=domain,
            domain_specified=True,
            domain_initial_dot=False,
            path="/",
            path_specified=True,
            secure=False,
            expires=None,
            discard=False,
            comment=None,
            comment_url=None,
            rest={},
        )
        cj.set_cookie(c)
    cj.save(ignore_discard=True)
    if kodi.get_setting("cookie_debug") == "true":
        log_utils.log("After Cookies: %s - %s" % (self, scraper_utils.cookies_as_str(cj)), log_utils.LOGDEBUG)
    return cj
Example 7: _get_episode_url
def _get_episode_url(self, show_url, video):
    url = urlparse.urljoin(self.base_url, show_url)
    html = self._http_get(url, cache_limit=8)
    pattern = "<a[^>]*class='dropdown-toggle'[^>]*>Season\s+%s<(.*?)<li\s+class='divider'>" % (video.season)
    match = re.search(pattern, html, re.DOTALL)
    if match:
        fragment = match.group(1)
        ep_ids = dom_parser.parse_dom(fragment, 'a', {'id': 'epiloader'}, ret='class')
        episodes = dom_parser.parse_dom(fragment, 'a', {'id': 'epiloader'})
        airdates = dom_parser.parse_dom(fragment, 'span', {'class': 'airdate'})
        ep_airdate = video.ep_airdate.strftime('%Y-%m-%d') if isinstance(video.ep_airdate, datetime.date) else ''
        norm_title = scraper_utils.normalize_title(video.ep_title)
        num_id, airdate_id, title_id = '', '', ''
        for episode, airdate, ep_id in zip(episodes, airdates, ep_ids):
            if ep_airdate and ep_airdate == airdate: airdate_id = ep_id
            match = re.search('(?:<span[^>]*>)?(\d+)\.\s*([^<]+)', episode)
            if match:
                ep_num, ep_title = match.groups()
                if int(ep_num) == int(video.episode): num_id = ep_id
                if norm_title and norm_title in scraper_utils.normalize_title(ep_title): title_id = ep_id

        best_id = ''
        if not scraper_utils.force_title(video):
            if num_id: best_id = num_id
            if kodi.get_setting('airdate-fallback') == 'true' and airdate_id: best_id = airdate_id
            if kodi.get_setting('title-fallback') == 'true' and title_id: best_id = title_id
        else:
            if title_id: best_id = title_id

        if best_id:
            return EP_URL % (best_id)
Example 8: __init__
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
    self.timeout = timeout
    self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
    self.username = kodi.get_setting('%s-username' % (self.get_name()))
    self.password = kodi.get_setting('%s-password' % (self.get_name()))
    self.max_results = int(kodi.get_setting('%s-result_limit' % (self.get_name())))
    self.max_gb = kodi.get_setting('%s-size_limit' % (self.get_name()))
    self.max_bytes = int(self.max_gb) * 1024 * 1024 * 1024
Example 9: __init__
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
    self.timeout = timeout
    self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
    self.last_call = 0
    device_id = kodi.get_setting('%s-device_id' % (self.get_name()))
    if device_id not in ['', '0']:
        self.device_id = device_id
    else:
        self.device_id = None
Example 10: get_ua
def get_ua():
    try: last_gen = int(kodi.get_setting('last_ua_create'))
    except: last_gen = 0
    if not kodi.get_setting('current_ua') or last_gen < (time.time() - (7 * 24 * 60 * 60)):
        index = random.randrange(len(RAND_UAS))
        user_agent = RAND_UAS[index].format(win_ver=random.choice(WIN_VERS), feature=random.choice(FEATURES), br_ver=random.choice(BR_VERS[index]))
        log_utils.log('Creating New User Agent: %s' % (user_agent), log_utils.LOGDEBUG)
        kodi.set_setting('current_ua', user_agent)
        kodi.set_setting('last_ua_create', str(int(time.time())))
    else:
        user_agent = kodi.get_setting('current_ua')
    return user_agent
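A typical way to consume get_ua() is to attach the rotating agent to outgoing requests. A small usage sketch follows, with urllib2 chosen to match the Python 2 style of the surrounding examples; the URL is a placeholder.

# Usage sketch: send a request with the cached/rotated User-Agent.
import urllib2

request = urllib2.Request('http://example.com/', headers={'User-Agent': get_ua()})
html = urllib2.urlopen(request).read()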
Example 11: __init__
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
    self.timeout = timeout
    if kodi.get_setting('%s-use_https' % (self.__class__.base_name)) == 'true':
        scheme = 'https'
        prefix = 'www'
    else:
        scheme = 'http'
        prefix = 'http'
    base_url = kodi.get_setting('%s-base_url' % (self.__class__.base_name))
    self.base_url = scheme + '://' + prefix + '.' + base_url
    self.username = kodi.get_setting('%s-username' % (self.__class__.base_name))
    self.password = kodi.get_setting('%s-password' % (self.__class__.base_name))
Example 12: __match_episode
def __match_episode(self, video, norm_title, title, hash_id=None):
    sxe_patterns = [
        '(.*?)[._ -]s([0-9]+)[._ -]*e([0-9]+)',
        '(.*?)[._ -]([0-9]+)x([0-9]+)',
        '(.*?)[._ -]([0-9]+)([0-9][0-9])',
        '(.*?)[._ -]?season[._ -]*([0-9]+)[._ -]*-?[._ -]*episode[._ -]*([0-9]+)',
        '(.*?)[._ -]\[s([0-9]+)\][._ -]*\[e([0-9]+)\]',
        '(.*?)[._ -]s([0-9]+)[._ -]*ep([0-9]+)']

    show_title = ''
    for pattern in sxe_patterns:
        match = re.search(pattern, title, re.I)
        if match:
            temp_title, season, episode = match.groups()
            if int(season) == int(video.season) and int(episode) == int(video.episode):
                show_title = temp_title
                break
    else:
        # for/else: no SxE pattern matched the right episode, so fall back
        # to matching on the air date if that fallback is enabled
        airdate_fallback = kodi.get_setting('airdate-fallback') == 'true' and video.ep_airdate
        if video.ep_airdate and airdate_fallback:
            airdate_pattern = '(.*?)[. _]%s[. _]%02d[. _]%02d[. _]' % (video.ep_airdate.year, video.ep_airdate.month, video.ep_airdate.day)
            match = re.search(airdate_pattern, title)
            if match:
                show_title = match.group(1)

    if show_title and norm_title in scraper_utils.normalize_title(show_title):
        return 'hash=%s' % (hash_id)
Example 13: _get_episode_url
def _get_episode_url(self, show_url, video):
    sxe = '.S%02dE%02d.' % (int(video.season), int(video.episode))
    force_title = scraper_utils.force_title(video)
    title_fallback = kodi.get_setting('title-fallback') == 'true'
    norm_title = scraper_utils.normalize_title(video.ep_title)
    try: ep_airdate = video.ep_airdate.strftime('.%Y.%m.%d.')
    except: ep_airdate = ''

    page_url = [show_url]
    too_old = False
    while page_url and not too_old:
        url = urlparse.urljoin(self.base_url, page_url[0])
        html = self._http_get(url, require_debrid=True, cache_limit=1)
        headings = re.findall('<h2>\s*<a\s+href="([^"]+)[^>]+>(.*?)</a>', html)
        posts = dom_parser.parse_dom(html, 'div', {'id': 'post-\d+'})
        for heading, post in zip(headings, posts):
            if self.__too_old(post):
                too_old = True
                break
            if CATEGORIES[VIDEO_TYPES.TVSHOW] in post and show_url in post:
                url, title = heading
                if not force_title:
                    if (sxe in title) or (ep_airdate and ep_airdate in title):
                        return scraper_utils.pathify_url(url)
                else:
                    if title_fallback and norm_title:
                        match = re.search('<strong>(.*?)</strong>', post)
                        if match and norm_title == scraper_utils.normalize_title(match.group(1)):
                            return scraper_utils.pathify_url(url)

        page_url = dom_parser.parse_dom(html, 'a', {'class': 'nextpostslink'}, ret='href')
Example 14: get_sources
def get_sources(self, video):
    source_url = self.get_url(video)
    hosters = []
    if source_url and source_url != FORCE_NO_MATCH:
        page_url = urlparse.urljoin(self.base_url, source_url)
        html = self._http_get(page_url, cache_limit=.5)
        page_urls = [page_url]
        if kodi.get_setting('scraper_url'):
            page_urls += self.__get_page_urls(html)

        for page_url in page_urls:
            html = self._http_get(page_url, cache_limit=.5)
            subs = 'Turkish Subtitles'
            fragment = dom_parser.parse_dom(html, 'li', {'class': 'active'})
            if fragment:
                frag_class = dom_parser.parse_dom(fragment[0], 'span', ret='class')
                if frag_class:
                    if frag_class[0] == 'icon-en':
                        subs = 'English Subtitles'
                    elif frag_class[0] == 'icon-orj':
                        subs = ''

            hosters += self.__get_cloud_links(html, page_url, subs)
            hosters += self.__get_embedded_links(html, subs)
            hosters += self.__get_iframe_links(html, subs)

    return hosters
Example 15: _get_episode_url
def _get_episode_url(self, show_url, video):
    url = urlparse.urljoin(self.base_url, show_url)
    html = self._http_get(url, cache_limit=1)
    match = re.search("var\s+id\s*=\s*'?(\d+)'?", html)
    if match:
        show_id = match.group(1)
        season_url = SEASON_URL % (show_id, video.season, str(int(time.time()) * 1000))
        season_url = urlparse.urljoin(self.base_url, season_url)
        html = self._http_get(season_url, cache_limit=1)
        try:
            js_data = json.loads(html)
        except ValueError:
            log_utils.log("Invalid JSON returned: %s: %s" % (url, html), log_utils.LOGWARNING)
        else:
            force_title = self._force_title(video)
            if not force_title:
                for episode in js_data:
                    if int(episode["episode_number"]) == int(video.episode):
                        return LINK_URL % (show_id, video.season, episode["episode_number"])

            if (force_title or kodi.get_setting("title-fallback") == "true") and video.ep_title:
                norm_title = self._normalize_title(video.ep_title)
                for episode in js_data:
                    if norm_title == self._normalize_title(episode["title"]):
                        return LINK_URL % (show_id, video.season, episode["episode_number"])