本文整理汇总了Python中resources.lib.modules.log_utils.log函数的典型用法代码示例。如果您正苦于以下问题:Python log函数的具体用法?Python log怎么用?Python log使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了log函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _getSearchData
def _getSearchData(self, query, possibleTitles, year, session, isMovie):
    """Search the site for `query` and return request data for the best match.

    query: search keywords string sent to the site.
    possibleTitles: lowercase titles considered an acceptable match.
    year: release year (string) used to rank results.
    session: requests-style session (from _createSession).
    isMovie: selects the movie vs. TV search endpoint.

    Returns a dict with 'pageURL', 'UA', 'referer' and 'cookies' for the
    top-ranked result page, or None when nothing matched / the request failed.
    """
    try:
        searchURL = self.BASE_URL + ('/?' if isMovie else '/?tv=&') + urlencode({'search_keywords': query})
        r = self._sessionGET(searchURL, session)
        if not r.ok:
            return None
        bestGuessesURLs = []
        soup = BeautifulSoup(r.content, 'html.parser')
        mainDIV = soup.find('div', role='main')
        for resultDIV in mainDIV.findAll('div', {'class': 'index_item'}, recursive=False):
            # Search result titles in Primewire.gr are usually "[Name of Movie/TVShow] (yyyy)".
            # Example: 'Star Wars Legends: Legacy of the Force (2015)'
            match = re.search(r'(.*?)(?:\s\((\d{4})\))?$', resultDIV.a['title'].lower().strip())
            resultTitle, resultYear = match.groups()
            if resultTitle in possibleTitles:
                # 'resultYear' is the bare 4-digit year captured by the regex
                # (no parentheses), or None when the title has no year suffix.
                if resultYear == year:
                    bestGuessesURLs.insert(0, resultDIV.a['href'])  # Use year to make better guesses.
                else:
                    bestGuessesURLs.append(resultDIV.a['href'])
        if bestGuessesURLs:
            return {
                'pageURL': self.BASE_URL + bestGuessesURLs[0],
                'UA': session.headers['User-Agent'],
                'referer': searchURL,
                'cookies': session.cookies.get_dict(),
            }
        else:
            return None
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('PrimewireGR - Exception: \n' + str(failure))
        return
示例2: sources
def sources(self, url, hostDict, hostprDict):
    """Scrape direct download links for a movie page.

    url: movie page URL (or None). hostDict/hostprDict: known hosters
    (unused here; the links are direct). Returns a list of source dicts.

    The original duplicated the whole request/parse block for 1080p and
    720p; both passes are identical apart from the quality label, so they
    are driven by one loop here.
    """
    sources = []
    if url is None:
        return
    try:
        page = client.request(url)
        headers = {'Origin': 'http://hdpopcorns.co', 'Referer': url,
                   'X-Requested-With': 'XMLHttpRequest', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}
        request_url = '%s/select-movie-quality.php' % (self.base_link)
        for quality in ('1080p', '720p'):
            try:
                # Hidden form fields FileName<q>/FileSize<q>/FSID<q> identify the file.
                pattern = 'FileName%s.+?value="(.+?)".+?FileSize%s.+?value="(.+?)".+?value="(.+?)"' % (quality, quality)
                for fname, fsize, fsid in re.compile(pattern, re.DOTALL).findall(page):
                    form_data = {'FileName%s' % quality: fname,
                                 'FileSize%s' % quality: fsize,
                                 'FSID%s' % quality: fsid}
                    link = requests.post(request_url, data=form_data, headers=headers, timeout=3).content
                    final_url = re.compile('<strong>%s</strong>.+?href="(.+?)"' % quality, re.DOTALL).findall(link)[0]
                    sources.append({'source': 'DirectLink', 'quality': quality, 'language': 'en',
                                    'url': final_url, 'direct': True, 'debridonly': False})
            except Exception:
                pass  # best-effort: a missing quality simply yields no source
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('Popcorn - Exception: \n' + str(failure))
        return sources
示例3: movie
def movie(self, imdb, title, localtitle, aliases, year):
    """Find a movie's site URL and return its list-page URL.

    Searches the site, keeps results whose anchor text contains the title
    and year, and extracts the video id after '?v='. Returns the formatted
    list URL, or None on any failure.
    """
    try:
        clean_title = cleantitle.geturl(title)
        search_url = self.search_link % (clean_title.replace('-', '+'), year)
        # Fix: the Host header must be a bare host name (RFC 7230) — the
        # original sent 'http://icefilms1.unblocked.sh' including the scheme,
        # which is not a valid Host value.
        headers = {'Host': 'icefilms1.unblocked.sh',
                   'Cache-Control': 'max-age=0',
                   'Connection': 'keep-alive',
                   'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
                   'Upgrade-Insecure-Requests': '1',
                   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                   'Accept-Encoding': 'gzip, deflate, br',
                   'Accept-Language': 'en-US,en;q=0.8'}
        r = client.request(search_url, headers=headers)
        r = dom_parser2.parse_dom(r, 'td')
        # Keep only result rows (they carry a <div class='number'> marker).
        r = [dom_parser2.parse_dom(i, 'a', req='href') for i in r if "<div class='number'" in i.content]
        r = [(urlparse.urljoin(self.base_url, i[0].attrs['href'])) for i in r
             if title.lower() in i[0].content.lower() and year in i[0].content]
        url = r[0]
        # Drop the trailing slash, then keep only the video id after '?v='.
        url = url[:-1].split('?v=')[1]
        return self.list_url % url
    except:
        failure = traceback.format_exc()
        log_utils.log('IceFilms - Exception: \n' + str(failure))
        return
示例4: sources
def sources(self, url, hostDict, hostprDict):
    """Collect hoster links from an episode page.

    url: site-relative episode path (or None). hostDict/hostprDict: known
    hoster domains. Returns a list of source dicts (possibly empty).
    """
    try:
        sources = []
        if url is None:
            return sources
        url = urlparse.urljoin(self.base_link, url)
        # The site is flaky: retry the page fetch up to 3 times.
        result = None
        for _ in range(3):
            result = client.request(url, timeout=10)
            if result is not None:
                break
        dom = dom_parser.parse_dom(result, 'div', attrs={'class': 'links', 'id': 'noSubs'})
        result = dom[0].content
        links = re.compile(r'<tr\s*>\s*<td><i\s+class="fa fa-youtube link-logo"></i>([^<]+).*?href="([^"]+)"\s+class="watch', re.DOTALL).findall(result)
        # Only probe the first five candidate rows to bound the request count.
        for link in links[:5]:
            try:
                url2 = urlparse.urljoin(self.base_link, link[1])
                result2 = None
                for _ in range(2):
                    result2 = client.request(url2, timeout=3)
                    if result2 is not None:
                        break
                r = re.compile(r'href="([^"]+)"\s+class="action-btn').findall(result2)[0]
                valid, hoster = source_utils.is_host_valid(r, hostDict)
                if not valid:
                    continue
                urls, host, direct = source_utils.check_directstreams(r, hoster)
                for x in urls:
                    sources.append({'source': host, 'quality': x['quality'], 'language': 'en',
                                    'url': x['url'], 'direct': direct, 'debridonly': False})
            except Exception:
                pass  # best-effort: skip candidates that fail to resolve
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('SeriesFree - Exception: \n' + str(failure))
        return sources
示例5: resolve
def resolve(self, url):
    """Resolve a urlencoded server-selection payload into a playable URL.

    `url` is the query string built by sources(), carrying 'data-film',
    'data-server', 'data-name' and 'url' keys. Returns an https URL string,
    or None if any step fails.
    """
    try:
        urldata = urlparse.parse_qs(url)
        urldata = dict((i, urldata[i][0]) for i in urldata)
        # Step 1: register the film/server selection with the player plugin.
        post = {
            'ipplugins': 1, 'ip_film': urldata['data-film'],
            'ip_server': urldata['data-server'],
            'ip_name': urldata['data-name'],
            'fix': "0"}
        p1 = client.request('http://freeputlockers.org/ip.file/swf/plugins/ipplugins.php',
                            post=post, referer=urldata['url'], XHR=True)
        p1 = json.loads(p1)
        # Step 2: fetch the player token (first attempt, n=0).
        p2 = client.request(
            'http://freeputlockers.org/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=0' %
            (p1['s'],
             urldata['data-server']))
        p2 = json.loads(p2)
        # Step 3: check the token's status against the API.
        p3 = client.request('http://freeputlockers.org/ip.file/swf/ipplayer/api.php?hash=%s' % (p2['hash']))
        p3 = json.loads(p3)
        n = p3['status']
        if n is False:
            # First token rejected: retry once with n=1.
            p2 = client.request(
                'http://freeputlockers.org/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=1' %
                (p1['s'],
                 urldata['data-server']))
            p2 = json.loads(p2)
        # 'data' arrives scheme-relative with escaped slashes; normalize to https.
        url = "https:%s" % p2["data"].replace("\/", "/")
        return url
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('FreePutlockers - Exception: \n' + str(failure))
        return
示例6: sources
def sources(self, url, hostDict, hostprDict):
    """Scrape Google-Drive stream links for a movie.

    url: urlencoded query string with 'title' and 'year'. Returns a list of
    source dicts (possibly empty).
    """
    try:
        sources = []
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['title']
        year = data['year']
        url = urlparse.urljoin(self.base_link, self.search_link)
        url = url % (title.replace(':', '').replace(' ', '_'), year)
        search_results = client.request(url)
        # Turn the embed frame URL into the stream-info endpoint.
        varid = re.compile('var frame_url = "(.+?)"', re.DOTALL).findall(search_results)[0].replace('/embed/', '/streamdrive/info/')
        res_chk = re.compile('class="title"><h1>(.+?)</h1>', re.DOTALL).findall(search_results)[0]
        varid = 'http:' + varid
        holder = client.request(varid)
        links = re.compile('"src":"(.+?)"', re.DOTALL).findall(holder)
        # The page title carries the resolution and is identical for every
        # link, so decide the quality once instead of on every iteration.
        if '1080' in res_chk:
            quality = '1080p'
        elif '720' in res_chk:
            quality = '720p'
        else:
            quality = 'DVD'
        for link in links:
            vid_url = link.replace('\\', '')  # unescape JSON slashes
            sources.append({'source': 'Googlelink', 'quality': quality, 'language': 'en',
                            'url': vid_url, 'direct': False, 'debridonly': False})
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('Watch32 - Exception: \n' + str(failure))
        return sources
示例7: sources
def sources(self, url, hostDict, hostprDict):
    """Scrape hoster links via the site's WordPress JSON search.

    url: urlencoded query string with 'title' and 'year'. Returns a list
    of source dicts (possibly empty).
    """
    sources = []
    try:
        if url is None:
            return
        urldata = urlparse.parse_qs(url)
        urldata = dict((i, urldata[i][0]) for i in urldata)
        title = urldata['title'].replace(':', ' ').lower()
        year = urldata['year']
        search_id = title.lower()
        start_url = self.search_link % (self.base_link, search_id.replace(' ', '%20'))
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
        html = client.request(start_url, headers=headers)
        found = re.compile('"post","link":"(.+?)","title".+?"rendered":"(.+?)"', re.DOTALL).findall(html)
        for link, name in found:
            link = link.replace('\\', '')  # unescape JSON slashes
            if title.lower() in name.lower() and year in name:
                holder = client.request(link, headers=headers)
                # Follow two levels of iframes down to the actual hoster URL.
                frame = re.compile('<iframe src="(.+?)"', re.DOTALL).findall(holder)[0]
                end = client.request(frame, headers=headers)
                final_url = re.compile('<iframe src="(.+?)"', re.DOTALL).findall(end)[0]
                valid, host = source_utils.is_host_valid(final_url, hostDict)
                # Fix: the validity flag was computed but ignored; skip
                # unrecognized hosters as the sibling scrapers do.
                if not valid:
                    continue
                sources.append({'source': host, 'quality': '1080p', 'language': 'en',
                                'url': final_url, 'info': [], 'direct': False, 'debridonly': False})
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('1080PMovies - Exception: \n' + str(failure))
        return sources
示例8: sources
def sources(self, url, hostDict, hostprDict):
    """Extract the embedded player iframe URL(s) from a page.

    Returns a list of source dicts; the host name is derived from the
    iframe URL's domain.

    The original had two nested try/except blocks with identical handlers
    and an unreachable trailing `return sources`; one handler suffices.
    """
    sources = []
    try:
        r = client.request(url)
        for url in re.compile('iframe id="odbIframe" src="(.+?)"').findall(r):
            # Domain part of the iframe URL, without 'www.' prefix.
            host = url.split('//')[1].replace('www.', '')
            host = host.split('/')[0].lower()
            sources.append({
                'source': host,
                'quality': 'HD',
                'language': 'en',
                'url': url,
                'direct': False,
                'debridonly': False
            })
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('ODB - Exception: \n' + str(failure))
    return sources
示例9: sources
def sources(self, url, hostDict, hostprDict):
    """Scrape alternate-server hoster links from the site's search results.

    url: urlencoded query string with 'title' and 'year'. Returns a list
    of source dicts (possibly empty).
    """
    sources = []
    try:
        if url is None:
            return
        urldata = urlparse.parse_qs(url)
        urldata = dict((i, urldata[i][0]) for i in urldata)
        title = urldata['title'].replace(':', ' ').lower()
        year = urldata['year']
        search_id = title.lower()
        start_url = urlparse.urljoin(self.base_link, self.search_link % (search_id.replace(' ', '+') + '+' + year))
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
        html = client.request(start_url, headers=headers)
        found = re.compile('a href="(.+?)" title="(.+?)"', re.DOTALL).findall(html)
        for link, name in found:
            if title.lower() in name.lower() and year in name:
                holder = client.request(link, headers=headers)
                # Each <button value="...e=<hoster url>"> is an alternate server.
                alternates = re.compile('<button class="text-capitalize dropdown-item" value="(.+?)"', re.DOTALL).findall(holder)
                for alt_link in alternates:
                    alt_url = alt_link.split("e=")[1]
                    valid, host = source_utils.is_host_valid(alt_url, hostDict)
                    # Fix: the validity flag was computed but ignored; skip
                    # unrecognized hosters as the sibling scrapers do.
                    if not valid:
                        continue
                    sources.append({'source': host, 'quality': '1080p', 'language': 'en',
                                    'url': alt_url, 'info': [], 'direct': False, 'debridonly': False})
        return sources
    except Exception:
        failure = traceback.format_exc()
        # NOTE(review): log tag says '1080PMovies' although this looks like a
        # different provider — possible copy-paste leftover; confirm before renaming.
        log_utils.log('1080PMovies - Exception: \n' + str(failure))
        return sources
示例10: tvshow
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    """Pack the show identifiers into a urlencoded query string.

    Returns the encoded string, or None when encoding fails.
    """
    try:
        payload = {
            'imdb': imdb,
            'tvdb': tvdb,
            'tvshowtitle': tvshowtitle,
            'localtvshowtitle': localtvshowtitle,
            'year': year,
        }
        return urllib.urlencode(payload)
    except:
        failure = traceback.format_exc()
        log_utils.log('Library - Exception: \n' + str(failure))
        return
示例11: episode
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Resolve the link text/URL for a single episode of a show.

    `url` arrives as the urlencoded query string produced by tvshow().
    Returns the matched value from the episode list, or None on failure.
    """
    try:
        if url == None: return
        url = urlparse.parse_qs(url)
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        url['premiered'], url['season'], url['episode'] = premiered, season, episode
        try:
            # Look up the season page by the slug "<title>-season-<n>".
            clean_title = cleantitle.geturl(url['tvshowtitle'])+'-season-%d' % int(season)
            search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
            search_results = client.request(search_url)
            parsed = client.parseDOM(search_results, 'div', {'id': 'movie-featured'})
            parsed = [(client.parseDOM(i, 'a', ret='href'), re.findall('<b><i>(.+?)</i>', i)) for i in parsed]
            parsed = [(i[0][0], i[1][0]) for i in parsed if cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
            url = parsed[0][0]  # rebind 'url' to the season page href
        except:
            # NOTE(review): if the search fails, 'url' is still the parsed dict,
            # so the client.request() below will almost certainly fail too and
            # fall into the outer handler — confirm this fallthrough is intended.
            pass
        data = client.request(url)
        data = client.parseDOM(data, 'div', attrs={'id': 'details'})
        data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
        # Keep anchors whose text equals the (zero-stripped) episode number.
        url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]
        # NOTE(review): returns i[0] (the anchor TEXT, i.e. the episode number),
        # not i[1] (the href) — looks suspicious; verify against the caller.
        return url[0][0]
    except:
        failure = traceback.format_exc()
        log_utils.log('CinemaMega - Exception: \n' + str(failure))
        return
示例12: episode
def episode(self, data, imdb, tvdb, title, premiered, season, episode):
    """Locate the page for one episode inside a show's seasons page.

    `data` is the dict produced by the search step ('pageURL', 'UA',
    'cookies', 'referer'). Returns a dict of the same shape pointing at
    the episode page, or None when the episode is not found or on error.
    """
    try:
        seasonsPageURL = data['pageURL']
        # An extra step needed before sources() can be called. Get the episode page.
        # This code will crash if they change the website structure in the future.
        session = self._createSession(data['UA'], data['cookies'], data['referer'])
        xbmc.sleep(1000)  # throttle: pause 1s before hitting the site again
        r = self._sessionGET(seasonsPageURL, session)
        if r.ok:
            soup = BeautifulSoup(r.content, 'html.parser')
            mainDIV = soup.find('div', {'class': 'tv_container'})
            firstEpisodeDIV = mainDIV.find('div', {'class': 'show_season', 'data-id': season})
            # Filter the episode HTML entries to find the one that represents the episode we're after.
            # Walks the season header's siblings, skipping bare text nodes; the
            # anchor text is expected to look like "E<number>".
            episodeDIV = next((element for element in firstEpisodeDIV.next_siblings if not isinstance(
                element, NavigableString) and next(element.a.strings, '').strip('E ') == episode), None)
            if episodeDIV:
                return {
                    'pageURL': self.BASE_URL + episodeDIV.a['href'],
                    'UA': session.headers['User-Agent'],
                    'referer': seasonsPageURL,
                    'cookies': session.cookies.get_dict()
                }
        return None
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('PrimewireGR - Exception: \n' + str(failure))
        return
示例13: __get_movie_url
def __get_movie_url(self, data):
    """Resolve the final fetcher URL for a movie via the site's JSON API.

    Looks the title up, reads the first match's id, pulls its details,
    decrypts the first source's hash token, and returns the fetcher URL
    built from that token — or None if any step fails.
    """
    try:
        search_path = self.movie_search % data['title'].lower().replace(' ', '+')
        listing = client.request(urlparse.urljoin(self.base_link, search_path), headers=self.headers)
        # First search hit is assumed to be the right movie.
        details_path = self.movie_details % json.loads(listing)[0]['id']
        details = client.request(urlparse.urljoin(self.base_link, details_path), headers=self.headers)
        token_encrypted = json.loads(details)['langs'][0]['sources'][0]['hash']
        token = self.__decrypt(token_encrypted)
        return urlparse.urljoin(self.base_link, self.fetcher % token)
    except:
        failure = traceback.format_exc()
        log_utils.log('ShowBox - Exception: \n' + str(failure))
        return
示例14: tvshow
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    """Find the site slug for a TV show.

    Tries the site's AJAX search first, falling back to a proxied HTML
    search, then keeps the result whose cleaned title and year match.
    Returns the URL slug as utf-8 bytes, or None on failure.
    """
    try:
        t = cleantitle.get(tvshowtitle)
        q = urllib.quote_plus(cleantitle.query(tvshowtitle))
        p = urllib.urlencode({'term': q})
        r = client.request(self.search_link, post=p, XHR=True)
        try: r = json.loads(r)
        except: r = None
        # NOTE(review): this unconditional assignment discards the JSON result
        # just parsed above, making the `if r:` branch below dead and forcing
        # the proxy fallback every time. It reads like a debugging leftover or
        # a deliberate kill-switch for a broken AJAX endpoint — confirm intent
        # before removing.
        r = None
        if r:
            r = [(i['seo_url'], i['value'], i['label']) for i in r if 'value' in i and 'label' in i and 'seo_url' in i]
        else:
            # Fallback: scrape the HTML search results through the proxy.
            r = proxy.request(self.search_link_2 % q, 'tv shows')
            r = client.parseDOM(r, 'div', attrs = {'valign': '.+?'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), client.parseDOM(i, 'a')) for i in r]
            r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]
            # Pull the last 4-digit year out of the anchor text.
            r = [(i[0], i[1], re.findall('(\d{4})', i[2])) for i in r]
            r = [(i[0], i[1], i[2][-1]) for i in r if i[2]]
        # Keep only exact cleaned-title matches with the right year.
        r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
        url = r[0][0]
        url = proxy.parse(url)
        url = url.strip('/').split('/')[-1]
        url = url.encode('utf-8')
        return url
    except:
        failure = traceback.format_exc()
        log_utils.log('XWatchSeries - Exception: \n' + str(failure))
        return
示例15: sources
def sources(self, url, hostDict, hostprDict):
    """List server entries from a film page's servers list.

    Each returned source dict carries, in 'url', a urlencoded payload
    ('url', 'data-film', 'data-server', 'data-name') that resolve()
    later turns into a playable stream URL.
    """
    try:
        sources = []
        if url is None:
            return sources
        r = client.request(url)
        # The page labels the stream "HD" or "SD"; map HD -> 720p.
        # (raw-string pattern: '\w' in a plain string is a deprecated escape)
        quality = re.findall(r">(\w+)<\/p", r)
        if quality[0] == "HD":
            quality = "720p"
        else:
            quality = "SD"
        r = dom_parser2.parse_dom(r, 'div', {'id': 'servers-list'})
        r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
        for i in r[0]:
            # Pack everything resolve() needs into one query string.
            url = {
                'url': i.attrs['href'],
                'data-film': i.attrs['data-film'],
                'data-server': i.attrs['data-server'],
                'data-name': i.attrs['data-name']}
            url = urllib.urlencode(url)
            sources.append({'source': i.content, 'quality': quality, 'language': 'en',
                            'url': url, 'direct': False, 'debridonly': False})
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('FreePutlockers - Exception: \n' + str(failure))
        return sources