本文整理汇总了Python中resources.lib.libraries.control.log函数的典型用法代码示例。如果您正苦于以下问题:Python log函数的具体用法?Python log怎么用?Python log使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了log函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_episode
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    """Resolve an episode page path by absolute episode number.

    Looks up the episode's absolute number via TheTVDB, then scans the
    show page's episode table for the row whose 'epnum' cell matches.

    Returns the site-relative path as a utf-8 str, or None on any failure.
    """
    control.log('##### 1 - url %s' % url)
    try:
        # Idiom fix: compare to None with `is`, not `==`.
        if url is None: return
        # Base64-encoded TheTVDB API URL template:
        # http://thetvdb.com/api/<key>/series/%s/default/%01d/%01d
        num = base64.b64decode('aHR0cDovL3RoZXR2ZGIuY29tL2FwaS9FQUNCMkRGNTM0Njc3OEU4L3Nlcmllcy8lcy9kZWZhdWx0LyUwMWQvJTAxZA==')
        num = num % (tvdb, int(season), int(episode))
        control.log('##### 2 - num %s' % num)
        num = client.request(num)
        # Absolute episode number across all seasons.
        num = client.parseDOM(num, 'absolute_number')[0]
        control.log('##### 3 - num %s' % num)
        url = urlparse.urljoin(self.base_link, url)
        control.log('##### url %s' % url)
        result = client.request(url)
        control.log('##### res %s' % url)
        result = result.decode('iso-8859-1').encode('utf-8')
        # Episode rows carry an empty class attribute.
        result = client.parseDOM(result, 'tr', attrs = {'class': ''})
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'td', attrs = {'class': 'epnum'})) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        # Keep the first link whose episode-number cell matches.
        result = [i[0] for i in result if num == i[1]][0]
        url = urlparse.urljoin(self.base_link, result)
        url = urlparse.urlparse(url).path
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        # Scraper convention: any failure (network, parse, no match)
        # yields None so the caller can fall back to other providers.
        return
示例2: get_sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    # Collect playable sources for a Dizilab page.
    # Returns a list of dicts {'source', 'quality', 'provider', 'url'};
    # empty list on failure.
    control.log('######### DIZILAB ## %s ' % url)
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        #result = client.source(url)
        result = client2.http_get(url)
        try:
            # Direct (gvideo) player links scraped from the episode player markup.
            url = re.compile('"episode_player".*?src="([^"]+)"').findall(result)
            # NOTE(review): findall with a single capture group returns plain
            # strings, so i[0]/i[1] below index *characters*, not
            # (url, resolution) pairs — int(i[1]) on a non-digit raises and
            # this whole branch falls into the except.  The pattern
            # presumably once captured two groups; verify against the site.
            links = [(i[0], '1080p') for i in url if int(i[1]) >= 1080]
            links += [(i[0], 'HD') for i in url if 720 <= int(i[1]) < 1080]
            links += [(i[0], 'SD') for i in url if 480 <= int(i[1]) < 720]
            # Only fall back to 360-480p when no SD link was found.
            if not 'SD' in [i[1] for i in links]: links += [(i[0], 'SD') for i in url if 360 <= int(i[1]) < 480]
            for i in links: sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'Dizilab', 'url': i[0]})
        except:
            pass
        try:
            # Fallback: first openload iframe embed on the page.
            url = client.parseDOM(result, 'iframe', ret='src')
            url = [i for i in url if 'openload.' in i][0]
            sources.append({'source': 'openload.co', 'quality': client.file_quality_openload(url)['quality'], 'provider': 'Dizilab', 'url': url})
        except:
            pass
        return sources
    except:
        return sources
示例3: resolve
def resolve(self, url):
    """Resolve a hoster page URL to a final playable URL.

    Dispatches on the hoster embedded in *url*; returns the resolved
    URL, False for unrecognised hosts, or None when resolution raises.
    """
    control.log('RESSS %s' % url)
    try:
        if 'openload.co' in url:
            url = resolvers.request(url)
            return url
        if 'movieshd' in url:
            r = self.request(url)[0]
            # Pairs of (file url, numeric quality label); pick the highest.
            r = re.findall("file: '([^']+)',label: '(\d+)", r)
            r1 = sorted(r, key=lambda k: k[1])
            r2 = client.replaceHTMLCodes(r1[-1][0])
            return r2
        if 'seriesonline' in url:
            r = self.request(url)[0]
            r = [client.parseDOM(r, 'source', ret='src'), client.parseDOM(r, 'source', ret='label')]
            r = zip(r[0], r[1])
            r1 = sorted(r, key=lambda k: k[1])
            # Second-best labelled source, then pass through googlepass.
            r2 = client.replaceHTMLCodes(r1[-2][0])
            r2 = client.googlepass(url)
            return r2
        return False
    except Exception as e:
        # BUG FIX: the original used '%S' — an invalid printf-style format
        # character that raised ValueError here and hid the real error.
        control.log('RESSS %s' % e)
        pass
示例4: get_episode
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    # Query the alluc search API for streams of one episode.
    # *url* is expected to look like "Show Title (YYYY)".
    # Appends hits to self.stream_url and returns that list; None on error.
    try:
        if control.setting('alluc_user'):
            # With a debrid account, prefer direct-download results;
            # otherwise request streaming links.
            if control.setting('realdebrid_token') or control.setting('premiumize_user'):
                self.moviesearch_link = '/api/search/download?user=%s&password=%s&query=%s'
            else:
                self.moviesearch_link = '/api/search/stream/?user=%s&password=%s&query=%s'
        tvshowtitle, year = re.compile('(.+?) [(](\d{4})[)]$').findall(url)[0]
        # Zero-pad to the sXXeXX query convention.
        season, episode = season.zfill(2), episode.zfill(2)
        query = '%s s%se%s' % (tvshowtitle, season, episode)
        query = self.moviesearch_link % (control.setting('alluc_user'), control.setting('alluc_password'), urllib.quote_plus(query))
        r = urlparse.urljoin(self.base_link, query)
        # '%23' is an URL-encoded '#'; '+#newlinks' presumably narrows the
        # search to recently added links — verify against the alluc API.
        r = r + "+%23newlinks"
        r = requests.get(r).json()
        for item in r['result']:
            # Keep only single-host entries flagged as English.
            if len(item['hosterurls']) == 1 and 'en' in item['lang']:
                tmp = item['hosterurls'][0]['url']
                tmp = client.replaceHTMLCodes(tmp)
                tmp = tmp.encode('utf-8')
                title = item['title'].encode('utf-8')
                self.stream_url.append({'url': tmp, 'hoster': item['hostername'], 'title': title })
        return self.stream_url
    except Exception as e:
        control.log('alluc error tv')
        control.log(e)
        return
示例5: request
def request(url, post=None, headers=None, mobile=False, safe=False, timeout='30'):
    """Fetch *url* through the cached Cloudflare agent/cookie pair.

    On a 503 (Cloudflare challenge) the cached agent and cookie are
    refreshed (ttl 0) and the request is retried once.  Returns the
    response body, or None on any failure.
    """
    try:
        control.log('[cloudflare] request %s' % url)
        # BUG FIX: the original ran `headers.update(headers)` (a no-op on a
        # dict) inside try/except purely to detect None; say what we mean.
        if headers is None:
            headers = {}
        agent = cache.get(cloudflareAgent, 168)
        if 'User-Agent' not in headers:
            headers['User-Agent'] = agent
        # Cookies are cached per scheme://host, not per full URL.
        u = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
        cookie = cache.get(cloudflareCookie, 168, u, post, headers, mobile, safe, timeout)
        result = client.request(url, cookie=cookie, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout, output='response', error=True)
        if result[0] == '503':
            # Challenge hit: force-refresh agent and cookie, then retry.
            agent = cache.get(cloudflareAgent, 0)
            headers['User-Agent'] = agent
            cookie = cache.get(cloudflareCookie, 0, u, post, headers, mobile, safe, timeout)
            result = client.request(url, cookie=cookie, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout)
        else:
            result = result[1]
        return result
    except:
        return
示例6: get_movie
def get_movie(self, imdb, title, year):
    """Find a movie's site-relative URL by title search.

    Normalises candidate titles with cleantitle.movie and returns the
    first exact-title match as a utf-8 path, or None on failure.
    """
    try:
        query = self.search_link % urllib.quote(title)
        query = urlparse.urljoin(self.base_link, query)
        result = client2.http_get(query)
        title = cleantitle.movie(title)
        # NOTE: a dead local (`years`, year +/- 1) that was computed but
        # never used has been removed.
        r = client.parseDOM(result, 'div', attrs = {'class': 'ml-item'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
        r = [(i[0][0], i[1][-1]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        # Strip scheme/host, then keep only the first two path segments.
        r = [(re.sub('http.+?//.+?/','', i[0]), i[1]) for i in r]
        r = [('/'.join(i[0].split('/')[:2]), i[1]) for i in r]
        # De-duplicate while preserving order.
        r = [x for y,x in enumerate(r) if x not in r[:y]]
        r = [i for i in r if title == cleantitle.movie(i[1])]
        u = [i[0] for i in r][0]
        url = urlparse.urljoin(self.base_link, u)
        url = urlparse.urlparse(url).path
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        control.log("@@@@@@@@@@@@@@@ URL %s" % url)
        return url
    except:
        return
示例7: get_movie
def get_movie(self, imdb, title, year):
    """POST a title search to the site and return the matching movie path.

    A hit must match the normalised title AND contain a release year
    within +/- 1 of *year*.  Returns a utf-8 path, or None on failure.
    """
    try:
        query = self.search_link
        post = urllib.urlencode({'searchquery': title, 'searchin': '1'})
        headers = {"Content-Type": "application/x-www-form-urlencoded",
                   "Referer": urlparse.urljoin(self.base_link, query)}
        result = client.request(urlparse.urljoin(self.base_link, query), post=post, headers=headers)
        # (leftover debug print of the raw response removed)
        result = client.parseDOM(result, 'div', attrs = {'class': 'widget search-page'})[0]
        result = client.parseDOM(result, 'td')
        title = cleantitle.movie(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
        result = [(client.parseDOM(i, 'a', ret='href')[-1], client.parseDOM(i, 'a')[-1]) for i in result]
        result = [i for i in result if title == cleantitle.movie(i[1])]
        result = [i[0] for i in result if any(x in i[1] for x in years)][0]
        url = client.replaceHTMLCodes(result)
        # Unwrap redirect links of the form ...?u=<real url>.
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        url = urlparse.urlparse(url).path
        url = url.encode('utf-8')
        return url
    except Exception as e:
        control.log("ERR iwatch %s" % e)
        return
示例8: get_cached_url
def get_cached_url(self, url, data='', cache_limit=8):
    """Look up *url* (+ request *data*) in the local SQLite URL cache.

    Returns (created_timestamp, res_header_list, html) where *html* is
    non-empty only when a cached row exists and is younger than
    *cache_limit* hours; returns None when the cache cannot be read.
    """
    dbcur = None
    try:
        dbcon = database.connect(control.sourcescachedUrl)
        dbcur = dbcon.cursor()
        dbcur.execute(
            "CREATE TABLE IF NOT EXISTS url_cache (url VARCHAR(255) NOT NULL, data VARCHAR(255), response, res_header, timestamp, PRIMARY KEY(url, data))")
    except:
        # Best effort: a failure here surfaces as a cache miss below.
        pass
    try:
        if data is None: data = ''
        html = ''
        res_header = []
        created = 0
        now = time.time()
        age = now - created
        limit = 60 * 60 * cache_limit
        # BUG FIX: the original interpolated url/data directly into the SQL
        # string ("WHERE url = %s" with no quoting), which produced invalid
        # SQL for any real URL and was injectable.  Use bound parameters.
        dbcur.execute('SELECT timestamp, response, res_header FROM url_cache WHERE url = ? and data = ?', (url, data))
        rows = dbcur.fetchall()
        control.log('DB ROWS: Url: %s, ' % (rows))
        if rows:
            created = float(rows[0][0])
            res_header = json.loads(rows[0][2])
            age = now - created
            if age < limit:
                html = rows[0][1]
        control.log('DB Cache: Url: %s, Data: %s, Cache Hit: %s, created: %s, age: %.2fs (%.2fh), limit: %ss' % (
            url, data, bool(html), created, age, age / (60 * 60), limit))
        return created, res_header, html
    except:
        return
示例9: get_movie
def get_movie(self, imdb, title, year):
    """Search xmovies for *title* (*year*) and return the matching path.

    A candidate matches when its normalised title equals the query title
    and the last 4-digit number in its title attribute equals *year*.
    Returns a utf-8 site-relative path, or None on failure.
    """
    try:
        query = urlparse.urljoin(self.base_link, self.search_link)
        query = query % urllib.quote_plus(title)
        r = client.request(query)
        t = cleantitle.get(title)
        # (leftover debug prints and commented-out retry loop removed)
        r = client.parseDOM(r, 'div', attrs = {'class': 'col-lg.+?'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        # Pull the release year out of the title attribute; keep the last one.
        r = [(i[0], i[1], re.findall('(\d{4})', i[1])) for i in r]
        r = [(i[0], i[1], i[2][-1]) for i in r if len(i[2]) > 0]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
        # Keep only the path of an absolute or protocol-relative URL.
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except Exception as e:
        control.log('ERROR XMOVIES GET %s' % e)
        return
示例10: get_sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Scrape download links from wrzcraft post pages.

    *url* is a list of {'url': ...} dicts; each page's "postarea" div is
    scanned for rel=nofollow anchors pointing at known hosters.
    Multi-part archive links (".partN") are skipped.  Quality is guessed
    from '1080'/'720' substrings.  Returns a list of source dicts.
    """
    sources = []
    try:
        if url == None:
            return sources
        for entry in url:
            page = client.request(entry['url'])
            area = client.parseDOM(page, 'div', attrs = {"class": "postarea"})
            anchors = client.parseDOM(area, "a", ret = "href", attrs = {'rel': 'nofollow'})
            for link in anchors:
                # Skip multi-part archives and anything without a scheme.
                if not re.match('((?!\.part[0-9]).)*$', link, flags=re.IGNORECASE) or '://' not in link:
                    continue
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(link.strip().lower()).netloc)[0].split('.')[0]
                scheme = urlparse.urlparse(link).scheme
                if host not in hostDict or not scheme:
                    continue
                if '1080' in link:
                    quality = '1080p'
                elif '720' in link:
                    quality = 'HD'
                else:
                    quality = 'SD'
                cleaned = client.replaceHTMLCodes(link)
                cleaned = cleaned.encode('utf-8')
                sources.append({ 'source': host, 'quality': quality, 'provider': 'wrzcraft', 'url': cleaned })
        return sources
    except Exception as e:
        control.log('ERROR wrzcraft sources %s' % e)
        return sources
示例11: get_movie
def get_movie(self, imdb, title, year):
    """List wrzcraft search-result posts for a movie.

    Skips posts tagged 'Extras' and returns a list of
    {'url': href, 'title': title} dicts (utf-8), or None on error.
    """
    try:
        urls = []
        url = self.moviesearch_link % (cleantitle.geturl(title), year)
        r = urlparse.urljoin(self.base_link, url)
        r = client.request(r)
        posts = client.parseDOM(r, 'div', attrs = {'class': 'post'})
        for post in posts:
            tags = client.parseDOM(post, 'a', attrs = {'rel' : 'category tag'})
            # Skip bonus-material posts (original used a manual boolean
            # flag with `extra == False`; membership test says the same).
            if 'Extras' in tags:
                continue
            containerDiv = client.parseDOM(post, 'div', attrs = {'class' : 'posttitle'})
            if not containerDiv:
                containerDiv = client.parseDOM(post, 'div', attrs = {'class' : 'expandposttitle'})
            href = client.parseDOM(containerDiv, 'a', ret='href')[0]
            # Renamed from `title` to avoid shadowing the parameter.
            name = client.parseDOM(containerDiv, 'a', ret='title')[0]
            urls.append({'url' : href.encode('utf-8'), 'title' : name.encode('utf-8')})
        return urls
    except Exception as e:
        control.log('wrzcraft error')
        control.log(e)
        return
示例12: get_episode
def get_episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Build the site-relative episode path for a previously resolved show URL.

    Returns '/tv-show/<slug>/season/S/episode/E', or None when *url* is
    missing or season/episode cannot be parsed as integers.
    """
    try:
        if url == None:
            return
        # The show slug is the last path segment of the stored show URL.
        slug = url.split('/')[-1]
        path = '/tv-show/%s/season/%01d/episode/%01d' % (slug, int(season), int(episode))
        control.log('AAAA y >>>>>> %s' % path)
        return path
    except:
        return
示例13: get_sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Enumerate Alltube player rows as SD sources.

    Each table row yields one candidate: the data-iframe token is
    appended to the page URL as a query string, the img alt supplies the
    hoster name and the 'text-center' cell the video type.  Returns a
    list of source dicts (possibly empty).
    """
    sources = []
    try:
        if url == None:
            return sources
        url = urlparse.urljoin(self.base_link, url)
        page = client.source(url)
        rows = client.parseDOM(page, 'tr')
        triples = [(client.parseDOM(row, 'a', attrs = {'class': 'watch'}, ret='data-iframe')[0],
                    client.parseDOM(row, 'img', ret='alt')[0],
                    client.parseDOM(row, 'td', attrs={'class':'text-center'})[0]) for row in rows]
        for iframe, alt, vtype in triples:
            try:
                candidate = ('%s?%s' % (url, iframe)).encode('utf-8')
                sources.append({'source': alt.encode('utf-8'), 'quality': 'SD', 'provider': 'Alltube', 'url': candidate, 'vtype': vtype.encode('utf-8')})
            except Exception as e:
                control.log('Alltube sources Exception: %s' % e)
        return sources
    except:
        return sources
示例14: resolve
def resolve(self, url):
    """Resolve a final media URL from an embed page.

    First path: repeatedly base64-decode a window.atob()-wrapped payload
    (once per atob occurrence) and extract the <source src>.  Fallback:
    take the first src attribute and hand it to resolvers.request.
    Returns the resolved URL (or the last intermediate value on failure).
    """
    link = client.source(url)
    url = re.compile('src="(.+?)" style').findall(link)[0]
    link = client.source(url)
    try:
        url = re.compile("window.atob\('(.+?)'\)\)").findall(link)[0]
        # The payload is base64-wrapped once per window.atob occurrence.
        func_count = len(re.findall('window\.atob', link))
        for _i in xrange(func_count):
            url = base64.decodestring(url)
        url = re.compile("<source src='(.+?)'").findall(url)[0]
        # BUG FIX: control.log was called with three positional arguments
        # (every other call site passes one %-formatted string), and
        # resolvers.request ran twice.  Resolve once and reuse the result.
        resolved = resolvers.request(url)
        control.log(">> u2 %s |ENcoded %s" % (url, resolved))
        url = resolved
    except:
        try:
            url = re.compile('src="(.+?)"').findall(link)[0]
            # Derive the bare hoster name (kept for parity with the
            # original, although it is not used afterwards).
            host = urlparse.urlparse(url).netloc
            host = host.replace('www.', '').replace('embed.', '')
            host = host.rsplit('.', 1)[0]
            host = host.lower()
            host = client.replaceHTMLCodes(host)
            host = host.encode('utf-8')
            url = resolvers.request(url)
        except:
            pass
    return url
示例15: get_sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Scrape candidate links from the 'lang_1' block and verify them
    concurrently via self.check worker threads (waits up to ~20s).

    Returns self.sources, which the workers populate; empty on failure.
    """
    try:
        self.sources = []
        mylinks = []
        # BUG FIX: list.sort() sorts in place and returns None, so the
        # original `hostDict = hostDict.sort()` made the loop below raise
        # TypeError and the method always bailed out with no sources.
        hostDict = sorted(hostDict)
        for i in hostDict:
            control.log("WA HO %s" % i)
        if url is None: return self.sources
        url = url.replace('/json/', '/')
        result, headers, content, cookie = client.request(urlparse.urljoin(self.base_link, url), output='extended')
        # Subsequent link checks must carry the page's referer + cookie.
        self.headers['Referer'] = urlparse.urljoin(self.base_link, url)
        self.headers['Cookie'] = cookie
        result = result.replace('\n','')
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, 'div', attrs = {'id': 'lang_1'})[0]
        links = re.compile('href=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>].+?title=[\'|\"|\s|\<]*(.+?)[\'|\"|\s|\>]').findall(result)
        # De-duplicate while preserving order.
        links = [x for y,x in enumerate(links) if x not in links[:y]]
        for i in links:
            try:
                host = i[1]
                host = host.split('.', 1)[0]
                host = host.strip().lower()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                url = i[0]
                url = client.replaceHTMLCodes(url)
                # Unwrap redirect links of the form ...?u=<real url>.
                try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
                except: pass
                if not url.startswith('http'): url = urlparse.urljoin(self.base_link, url)
                # Only '/cale/' paths are playable candidates.
                if not '/cale/' in url: raise Exception()
                url = url.encode('utf-8')
                url = url.replace('/json/', '/')
                url = urlparse.urlparse(url).path
                mylinks.append([url, 'SD'])
            except:
                pass
        # Fan out verification; poll up to 20 times, one second apart.
        threads = [workers.Thread(self.check, i, hostDict) for i in mylinks]
        [i.start() for i in threads]
        for i in range(0, 10 * 2):
            is_alive = [x.is_alive() for x in threads]
            if all(x == False for x in is_alive): break
            time.sleep(1)
        return self.sources
    except:
        return self.sources