本文整理汇总了Python中resources.lib.libraries.client2.http_get函数的典型用法代码示例。如果您正苦于以下问题:Python http_get函数的具体用法?Python http_get怎么用?Python http_get使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了http_get函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: resolve
def resolve(self, url):
    """Follow the host's redirect hops to obtain the final stream URL.

    Returns the resolved URL (upgraded to https when the location asks
    for SSL) or None on any failure.
    """
    try:
        # Sezonlukdizi links need one extra non-following fetch first.
        if 'sezonlukdizi.com' in url:
            url = client2.http_get(url, allow_redirect=False)
        control.log('############ SEZONLUKIDZ res-0 %s' % url)
        url = client2.http_get(url, allow_redirect=False)
        control.log('############ SEZONLUKIDZ res-1 %s' % url)
        # The location flags SSL-only playback via a query parameter.
        return url.replace('http://', 'https://') if 'requiressl=yes' in url else url
    except:
        return
示例2: get_sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Scrape playable links for an Onlinedizi episode.

    url is a site-relative episode path; returns a list of source
    dicts, or whatever was collected so far on failure.
    """
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        path = urlparse.urlparse(url).path
        result = client2.http_get(url)
        # Strip non-ASCII bytes so the DOM/regex helpers do not choke.
        result = re.sub(r'[^\x00-\x7F]+','', result)
        result = client.parseDOM(result, 'li')
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in result]
        # Pick the first subtitled ("Altyaz...") variant linking back to this episode path.
        result = [i[0] for i in result if len(i[0]) > 0 and path in i[0][0] and len(i[1]) > 0 and 'Altyaz' in i[1][0]][0][0]
        url = urlparse.urljoin(self.base_link, result)
        result = client2.http_get(url)
        result = re.sub(r'[^\x00-\x7F]+','', result)
        result = client.parseDOM(result, 'div', attrs = {'class': 'video-player'})[0]
        # The last iframe inside the player div carries the embed URL.
        result = client.parseDOM(result, 'iframe', ret='src')[-1]
        control.log('RRRR %s' % result)
        try:
            # Some embeds carry the real target base64-encoded in an ?id=... parameter.
            url = base64.b64decode(urlparse.parse_qs(urlparse.urlparse(result).query)['id'][0])
            if not url.startswith('http'): raise Exception()
        except:
            # Otherwise fetch the embed page and dig the link out of it.
            url = client2.http_get(result)
            url = urllib.unquote_plus(url.decode('string-escape'))
            frame = client.parseDOM(url, 'iframe', ret='src')
            control.log('RRRR frame %s' % frame)
            if len(frame) > 0:
                url = [client2.http_get(frame[-1], allow_redirect = False)]
            else: url = re.compile('"(.+?)"').findall(url)
            # Keep only hosts we can resolve.
            url = [i for i in url if 'ok.ru' in i or 'vk.com' in i or 'openload.co' in i][0]
            # ok.ru embeds reference the video via a 'mid' query parameter.
            try: url = 'http://ok.ru/video/%s' % urlparse.parse_qs(urlparse.urlparse(url).query)['mid'][0]
            except: pass
        if 'openload.co' in url: host = 'openload.co' ; direct = False ; url = [{'url': resolvers.request(url), 'quality': 'HD'}]
        elif 'ok.ru' in url: host = 'vk' ; direct = True ;url = [{'url': resolvers.request(url), 'quality': 'HD'}]
        elif 'vk.com' in url: host = 'vk' ; direct = True ; url = [{'url': resolvers.request(url), 'quality': 'HD'}]
        else: raise Exception()
        for i in url: sources.append({'source': host, 'quality': i['quality'], 'provider': 'Onlinedizi', 'url': i['url'], })
        return sources
    except:
        return sources
示例3: get_sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Scrape playable links for a Muchmovies title.

    url may carry a '?episode=N' suffix for TV episodes; returns a
    list of source dicts (possibly partial on failure).

    Fixes: removed the dead `url1` locals (assigned but never used in
    either append loop) and a no-op identity list comprehension.
    """
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        # A '?episode=N' suffix marks an episode request; otherwise a movie.
        content = re.compile('(.+?)\?episode=\d*$').findall(url)
        content = 'movie' if len(content) == 0 else 'episode'
        try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
        except: pass
        url = urlparse.urljoin(self.base_link, url) + '/watching.html'
        result = client2.http_get(url)
        movie = client.parseDOM(result, 'div', ret='movie-id', attrs = {'id': 'media-player'})[0]
        mtoken = client.parseDOM(result, 'div', ret='player-token', attrs = {'id': 'media-player'})[0]
        try:
            quality = client.parseDOM(result, 'span', attrs = {'class': 'quality'})[0].lower()
        except: quality = 'hd'
        # Map the site's quality labels onto the add-on's coarse buckets.
        if quality == 'cam' or quality == 'ts': quality = 'CAM'
        elif quality == 'hd': quality = 'HD'
        else: quality = 'SD'
        url = '/ajax/get_episodes/%s/%s' % (movie, mtoken)
        url = urlparse.urljoin(self.base_link, url)
        result = client2.http_get(url)
        result = client.parseDOM(result, 'div', attrs = {'class': 'les-content'})
        result = zip(client.parseDOM(result, 'a', ret='onclick'), client.parseDOM(result, 'a', ret='episode-id'), client.parseDOM(result, 'a'))
        # Each entry becomes (server number, hex token, episode-id, episode number).
        result = [(re.sub('[^0-9]', '', i[0].split(',')[0]), re.sub('[^0-9a-fA-F]', '', i[0].split(',')[-1]), i[1], ''.join(re.findall('(\d+)', i[2])[:1])) for i in result]
        if content == 'episode': result = [i for i in result if i[3] == '%01d' % int(episode)]
        # Servers 2-11 serve direct Google-video streams.
        links = [('/ajax/load_episode/%s/%s' % (i[2], i[1]), 'gvideo') for i in result if 2 <= int(i[0]) <= 11]
        for i in links:
            sources.append({'source': i[1], 'quality': quality, 'provider': 'Muchmovies', 'url': i[0]})
        # Servers 13/14 are third-party embeds.
        links = []
        links += [('/ajax/load_embed/%s/%s' % (i[2], i[1]), 'openload') for i in result if i[0] == '14']
        links += [('/ajax/load_embed/%s/%s' % (i[2], i[1]), 'videomega') for i in result if i[0] == '13']
        for i in links:
            sources.append({'source': i[1], 'quality': quality, 'provider': 'Muchmovies', 'url': i[0]})
        return sources
    except:
        return sources
示例4: get_movie
def get_movie(self, imdb, title, year):
    """Find the Rainierland page URL for a movie by title search.

    Returns the site-relative path (utf-8 encoded) or None when no
    match is found.
    """
    try:
        query = self.search_link % urllib.quote(title)
        query = urlparse.urljoin(self.base_link, query)
        result = client2.http_get(query)
        title = cleantitle.movie(title)
        # NOTE(review): 'years' is computed but never used below.
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
        r = client.parseDOM(result, 'div', attrs = {'class': 'thumb'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
        r = [(i[0][0], i[1][-1]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        # Drop scheme+host, then keep only the first two path components.
        r = [(re.sub('http.+?//.+?/','', i[0]), i[1]) for i in r]
        r = [('/'.join(i[0].split('/')[:2]), i[1]) for i in r]
        # De-duplicate while preserving order.
        r = [x for y,x in enumerate(r) if x not in r[:y]]
        r = [i for i in r if title == cleantitle.movie(i[1])]
        u = [i[0] for i in r][0]
        url = urlparse.urljoin(self.base_link, u)
        url = urlparse.urlparse(url).path
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
示例5: get_episode
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
    """Resolve a show url of the form 'Title (YYYY)' to an episode page path.

    Returns a site-relative path suffixed with '?SxxEyy', or None on
    failure.
    """
    try:
        # The stored url encodes the show as 'Title (YYYY)'.
        tvshowtitle, year = re.compile('(.+?) [(](\d{4})[)]$').findall(url)[0]
        query = self.search_link % urllib.quote(tvshowtitle)
        query = urlparse.urljoin(self.base_link, query)
        result = client2.http_get(query)
        tvshowtitle = cleantitle.tv(tvshowtitle)
        season = '%01d' % int(season)
        episode = '%01d' % int(episode)
        # Accept the show's year plus/minus one to tolerate metadata skew.
        years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
        result = client.parseDOM(result, 'div', attrs = {'class': 'ml-item'})
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h2'), re.compile('class *= *[\'|\"]jt-info[\'|\"]>(\d{4})<').findall(i)) for i in result]
        result = [(i[0][0], i[1][0], i[2][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
        # Split 'Title - Season N' headings into (title, season).
        result = [(i[0], re.compile('(.+?) - Season (\d*)$').findall(i[1]), i[2]) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][1], i[2]) for i in result if len(i[1]) > 0]
        result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
        result = [i for i in result if season == i[2]]
        # Presumably converts the listed (per-season) year back to the show's
        # first-air year before matching — TODO confirm against the site.
        result = [(i[0], i[1], str(int(i[3]) - int(i[2]) + 1)) for i in result]
        result = [i[0] for i in result if any(x in i[2] for x in years)][0]
        result += '?S%02dE%02d' % (int(season), int(episode))
        # Strip scheme and host if present.
        try: url = re.compile('//.+?(/.+)').findall(result)[0]
        except: url = result
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
示例6: get_sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Scrape playable links (gvideo and openload) from a Dizilab episode page."""
    control.log('######### DIZILAB ## %s ' % url)
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        result = client2.http_get(url)
        try:
            url = re.compile('"episode_player".*?src="([^"]+)"').findall(result)
            # NOTE(review): the pattern above has a single capture group, so
            # findall() returns plain strings; i[0]/i[1] below then index
            # CHARACTERS of each URL and int(i[1]) normally raises, silently
            # skipping this whole branch via the bare except. The pattern
            # likely needs a second group capturing the resolution label —
            # confirm against the original upstream scraper.
            links = [(i[0], '1080p') for i in url if int(i[1]) >= 1080]
            links += [(i[0], 'HD') for i in url if 720 <= int(i[1]) < 1080]
            links += [(i[0], 'SD') for i in url if 480 <= int(i[1]) < 720]
            # Fall back to 360p only when nothing was labelled SD.
            if not 'SD' in [i[1] for i in links]: links += [(i[0], 'SD') for i in url if 360 <= int(i[1]) < 480]
            for i in links: sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'Dizilab', 'url': i[0]})
        except:
            pass
        try:
            # Openload embeds are advertised via a plain iframe.
            url = client.parseDOM(result, 'iframe', ret='src')
            url = [i for i in url if 'openload.' in i][0]
            sources.append({'source': 'openload.co', 'quality': client.file_quality_openload(url)['quality'], 'provider': 'Dizilab', 'url': url})
        except:
            pass
        return sources
    except:
        return sources
示例7: get_sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Scrape mail.ru / yadi.sk links and nested gvideo iframes from a Dayt forum post."""
    try:
        sources = []
        if url == None: return sources
        myurl = urlparse.urljoin(self.base_link, '/forum/' + url)
        result = client2.http_get(myurl)
        result10 = result
        # The '5throw' div holds the direct external-host links.
        result10 = client.parseDOM(result10, 'div', attrs={'id': '5throw'})[0]
        result10 = client.parseDOM(result10, 'a', attrs={'rel': 'nofollow'}, ret='href')
        # Posts with '1080' in the URL are tagged 1080p, otherwise HD.
        mquality = 'HD'
        if '1080'in url: mquality = '1080p'
        for i in result10:
            if 'mail.ru' in i:
                myresolve = resolvers.request(i)
                sources.append({'source': 'MAIL.RU', 'quality': mquality, 'provider': 'Dayt', 'url': myresolve})
            if 'yadi.sk' in i:
                myresolve = resolvers.request(i)
                sources.append({'source': 'YADISK', 'quality': mquality, 'provider': 'Dayt', 'url': myresolve})
        # Follow the nested 'pasep' iframe chain (two levels deep) to the player.
        result = client.parseDOM(result, 'iframe', ret='src')
        result = [i for i in result if 'pasep' in i][0]
        result = client.source(result)
        result = client.parseDOM(result, 'iframe', ret='src')[0]
        result = client.source(result)
        result = client.parseDOM(result, 'iframe', ret='src')[0]
        links = resolvers.request(result)
        for i in links: sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'Dayt', 'url': i[0]})
        return sources
    except:
        return sources
示例8: daytse_movies
def daytse_movies(dbname):
    """Crawl dayt.se movie listing pages into the 'movies' SQLite table.

    NOTE(review): the INSERT is commented out, so this currently only
    prints titles and links; when inserts are re-enabled, dbname must
    point to a database that already has the 'movies' table.
    """
    con = lite.connect(dbname)
    cur = con.cursor()
    #cur.execute("DROP TABLE IF EXISTS movies")
    #cur.executemany("INSERT INTO Cars VALUES(?, ?, ?)", cars)
    #cur.execute("CREATE TABLE movies (title TEXT, link TEXT, quality TEXT, UNIQUE (link))")
    # range(1, 2) crawls only page 1; widen the range to crawl more pages.
    for j in range(1,2):
        print'------- %s ' % j
        src = 'http://dayt.se/movies/index.php?&page=%s' % j
        result = ''
        # Pages are served as windows-1256; normalise to utf-8 first.
        result = client2.http_get(src).decode('windows-1256').encode('utf8')
        result = client.parseDOM(result, 'table', attrs={'class': 'topic_table'})
        for i in result:
            print '-------------------------'
            #print i
            print client.parseDOM(i, 'img', attrs={'class': 'image'}, ret='alt')[0]
            print client.parseDOM(i, 'a', attrs={'target': '_self'}, ret='href')[1]
            #result = [(client.parseDOM(i, 'img', attrs={'class': 'image'}, ret='alt')[0],
            #           client.parseDOM(i, 'a', attrs={'target': '_self'}, ret='href')[0],
            #           re.findall('Quality: (\d+).*</pre>', i)[0]) for i in result]
            #cur.executemany("INSERT INTO movies VALUES(?, ?, ?)", result)
    con.commit()
    con.close()
示例9: get_sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Collect candidate links from a page and verify them in worker threads.

    Each link is handed to self.check, which is expected to append valid
    entries to self.sources; waits at most ~20 seconds for the workers.
    """
    try:
        self.sources =[]
        mylinks = []
        result = client2.http_get(url)
        mytitle = re.compile('<title>(.*?)</title>', re.DOTALL).findall(result)[0]
        # Derive a coarse quality bucket from keywords in the page title.
        if any(word in mytitle.lower() for word in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'ts']):
            quality = 'CAM'
        elif '1080p' in mytitle:
            quality = '1080p'
        elif '720p' in mytitle:
            quality = 'HD'
        else:
            quality = 'SD'
        links = client.parseDOM(result, 'a', attrs={'rel': 'nofollow'})
        links = [i for i in links if i.startswith('http')]
        for a in links:
            mylinks.append([a,quality])
        # One worker thread per candidate link.
        threads = []
        for i in mylinks: threads.append(workers.Thread(self.check, i))
        [i.start() for i in threads]
        # Poll once a second, for at most 20 seconds, until all workers stop.
        for i in range(0, 10 * 2):
            is_alive = [x.is_alive() for x in threads]
            if all(x == False for x in is_alive): break
            time.sleep(1)
        return self.sources
    except:
        return self.sources
示例10: get_sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Build a source entry for every embedded player on a CdaOnline page.

    The host label is derived from each iframe's domain; returns a list
    of source dicts (possibly empty).
    """
    sources = []
    try:
        if url == None: return sources
        page = client2.http_get(urlparse.urljoin(self.base_link, url))
        players = client.parseDOM(page, 'div', attrs={'class':'movieplay'})
        frames = [client.parseDOM(p, 'iframe', ret='src')[0] for p in players]
        for frame in frames:
            try:
                # Reduce e.g. 'www.embed.example.com' to the bare name 'example'.
                netloc = urlparse.urlparse(frame).netloc
                netloc = netloc.replace('www.', '').replace('embed.', '')
                netloc = netloc.rsplit('.', 1)[0]
                netloc = netloc.lower()
                netloc = client.replaceHTMLCodes(netloc)
                netloc = netloc.encode('utf-8')
                sources.append({'source': netloc, 'quality': 'SD', 'provider': 'CdaOnline', 'url': frame, 'vtype':'BD'})
            except:
                pass
        return sources
    except:
        return sources
示例11: get_movie
def get_movie(self, imdb, title, year):
    """Find a movie page by title, preferring the Spanish title from IMDb.

    Returns the site-relative path (utf-8 encoded); falls through
    returning None on failure.
    """
    mytitle = title
    try:
        # Ask IMDb for the Spanish-localised title; keep the original on failure.
        t = 'http://www.imdb.com/title/%s' % imdb
        t = client.source(t, headers={'Accept-Language': 'es-ES'})
        t = client.parseDOM(t, 'title')[0]
        # Strip the trailing '(YYYY) ...' part of the page title.
        t = re.sub('(?:\(|\s)\d{4}.+', '', t).strip()
        mytitle = t
    except:
        pass
    try:
        # NOTE(review): 't' below is computed but never used.
        t = cleantitle.get(mytitle)
        query = self.search3_link % urllib.quote_plus(cleantitle.query2(mytitle))
        query = urlparse.urljoin(self.base_link, query)
        result = client2.http_get(query)
        result = re.sub(r'[^\x00-\x7F]+','', result)
        r = result.split('<li class=')
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'i'), re.findall('\((\d{4})\)', i)) for i in r]
        r = [(i[0][0], re.sub('\(|\)','', i[1][0]), i[2][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
        # Keep only entries whose year matches exactly; take the first hit.
        r = [i[0] for i in r if year == i[2]][0]
        # Strip scheme and host if present.
        try: url = re.findall('//.+?(/.+)', r)[0]
        except: url = r
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        pass
示例12: get_sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Scrape gvideo links from a Sezonlukdizi episode's embedded players."""
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        result = client2.http_get(url)
        result = re.sub(r'[^\x00-\x7F]+', ' ', result)
        # Collect candidate player pages from both embed layouts.
        pages = []
        try:
            r = client.parseDOM(result, 'div', attrs = {'id': 'embed'})[0]
            pages.append(client.parseDOM(r, 'iframe', ret='src')[0])
        except:
            pass
        try:
            # Alternate layout: the player's data-id is POSTed to video_link.
            r = client.parseDOM(result, 'div', attrs = {'id': 'playerMenu'})[0]
            r = client.parseDOM(r, 'div', ret='data-id', attrs = {'class': 'item'})[0]
            r = cloudflare.source(urlparse.urljoin(self.base_link, self.video_link), post=urllib.urlencode( {'id': r} ))
            pages.append(client.parseDOM(r, 'iframe', ret='src')[0])
        except:
            pass
        for page in pages:
            try:
                result = client2.http_get(page)
                # Only players that declare a captions track are accepted.
                captions = re.search('kind\s*:\s*(?:\'|\")captions(?:\'|\")', result)
                if not captions: raise Exception()
                # (file URL, resolution label) pairs from the player config.
                result = re.compile('"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?[^"]*"').findall(result)
                links = [(i[0], '1080p') for i in result if int(i[1]) >= 1080]
                links += [(i[0], 'HD') for i in result if 720 <= int(i[1]) < 1080]
                links += [(i[0], 'SD') for i in result if 480 <= int(i[1]) < 720]
                for i in links: sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'Sezonlukdizi', 'url': i[0]})
            except:
                pass
        return sources
    except:
        return sources
示例13: check
def check(url):
    """Return True when the openload file behind *url* is still available.

    Extracts the embed/file id from the URL, fetches the canonical embed
    page, and treats openload's "We are sorry!" marker (or a failed
    fetch) as a dead file. Any error — including a URL that does not
    match the embed pattern — yields False.

    Fix: the original opened with ``re.search(pat, (url)[0])`` which
    searched only the FIRST CHARACTER of the URL, so that early-return
    branch could never match; the unreachable dead code was removed
    (observable behavior is unchanged).
    """
    try:
        id = re.compile('//.+?/(?:embed|f)/([0-9a-zA-Z-_]+)').findall(url)[0]
        url = 'https://openload.co/embed/%s/' % id
        result = client2.http_get(url)
        if result is None: return False
        if '>We are sorry!<' in result: return False
        return True
    except:
        return False
示例14: onlinedizi_tvcache
def onlinedizi_tvcache(self):
    """Fetch the full Onlinedizi series index.

    Returns a list of (relative show path, cleaned title) pairs, or
    None when the listing cannot be retrieved or parsed.
    """
    try:
        page = client2.http_get(self.base_link)
        listing = client.parseDOM(page, 'ul', attrs = {'class': 'all-series-list.+?'})[0]
        shows = []
        for item in client.parseDOM(listing, 'li'):
            hrefs = client.parseDOM(item, 'a', ret='href')
            labels = client.parseDOM(item, 'a')
            if len(hrefs) == 0 or len(labels) == 0: continue
            # Keep only the relative show path, e.g. '/show-name/'.
            paths = re.compile('http.+?//.+?/diziler(/.+?/)').findall(hrefs[-1])
            if len(paths) == 0: continue
            # Drop numeric HTML entities from the label before cleaning.
            title = re.sub('&#\d*;','', labels[0])
            shows.append((paths[0], cleantitle.get(title)))
        return shows
    except:
        return
示例15: get_sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Scrape gvideo and openload links from a Rainierland player page."""
    sources = []
    try:
        r = urlparse.urljoin(self.base_link, url)
        result = client2.http_get(r)
        # The player iframe lives inside a fluid-width wrapper div.
        r = client.parseDOM(result, 'div', attrs = {'class': 'screen fluid-width-video-wrapper'})[0]
        r = re.compile('src="(.*?)"').findall(r)
        if len(r) > 0:
            t = urlparse.urljoin(self.base_link, r[0])
            r2 = client2.http_get(t)
            # Direct <source> tags are Google-video streams.
            r3 = re.compile('<source src="(.*?)"').findall(r2)
            for i in r3:
                try:
                    sources.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'provider': 'Rainierland', 'url': i})
                except:
                    pass
            # Anchors on the player page are alternative (openload) mirrors.
            r4 = client.parseDOM(r2, 'a', ret='href')
            for i in r4:
                try:
                    url = resolvers.request(i)
                    sources.append({'source': 'openload', 'quality': 'HD', 'provider': 'Rainierland', 'url': url})
                except:
                    pass
        return sources
    except:
        return sources