本文整理汇总了Python中resources.lib.modules.client.randomagent函数的典型用法代码示例。如果您正苦于以下问题:Python randomagent函数的具体用法?Python randomagent怎么用?Python randomagent使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了randomagent函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: sources
def sources(self, url, hostDict, hostprDict):
    """Collect playable stream sources for the given page url.

    Resolves the page's AJAX endpoint, decodes its base64 JSON payload and
    yields either HLS variants ('CDN') or direct mp4 links ('gvideo').
    Always returns a list (possibly empty), never None.
    """
    sources = []
    try:
        if not url:
            return sources
        ref = urlparse.urljoin(self.base_link, url)
        # The AJAX endpoint is keyed by the trailing token of the page slug.
        url = urlparse.urljoin(self.base_link, self.ajax_link % re.findall('-(\w+)$', ref)[0])
        headers = {'Referer': ref, 'User-Agent': client.randomagent()}
        result = client.request(url, headers=headers, post='')
        # Payload is base64-encoded JSON with a 'playinfo' member that is
        # either a string (embed url) or a list of link dicts.
        result = base64.decodestring(result)
        result = json.loads(result).get('playinfo', [])
        if isinstance(result, basestring):
            # String payload: swap the embed page for the HLS master playlist.
            result = result.replace('embed.html', 'index.m3u8')
            base_url = re.sub('index\.m3u8\?token=[\w\-]+', '', result)
            r = client.request(result, headers=headers)
            # Parse master playlist into (height, variant-path) pairs.
            r = [(i[0], i[1]) for i in re.findall('#EXT-X-STREAM-INF:.*?RESOLUTION=\d+x(\d+)[^\n]+\n([^\n]+)', r, re.DOTALL) if i]
            r = [(source_utils.label_to_quality(i[0]), i[1] + source_utils.append_headers(headers)) for i in r]
            r = [{'quality': i[0], 'url': base_url + i[1]} for i in r]
            for i in r:
                sources.append({'source': 'CDN', 'quality': i['quality'], 'language': 'de', 'url': i['url'], 'direct': True, 'debridonly': False})
        elif result:
            # List payload: keep only entries that actually carry an mp4 link.
            result = [i.get('link_mp4') for i in result]
            result = [i for i in result if i]
            for i in result:
                try:
                    sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'de', 'url': i, 'direct': True, 'debridonly': False})
                except:
                    pass
        return sources
    except:
        # BUGFIX: previously `return` (None) — keep the return type
        # consistent so callers can always iterate the result.
        return sources
示例2: get2
def get2(url, check, headers=None, data=None):
    """Fetch `url` and return the body only if it contains `check`.

    Tries a direct request first, then up to two proxied attempts.
    Returns the page html on success, None on failure. Note: when the
    caller supplies `headers`, this function mutates it (adds 'Referer')
    on the proxied attempts, matching the original behavior.
    """
    if headers is None:
        headers = {
            'User-Agent': client.randomagent(),
        }
    # Direct attempt (the only one that sends `data`).
    try:
        request = urllib2.Request(url, headers=headers, data=data)
        html = urllib2.urlopen(request, timeout=10).read()
        if check in str(html):
            return html
    except:
        pass
    # BUGFIX/cleanup: the two proxied fallbacks were duplicated code and the
    # second one never closed its response; get_proxy_url() is re-evaluated
    # each iteration since it may rotate proxies between calls.
    for _ in range(2):
        try:
            new_url = get_proxy_url() % urllib.quote_plus(url)
            headers['Referer'] = 'http://%s/' % urlparse.urlparse(new_url).netloc
            request = urllib2.Request(new_url, headers=headers)
            response = urllib2.urlopen(request, timeout=10)
            try:
                html = response.read()
            finally:
                # Always release the connection, even if read() fails.
                response.close()
            if check in html:
                return html
        except:
            pass
    return
示例3: sources
def sources(self, url, hostDict, hostprDict):
    """Scrape a single hoster link for a movie.

    `url` is a dict with at least 'title' and 'year'. Searches the site,
    follows the first result, reads the quality badge and the player
    iframe, and returns a one-element source list (empty list / partial
    list on error).
    """
    try:
        sources = []
        if url is None: return sources
        # NOTE(review): `year` is read but never used below — kept so a
        # missing 'year' key still routes to the except path as before.
        year = url['year']
        h = {'User-Agent': client.randomagent()}
        title = cleantitle.geturl(url['title']).replace('-', '+')
        url = urlparse.urljoin(self.base_link, self.search_link % title)
        r = requests.get(url, headers=h)
        # First search hit, then follow through to the detail page.
        r = BeautifulSoup(r.text, 'html.parser').find('div', {'class': 'item'})
        r = r.find('a')['href']
        r = requests.get(r, headers=h)
        r = BeautifulSoup(r.content, 'html.parser')
        quality = r.find('span', {'class': 'calidad2'}).text
        url = r.find('div', {'class': 'movieplay'}).find('iframe')['src']
        # Anything that is not explicitly 1080p/720p is treated as SD.
        if quality not in ['1080p', '720p']:
            quality = 'SD'
        valid, host = source_utils.is_host_valid(url, hostDict)
        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
        return sources
    except:
        # BUGFIX: error message falsely referenced "Furk Script: check_api"
        # (copy-paste from another scraper).
        print("Unexpected error in sources:", sys.exc_info()[0])
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, exc_tb.tb_lineno)
        return sources
示例4: sources
def sources(self, url, hostDict, hostprDict):
    """Build the Einthusan HD stream source for a movie id.

    `url` is the Einthusan movie id. The CDN resolver is queried with the
    client's IP (placeholder 'London' outside Kodi). Returns a one-element
    source list, or the empty list on error.
    """
    try:
        sources = []
        if url is None: return sources  # idiomatic identity test (was '== None')
        # Kodi exposes the local IP; outside Kodi fall back to a placeholder.
        try:
            import xbmc
            ip = xbmc.getIPAddress()
        except:
            ip = 'London'
        referer = 'http://www.einthusan.com/movies/watch.php?id=%s' % url
        agent = client.randomagent()
        headers = {'User-Agent': agent, 'Referer': referer}
        url = 'http://cdn.einthusan.com/geturl/%s/hd/%s/' % (url, ip)
        url = client.request(url, headers=headers)
        # Kodi '|' convention: headers the player must send when fetching.
        url += '|%s' % urllib.urlencode({'User-agent': agent})
        sources.append({'source': 'einthusan', 'quality': 'HD', 'provider': 'Einthusan', 'url': url, 'direct': True, 'debridonly': False})
        return sources
    except:
        return sources
示例5: sources
def sources(self, url, hostDict, hostprDict):
    # Log in with the configured account, open the watch page, and harvest
    # stream links from the embedded player (encrypted 'mplanet' redirects,
    # inline http sources, and <source>/src: fallbacks).
    try:
        sources = []
        if url == None: return sources
        # Account credentials are required for this host.
        if (self.user == '' or self.password == ''): raise Exception()
        login = urlparse.urljoin(self.base_link, '/login')
        post = {'username': self.user, 'password': self.password, 'returnpath': '/'}
        post = urllib.urlencode(post)
        headers = {'User-Agent':client.randomagent()}
        rlogin = client.request(login, headers=headers, post=post, output='extended')
        # First cookie fragment from the login response's Set-Cookie header.
        guid = re.findall('(.*?);\s', rlogin[2]['Set-Cookie'])[0]
        # NOTE(review): `headers` has no 'Cookie' key above — this `+=`
        # relies on client.request() having injected one into the dict it
        # was given; otherwise it raises KeyError (swallowed by the outer
        # except). Confirm against the project's client module.
        headers['Cookie'] += '; '+guid
        url = urlparse.urljoin(self.base_link, url)
        result = client.request(url, headers=headers)
        url = re.findall("embeds\[\d+\]\s*=\s*'([^']+)", result)[0]
        url = client.parseDOM(url, 'iframe', ret='src')[0]
        url = url.replace('https://', 'http://')
        links = []
        # Optional encrypted 'mplanet' redirect: decrypt and expand into
        # Google-video variants.
        # NOTE(review): this branch appends 3-tuples while the loop at the
        # bottom indexes entries by key ('source'/'quality'/'url'); if this
        # branch ever fires, that loop would raise (swallowed by the outer
        # except). Verify intended behavior.
        try:
            dec = re.findall('mplanet\*(.+)', url)[0]
            dec = dec.rsplit('&')[0]
            dec = self._gkdecrypt(base64.b64decode('MllVcmlZQmhTM2swYU9BY0lmTzQ='), dec)
            dec = directstream.google(dec)
            links += [(i['url'], i['quality'], 'gvideo') for i in dec]
        except:
            pass
        result = client.request(url, headers=headers)
        # Direct http urls embedded in the player page.
        try:
            url = re.findall('src\s*=\s*(?:\'|\")(http.+?)(?:\'|\")', result)
            for i in url:
                try: links.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i})
                except: pass
        except:
            pass
        # Fallback: <source src=...> tags or inline "src: '...'" assignments.
        try:
            url = client.parseDOM(result, 'source', ret='src')
            url += re.findall('src\s*:\s*\'(.*?)\'', result)
            url = [i for i in url if '://' in i]
            links.append({'source': 'cdn', 'quality': 'HD', 'url': url[0]})
        except:
            pass
        for i in links:
            sources.append({'source': i['source'], 'quality': i['quality'], 'language': 'en', 'url': i['url'], 'direct': True, 'debridonly': False})
        return sources
    except:
        return sources
示例6: sources
def sources(self, url, hostDict, hostprDict):
    # Resolve a movie/episode page to a single 'CDN' stream via the site's
    # load_player_v3 AJAX endpoint.
    try:
        sources = []
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        # NOTE(review): eval() on the serialized alias list — acceptable for
        # internally-built urls, unsafe if `url` can come from outside.
        aliases = eval(data['aliases'])
        headers = {}
        if 'tvshowtitle' in data:
            episode = int(data['episode'])
            url = self.searchShow(data['tvshowtitle'], data['season'], data['year'], aliases, headers)
        else:
            episode = 0
            url = self.searchMovie(data['title'], data['year'], aliases, headers)
        if url == None: return sources
        # Normalize to the '/watching.html' player page.
        url = urlparse.urljoin(self.base_link, url)
        url = re.sub('/watching.html$', '', url.strip('/'))
        url = url + '/watching.html'
        p = client.request(url)
        if episode > 0:
            # Pick the link matching the requested episode number from the
            # 'ep_link' block.
            r = client.parseDOM(p, 'div', attrs={'class': 'ep_link.+?'})[0]
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
            r = [(i[0], re.findall('Episode\s+(\d+)', i[1])) for i in r]
            r = [(i[0], i[1][0]) for i in r]
            url = [i[0] for i in r if int(i[1]) == episode][0]
        # Re-fetch the (possibly episode-specific) page before scraping the
        # player id. NOTE(review): indentation reconstructed from a mangled
        # paste — confirm this line sits outside the episode branch.
        p = client.request(url, headers=headers, timeout='10')
        referer = url
        id = re.findall('load_player\(.+?(\d+)', p)[0]
        r = urlparse.urljoin(self.base_link, '/ajax/movie/load_player_v3?id=%s' % id)
        r = client.request(r, referer=referer, XHR=True)
        url = json.loads(r)['value']
        # Endpoint may return a protocol-relative url.
        if (url.startswith('//')):
            url = 'https:' + url
        r = client.request(url, referer=referer, XHR=True)
        headers = {
            'User-Agent': client.randomagent(),
            'Referer': referer
        }
        # Kodi '|' convention: append the spoofed headers to the stream url.
        headers = '|' + urllib.urlencode(headers)
        source = str(json.loads(r)['playlist'][0]['file']) + headers
        sources.append({'source': 'CDN', 'quality': 'HD', 'language': 'en', 'url': source, 'direct': True, 'debridonly': False})
        return sources
    except:
        return sources
示例7: __get_cookies
def __get_cookies(self, url):
    """Warm up the site's cookie jar in three requests and return
    (cookie_string, headers) for subsequent calls."""
    headers = {'User-Agent': client.randomagent()}
    # Step 1: hit the target page to receive the initial cookies.
    cookie = client.request(url, headers=headers, output='cookie')
    # Step 2: touch the '/av' endpoint with those cookies (age gate / token).
    cookie = client.request(urlparse.urljoin(self.base_link, '/av'), cookie=cookie, output='cookie', headers=headers, referer=url)
    # Step 3: revisit the target page so the final cookie set is complete.
    cookie = client.request(url, cookie=cookie, headers=headers, referer=url, output='cookie')
    return cookie, headers
示例8: sources
def sources(self, url, hostDict, locDict):
    # Search imdark.com for the title, then drive the player's AJAX action
    # ('getitsufiplaying') to obtain direct CDN stream variants.
    sources = []
    req = requests.Session()
    headers = {'User-Agent': client.randomagent(), 'Origin': 'http://imdark.com', 'Referer': 'http://imdark.com',
    'X-Requested-With': 'XMLHttpRequest'}
    try:
        if url == None: return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        query = urllib.quote_plus(title).lower()
        # The search form carries a hidden token ('darkestsearch') that must
        # be echoed back along with the query.
        result = req.get(self.base_link, headers=headers).text
        darksearch = re.findall(r'darkestsearch" value="(.*?)"', result)[0]
        result = req.get(self.base_link + self.search_link % (query, darksearch), headers=headers).text
        r = client.parseDOM(result, 'div', attrs={'id':'showList'})
        r = re.findall(r'<a\s+style="color:white;"\s+href="([^"]+)">([^<]+)', r[0])
        # First hit whose cleaned title matches and whose label contains the
        # requested year.
        r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1]) and data['year'] in i[1]][0]
        url = r[0]
        print("INFO - " + url)
        result = req.get(url, headers=headers).text
        # The player page embeds a nonce + 'tipi' pair consumed by the AJAX
        # action below.
        nonce = re.findall(r"nonce = '(.*?)'", result)[0]
        tipi = re.findall(r'tipi = (.*?);', result)[0]
        postData = {'action':'getitsufiplaying', 'tipi':tipi, 'jhinga':nonce}
        result = req.post(self.base_link + self.ajax_link, data=postData, headers=headers).text
        r = re.findall(r'"src":"(.*?)","type":"(.*?)","data-res":"(\d*?)"', result)
        # Kodi '|' suffix: headers (and a required dummy cookie) the player
        # must send when fetching the stream.
        linkHeaders = 'Referer=http://imdark.com/&User-Agent=' + urllib.quote(client.randomagent()) + '&Cookie=' + urllib.quote('mykey123=mykeyvalue')
        for i in r:
            print(str(i))
            try:
                q = source_utils.label_to_quality(i[2])
                sources.append({'source': 'CDN', 'quality': q, 'info': i[1].replace('\\', ''), 'language': 'en',
                'url': i[0].replace('\\','') + '|' + linkHeaders,
                'direct': True, 'debridonly': False})
            except:
                traceback.print_exc()
                pass
        for i in sources:
            print("INFO SOURCES " + str(i))
        return sources
    except:
        traceback.print_exc()
        return sources
示例9: request
def request(self, url):
    """Fetch `url` with a random browser User-Agent, transparently
    decompressing gzip responses. Returns the body, or None on error."""
    try:
        req = urllib2.Request(url)
        req.add_header('User-Agent', client.randomagent())
        res = urllib2.urlopen(req)
        try:
            data = res.read()
        finally:
            # BUGFIX: previously close() was skipped if read() raised,
            # leaking the connection.
            res.close()
        if res.info().getheader('Content-Encoding') == 'gzip':
            data = gzip.GzipFile(fileobj=StringIO.StringIO(data)).read()
        return data
    except:
        return
示例10: _createSession
def _createSession(self, customHeaders=None):
    """Create a requests.Session spoofing web-browser headers.

    customHeaders may carry 'UA' (User-Agent override) and 'referer'.
    BUGFIX: the default was the mutable `{}` (classic shared-default
    pitfall); `None` + local fallback is behaviorally identical for all
    existing callers.
    """
    if customHeaders is None:
        customHeaders = {}
    session = requests.Session()
    session.headers.update(
        {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'User-Agent': customHeaders.get('UA', randomagent()),
            'Accept-Language': 'en-US,en;q=0.5',
            'Referer': customHeaders.get('referer', self.BASE_URL + '/'),
            'DNT': '1'
        }
    )
    return session
示例11: sources
def sources(self, url, hostDict, hostprDict):
    # Fetch the title page, replay its CSRF token against the provision
    # AJAX endpoint, and turn each listed hoster entry into a source dict.
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        h = {'User-Agent': client.randomagent()}
        # 'extended' output: result[0] is the page body; result[4] is used
        # as the cookie string below.
        result = client.request(url, output='extended', headers=h)
        cookie = result[4]
        # The AJAX provider id is published via <meta property="provision">.
        ajax_prov = client.parseDOM(result[0], 'meta', attrs={'property': 'provision'}, ret='content')[0]
        ajax_url = urlparse.urljoin(self.base_link, self.ajax_link) % ajax_prov
        # Django-style CSRF: echo the csrftoken cookie as X-CSRFToken.
        h['X-CSRFToken']=re.findall ('csrftoken=(.*?);', cookie)[0]
        result = client.request(ajax_url, cookie=cookie, XHR=True, headers=h)
        r = client.parseDOM(result, 'div', attrs={'class':'host-container pull-left'})
        # Per host entry: embed url, language label, quality tooltip, and
        # whether the HD glyph is present.
        r = [(client.parseDOM(i, 'div', attrs={'class': 'url'}, ret='data-url'),
        client.parseDOM(i, 'span', attrs={'class':'label label-default'}),
        client.parseDOM(i, 'img', attrs={'class': 'ttip'}, ret='title'),
        client.parseDOM(i, 'span', attrs={'class': 'glyphicon glyphicon-hd-video ttip'}, ret='title'),
        ) for i in r]
        r = [(self.html_parser.unescape(i[0][0]), i[1][0], i[2][0], len(i[3]) > 0) for i in r]
        r = [(client.parseDOM(i[0], 'iframe', ret='src'), i[1], i[2], i[3]) for i in r]
        # Keep only entries that actually embed an iframe.
        r = [(i[0][0], i[1], i[2], i[3]) for i in r if len(i[0]) > 0]
        for i in r:
            try:
                # Normalize the hoster domain name.
                host = urlparse.urlparse(i[0]).netloc
                host = host.replace('www.', '').replace('embed.', '')
                host = host.lower()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                lang, info = self.get_lang_by_type(i[1])
                # 'Wysoka' (Polish for 'high') in the tooltip means HD;
                # the HD-video glyph upgrades it to 1080p.
                q = 'SD'
                if 'Wysoka' in i[2]: q = 'HD'
                if i[3] == True: q = '1080p'
                sources.append({'source': host, 'quality': q, 'language': lang, 'url': i[0], 'info': info, 'direct': False, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
示例12: get_raw
def get_raw(url, headers=None, data=None):
    """Open `url` through the rotating proxy and return the raw response
    object, or None (implicitly) on any failure. `data` is accepted for
    signature compatibility but unused, as in the original API."""
    # Default to a random browser User-Agent when the caller supplies none.
    if headers is None:
        headers = {
            'User-Agent': client.randomagent(),
        }
    try:
        proxied = get_proxy_url() % urllib.quote_plus(url)
        # The proxy expects a Referer pointing at its own host.
        headers['Referer'] = 'http://%s/' % urlparse.urlparse(proxied).netloc
        return urllib2.urlopen(urllib2.Request(proxied, headers=headers), timeout=10)
    except:
        # Best effort: swallow all errors and fall through to None.
        pass
示例13: _createSession
def _createSession(self, userAgent=None, cookies=None, referer=None):
    """Return a requests.Session whose headers mimic a desktop browser.

    Any of userAgent / cookies / referer may be given to override the
    spoofed defaults.
    """
    browserHeaders = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'User-Agent': userAgent or randomagent(),
        'Accept-Language': 'en-US,en;q=0.5',
        'Referer': referer or (self.BASE_URL + '/'),
        'Upgrade-Insecure-Requests': '1',
        'DNT': '1'
    }
    session = requests.Session()
    session.headers.update(browserHeaders)
    if cookies:
        session.cookies.update(cookies)
    return session
示例14: _createSession
def _createSession(self, userAgent=None, cookies=None, referer=None):
    """Create a browser-spoofing requests.Session, optionally seeded with
    cookies, a custom User-Agent and a Referer."""
    spoofed = {
        'Accept': self.DEFAULT_ACCEPT,
        'User-Agent': userAgent or randomagent(),
        'Accept-Language': 'en-US,en;q=0.5',
        'Referer': referer or (self.BASE_URL + '/'),
        'DNT': '1'
    }
    session = requests.Session()
    session.headers.update(spoofed)
    if cookies:
        session.cookies.update(cookies)
    # The site expects an empty-named '__test' cookie; _getSearch() has the
    # details.
    session.cookies[''] = '__test'
    return session
示例15: __init__
def __init__(self):
    """Initialize provider metadata, endpoints, and the default header set
    used by the site's AJAX search."""
    self.priority = 1
    self.language = ['en']
    self.domains = ['ondarewatch.com', 'dailytvfix.com']
    self.base_link = 'http://www.dailytvfix.com'
    self.search_link = self.base_link + '/ajax/search.php'
    # One random browser UA per provider instance, reused for all requests.
    self.ua = client.randomagent()
    # Host header must match base_link without its scheme.
    hostName = self.base_link.replace('http://', '', 1)
    self.search_headers = {
        'Host': hostName,
        'User-Agent': self.ua,
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate',
        'Referer': self.base_link + '/',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'X-Requested-With': 'XMLHttpRequest',
        'DNT': '1'
    }