本文整理汇总了Python中resources.lib.modules.client.replaceHTMLCodes函数的典型用法代码示例。如果您正苦于以下问题:Python replaceHTMLCodes函数的具体用法?Python replaceHTMLCodes怎么用?Python replaceHTMLCodes使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了replaceHTMLCodes函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: sources
def sources(self, url, hostDict, hostprDict):
    """Build a list of playable-source dicts for the Watchover provider.

    Walks the candidate links collected earlier in self.zen_url. Links on
    the provider's own domain are scraped for direct (gvideo) file URLs;
    anything else is emitted as an external-host entry.
    Returns a list of source dicts; on error, whatever was gathered so far.
    """
    try:
        sources = []
        for links in self.zen_url:
            if self.base_link in links:
                # On-site page: pull file/label pairs out of the JS player config.
                result = client.request(links)
                match = re.compile("file:\s*'(.*?)',.+?abel:'(.*?)',", re.DOTALL).findall(result)
                for url, quality in match:
                    if "1080" in quality: quality = "1080p"
                    elif "720" in quality: quality = "HD"
                    else: quality = "SD"
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'Watchover', 'url': url, 'direct': True, 'debridonly': False})
            else:
                # External host: derive the host name from the URL's netloc.
                url = links
                try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                except: host = 'Openload'
                # FIX: quality was only set on the no-exception path, leaving it
                # unbound when the host lookup failed; set it unconditionally.
                quality = "SD"
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                sources.append({'source': host, 'quality': quality, 'provider': 'Watchover', 'url': url, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
示例2: imdb_person_list
def imdb_person_list(self, url):
    """Scrape an IMDb person-listing page into self.list.

    Each appended entry has 'name', 'url' (normalized via self.person_link)
    and 'image' (upscaled to _SX500). Returns self.list, or None if the
    page could not be fetched/parsed at all. Per-item failures are skipped.
    """
    try:
        result = client.request(url)
        result = result.decode('iso-8859-1').encode('utf-8')
        items = client.parseDOM(result, 'tr', attrs = {'class': '.+? detailed'})
    except:
        return
    for item in items:
        try:
            name = client.parseDOM(item, 'a', ret='title')[0]
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')
            url = client.parseDOM(item, 'a', ret='href')[0]
            # Keep only the nm####### person id and rebuild a canonical link.
            url = re.findall('(nm\d*)', url, re.I)[0]
            url = self.person_link % url
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            image = client.parseDOM(item, 'img', ret='src')[0]
            # Skip placeholder images that carry no IMDb size markers.
            if not ('._SX' in image or '._SY' in image): raise Exception()
            image = re.sub('_SX\d*|_SY\d*|_CR\d+?,\d+?,\d+?,\d*','_SX500', image)
            image = client.replaceHTMLCodes(image)
            image = image.encode('utf-8')
            self.list.append({'name': name, 'url': url, 'image': image})
        except:
            pass
    return self.list
示例3: movie
def movie(self, imdb, title, year):
    """Search the provider for a movie and return its site-relative URL path.

    Matches by cleaned title and a year within +/- 1 of the requested year.
    Returns the UTF-8 path string, or None on any failure.
    """
    try:
        query = self.search_link % urllib.quote_plus(title)
        query = urlparse.urljoin(self.base_link, query)
        result = self.request(query, 'movie_table')
        result = client.parseDOM(result, 'div', attrs = {'class': 'movie_table'})
        title = cleantitle.get(title)
        # Accept the exact year plus one year either side (release-date drift).
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'img', ret='alt')) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [i for i in result if any(x in i[1] for x in years)]
        result = [i[0] for i in result if title == cleantitle.get(i[1])][0]
        url = client.replaceHTMLCodes(result)
        # Unwrap redirect-style links carrying the target in ?q= or ?u=.
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
        except: pass
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
        except: pass
        url = urlparse.urlparse(url).path
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
示例4: imdb_user_list
def imdb_user_list(self, url):
    """Scrape the user's IMDb lists page into self.list, sorted by name.

    Returns self.list, or None if the page fetch/parse fails outright.
    """
    try:
        result = client.request(url)
        items = client.parseDOM(result, 'li', attrs = {'class': 'ipl-zebra-list__item user-list'})
    except:
        # FIX: was 'pass', which fell through to 'for item in items' with
        # 'items' unbound and raised NameError; bail out like imdb_person_list.
        return
    for item in items:
        try:
            name = client.parseDOM(item, 'a')[0]
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')
            url = client.parseDOM(item, 'a', ret='href')[0]
            # Extract the list id from '/list/<id>/' and rebuild a canonical link.
            url = url.split('/list/', 1)[-1].strip('/')
            url = self.imdblist_link % url
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            self.list.append({'name': name, 'url': url, 'context': url})
        except:
            pass
    self.list = sorted(self.list, key=lambda k: utils.title_key(k['name']))
    return self.list
示例5: episode
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Locate a specific episode's page on the provider.

    Tries, in order: title + air-date match, air-date alone, then a
    'season-N-episode-M' substring in the href. Returns the UTF-8 URL
    path, or None on failure.
    """
    try:
        if url == None: return
        url = urlparse.urljoin(self.base_link, url)
        result = client.request(url)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, 'div', attrs = {'class': 'tv_episode_item'})
        title = cleantitle.get(title)
        # Convert YYYY-MM-DD into the site's 'Month D YYYY' date format.
        premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(premiered)[0]
        premiered = '%s %01d %s' % (premiered[1].replace('01','January').replace('02','February').replace('03','March').replace('04','April').replace('05','May').replace('06','June').replace('07','July').replace('08','August').replace('09','September').replace('10','October').replace('11','November').replace('12','December'), int(premiered[2]), premiered[0])
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'class': 'tv_episode_name'}), client.parseDOM(i, 'span', attrs = {'class': 'tv_num_versions'})) for i in result]
        # Normalize to (href, name-or-None, date-or-None) triples.
        result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
        result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
        result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]
        url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
        if len(url) == 0: url = [i for i in result if premiered == i[2]]
        if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]
        url = client.replaceHTMLCodes(url[0][0])
        url = urlparse.urlparse(url).path
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
示例6: sources
def sources(self, url, hostDict, hostprDict):
    """Build debrid-only source entries for the Bmoviez provider.

    Quality is inferred from the post title; links matching a junk/blacklist
    substring (images, archives, social sites, ...) are dropped.
    Returns a list of source dicts; on error, whatever was gathered so far.
    """
    try:
        sources = []
        for movielink, title in self.zen_url:
            mylink = client.request(movielink)
            if "1080" in title: quality = "1080p"
            elif "720" in title: quality = "HD"
            else: quality = "SD"
            for item in parse_dom(mylink, 'div', {'class': 'entry-content'}):
                match = re.compile('<a href="(.+?)">(.+?)</a>').findall(item)
                for url, title in match:
                    myurl = str(url)
                    # Drop non-video junk links by URL and by anchor text.
                    if not any(value in myurl for value in ['sendspace','imagebam','imgserve','histat','crazy4tv','facebook','.rar', 'subscene','.jpg','.RAR', 'postimage', 'safelinking','linx.2ddl.ag','upload.so','.zip', 'go4up','imdb']):
                        if not any(value in title for value in ['sendspace','imagebam','imgserve','histat','crazy4tv','facebook','.rar', 'subscene','.jpg','.RAR', 'postimage', 'safelinking','linx.2ddl.ag','upload.so','.zip', 'go4up','imdb']):
                            url = client.replaceHTMLCodes(url)
                            url = url.encode('utf-8')
                            try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                            except: host = 'Videomega'
                            host = client.replaceHTMLCodes(host)
                            host = host.encode('utf-8')
                            sources.append({'source': host, 'quality': quality, 'provider': 'Bmoviez', 'url': url, 'direct': False, 'debridonly': True})
        return sources
    except:
        return sources
示例7: sources
def sources(self, url, hostDict, hostprDict):
    """Build debrid-only source entries for the Crazy provider.

    Parses anchors out of each post's entry-content, infers quality and
    extra info (HEVC/3D) from the anchor text, and blacklists junk links.
    Returns a list of source dicts; on error, whatever was gathered so far.
    """
    try:
        sources = []
        for movielink in self.zen_url:
            mylink = client.request(movielink)
            r = client.parseDOM(mylink, 'div', attrs = {'class': 'entry-content'})
            for links in r:
                try:
                    match = re.compile('href="([^"]+)[^>]+>([^<]+)').findall(links)
                    for url, title in match:
                        url = client.replaceHTMLCodes(url)
                        url = url.encode('utf-8')
                        title = title.encode('utf-8')
                        if "1080" in title: quality = "1080p"
                        elif "720" in title: quality = "HD"
                        else: quality = "SD"
                        info = ''
                        if "hevc" in title.lower(): info = "HEVC"
                        if "3d" in title.lower(): info = "3D"
                        # Drop non-video junk links by anchor text and by URL.
                        if not any(value in title for value in ['uploadkadeh','wordpress','crazy4tv','imdb.com','youtube','userboard','kumpulbagi','mexashare','myvideolink.xyz', 'myvideolinks.xyz' , 'costaction', 'crazydl','.rar', '.RAR', 'safelinking','linx.2ddl.ag','upload.so','.zip', 'go4up', 'adf.ly','.jpg','.jpeg']):
                            if not any(value in url for value in ['uploadkadeh','wordpress','crazy4tv','imdb.com','youtube','userboard','kumpulbagi','mexashare','myvideolink.xyz', 'myvideolinks.xyz' , 'costaction', 'crazydl','.rar', '.RAR', 'safelinking','linx.2ddl.ag','upload.so','.zip', 'go4up', 'adf.ly','.jpg','.jpeg']):
                                try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                                except: host = 'Rapidgator'
                                if "sh.st" in url: host = 'SHST'
                                url = client.replaceHTMLCodes(url)
                                url = url.encode('utf-8')
                                sources.append({'source': host, 'quality': quality, 'provider': 'Crazy', 'url': url, 'info': info,'direct': False, 'debridonly': True})
                except:
                    pass
        return sources
    except:
        return sources
示例8: sources
def sources(self, url, hostDict, hostprDict):
    """Build source entries from 'll-item' listing rows via dom_parser2.

    Each row yields (href, host-text, notes); the host is validated against
    hostDict and quality/info derived from the notes text.
    Returns a list of source dicts; on error, whatever was gathered so far.
    """
    try:
        sources = []
        if url == None: return sources
        r = client.request(url)
        r = dom_parser2.parse_dom(r, 'div', {'class': 'll-item'})
        r = [(dom_parser2.parse_dom(i, 'a', req='href'), \
              dom_parser2.parse_dom(i, 'div', {'class': 'notes'})) \
             for i in r if i]
        # -> (href, anchor text, notes text or 'None')
        r = [(i[0][0].attrs['href'], i[0][0].content, i[1][0].content if i[1] else 'None') for i in r]
        for i in r:
            try:
                url = i[0]
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                valid, host = source_utils.is_host_valid(i[1], hostDict)
                if not valid: continue
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                info = []
                quality, info = source_utils.get_release_quality(i[2], i[2])
                info = ' | '.join(info)
                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
示例9: sources
def sources(self, url, hostDict, hostprDict):
    """Build source entries for the Fdl provider.

    Non-episode links are scraped for direct CDN URLs; '=episode' links are
    emitted as-is. Finally, only URLs whose extension looks like a video
    container are kept. Returns the filtered list; on error, whatever was
    gathered so far.
    """
    try:
        sources = []
        for movielink, title in url:
            if not "=episode" in title:
                mylink = client.request(movielink)
                match = re.compile('<a rel="nofollow" href="(.+?)">').findall(mylink)
                for url in match:
                    if "1080" in url: quality = "1080p"
                    elif "720" in url: quality = "HD"
                    else: quality = "SD"
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    sources.append({'source': 'cdn', 'quality': quality, 'provider': 'Fdl', 'url': url, 'direct': True, 'debridonly': False})
            elif "=episode" in title:
                if "1080" in title: quality = "1080p"
                elif "720" in title: quality = "HD"
                else: quality = "SD"
                url = movielink
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                sources.append({'source': 'cdn', 'quality': quality, 'provider': 'Fdl', 'url': url, 'direct': False, 'debridonly': False})
        # Keep only links that end in a recognized video-container extension.
        sources = [i for i in sources if i['url'].split('?')[0].split('&')[0].split('|')[0].rsplit('.')[-1].replace('/', '').lower() in ['avi','mkv','mov','mp4','xvid','divx']]
        return sources
    except:
        return sources
示例10: sources
def sources(self, url, hostDict, hostprDict):
try:
sources = []
self.movielinks = []
for movielink,title in self.url:
quality = "SD"
if "1080" in title: quality = "1080p"
elif "720" in title: quality = "HD"
mylink = client.request(movielink)
posts = client.parseDOM(mylink, 'div', attrs = {'class': 'postContent'})
for item in posts:
match = re.compile('href="([^"]+)').findall(item)
for url in match:
print "SCENEDOWN URLS PASSED %s" % url
if not any(value in url for value in ['scene-rls.com','nfo.','sample','.nfo','uploadkadeh','wordpress','crazy4tv','imdb.com','youtube','userboard','kumpulbagi','mexashare','myvideolink.xyz', 'myvideolinks.xyz' , 'costaction', 'crazydl','.rar', '.RAR', 'safelinking','linx.2ddl.ag','upload.so','.zip', 'go4up', 'adf.ly','.jpg','.jpeg']):
if any(value in url for value in hostprDict):
try:host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
except: host = 'Videomega'
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
sources.append({'source': host, 'quality': quality, 'provider': 'Scenedown', 'url': url, 'direct': False, 'debridonly': True})
return sources
except:
return sources
示例11: radio181fm
def radio181fm():
    """List 181.fm radio streams as directory items.

    Scrapes the station name / stream URL pairs from the mp3links page,
    de-duplicates, sorts by name, and adds one directory item per stream.
    """
    try:
        url = 'http://www.181.fm/index.php?p=mp3links'
        result = client.request(url)
        index = []
        items = client.parseDOM(result, 'td', attrs={'id': 'rightlinks'})
    except:
        # FIX: was 'pass', which fell through to 'for item in items' with
        # 'items' unbound and raised NameError; degrade to an empty listing.
        items = []
        index = []
    for item in items:
        try:
            if not item.startswith('http://'): raise Exception()
            # The station name is the nearest preceding non-URL cell.
            name = items[:items.index(item)]
            name = [i for i in name if not 'http://' in i][-1]
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')
            # The page encodes stream URLs with ':' for '/'; undo that.
            url = item.split('<')[0].replace(':', '/').replace('///', '://')
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            index.append({'name': name, 'url': url, 'thumb': '0', 'image': radio181fmicon, 'fanart': radio181fmfanart})
        except:
            pass
    # De-duplicate while preserving first occurrence, then sort by name.
    index = [i for x, i in enumerate(index) if i not in index[x+1:]]
    index = sorted(index, key=lambda k: k['name'])
    for i in index: addDirectoryItem(i['name'], i['url'], i['thumb'], i['image'], i['fanart'])
    endDirectory()
示例12: sources
def sources(self, url, hostDict, hostprDict):
    """Build debrid-only source entries for the Bmoviez provider (OPEN_URL variant).

    Keeps only anchors whose URL matches a premium host (hostprDict) and is
    not on the zip/junk blacklist. Returns a list of source dicts; on error,
    whatever was gathered so far.
    """
    try:
        sources = []
        for movielink, title in self.zen_url:
            mylink = OPEN_URL(movielink).content
            if "1080" in title: quality = "1080p"
            elif "720" in title: quality = "HD"
            else: quality = "SD"
            print ("BMOVIES SOURCES movielink", movielink)
            for item in parse_dom(mylink, 'div', {'class': 'entry-content'}):
                match = re.compile('<a href="(.+?)">(.+?)</a>').findall(item)
                for url, title in match:
                    myurl = str(url)
                    # Require a premium host and reject blacklisted archives.
                    if any(value in myurl.lower() for value in hostprDict):
                        if not any(value in myurl.lower() for value in self.blacklist_zips):
                            url = client.replaceHTMLCodes(url)
                            url = url.encode('utf-8')
                            try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                            except: host = 'Videomega'
                            host = client.replaceHTMLCodes(host)
                            host = host.encode('utf-8')
                            sources.append({'source': host, 'quality': quality, 'provider': 'Bmoviez', 'url': url, 'direct': False, 'debridonly': True})
        return sources
    except:
        return sources
示例13: sources
def sources(self, url, hostDict, hostprDict):
    """Build debrid-only source entries for the Allrls provider.

    Scrapes unquoted hrefs from each post's entry-content, blacklists junk
    links, and emits everything as SD. Returns a list of source dicts; on
    error, whatever was gathered so far.
    """
    try:
        sources = []
        for movielink, title in self.zen_url:
            quality = "SD"
            mylink = client.request(movielink)
            # (removed unused local 'pattern'; the regex below is the one used)
            for item in parse_dom(mylink, 'div', {'class': 'entry-content'}):
                match = re.compile('href=([^\s]+)').findall(item)
                for url in match:
                    if not any(value in url for value in ['bankupload','24uploading','crazy4tv','facebook','.rar', 'subscene','.jpg','.RAR', 'postimage', 'safelinking','linx.2ddl.ag','upload.so','.zip', 'go4up','imdb']):
                        try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                        except: host = 'Videomega'
                        url = client.replaceHTMLCodes(url)
                        url = url.encode('utf-8')
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')
                        sources.append({'source': host, 'quality': quality, 'provider': 'Allrls', 'url': url, 'direct': False, 'debridonly': True})
        return sources
    except:
        return sources
示例14: sources
def sources(self, url, hostDict, hostprDict):
    """Build debrid-only source entries for the Threemovies provider.

    Fetches each page with requests + a random user agent, walks anchors
    inside 'txt-block' divs via BeautifulSoup, and keeps only premium-host
    links. Returns a list of source dicts; on error, whatever was gathered
    so far.
    """
    try:
        sources = []
        headers = {'Accept-Language': 'en-US,en;q=0.5', 'User-Agent': random_agent()}
        for movielink, title in self.genesisreborn_url:
            quality = quality_tag(title)
            html = BeautifulSoup(requests.get(movielink, headers=headers, timeout=10).content)
            containers = html.findAll('div', attrs={'class': 'txt-block'})
            for result in containers:
                print("THREEMOVIES LINKS ", result)
                links = result.findAll('a')
                for r_href in links:
                    url = r_href['href']
                    myurl = str(url)
                    # Keep only links hosted on a premium (debrid) host.
                    if any(value in myurl for value in hostprDict):
                        try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                        except: host = 'Threemovies'
                        url = client.replaceHTMLCodes(url)
                        url = url.encode('utf-8')
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')
                        sources.append({'source': host, 'quality': quality, 'provider': 'Threemovies', 'url': url, 'direct': False, 'debridonly': True})
        return sources
    except:
        return sources
示例15: anime_list_3
def anime_list_3(self, url):
    """Scrape an anime listing page into self.list.

    Each entry has 'title', 'url' (absolutized against self.anime_link) and
    'image'. Movie rows (anchor text containing '>Movie<') are skipped.
    Returns self.list, or None if the page fetch/parse fails outright.
    """
    try:
        url = urlparse.urljoin(self.anime_link, url)
        result = client.request(url)
        items = client.parseDOM(result, 'div', attrs={'id': 'left_content'})[0]
        items = client.parseDOM(items, 'zi')
    except:
        return
    for item in items:
        try:
            title = client.parseDOM(item, 'a')[0]
            if '>Movie<' in title: raise Exception()
            # Strip tags, escapes, and newlines from the anchor text.
            title = re.sub('<.+?>|</.+?>|\\\\|\n', '', title).strip()
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')
            url = client.parseDOM(item, 'a', ret='href')[0]
            url = urlparse.urljoin(self.anime_link, url)
            url = url.replace(' ','%20')
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            image = client.parseDOM(item, 'img', ret='src')[0]
            image = urlparse.urljoin(self.anime_link, image)
            image = image.encode('utf-8')
            self.list.append({'title': title, 'url': url, 'image': image})
        except:
            pass
    return self.list