本文整理汇总了Python中tools.urlopen函数的典型用法代码示例。如果您正苦于以下问题:Python urlopen函数的具体用法?Python urlopen怎么用?Python urlopen使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了urlopen函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: Episode
def Episode(self, stream_name, stream_id, page, totalpage):
    """Return episode objects for a Hulu show.

    `stream_id` is the show page URL; the numeric show id is scraped from
    that page and fed to Hulu's JSON video API.  Returns [] when the show
    page cannot be fetched.

    Fix: the page count previously used Python 2 integer division inside
    math.ceil(), which truncated before rounding and under-counted the
    number of pages (e.g. 25 items / pageSize 10 gave 2 instead of 3).
    The unused BeautifulSoup parse of the page was also dropped.
    """
    data = tools.urlopen(self.app, stream_id, {'cache':3600})
    if not data:
        return []
    show_id = re.compile('show\/(.*?)\?size\=', re.DOTALL + re.IGNORECASE).search(str(data)).group(1)
    url = self.url_base + "/api/2.0/videos.json?free_only="+self.free+"&include_seasons=true&order=asc&shorter_cache=true&show_id="+show_id+"&sort=original_premiere_date&video_type%5B%5D=episode&video_type%5B%5D=game&items_per_page=" + str(self.pageSize) + "&position=" + str(self.pageSize * (page - 1)) + "&_user_pgid=1&_content_pgid=67&_device_id=1"
    data = tools.urlopen(self.app, url)
    json_data = json.loads(data)
    if totalpage == "":
        total_count = int(json_data['total_count'])
        if total_count > self.pageSize:
            # Float division so a partial last page still counts as a page.
            totalpage = int(math.ceil(total_count / float(self.pageSize)))
        else:
            totalpage = 1
    episodelist = list()
    for item in json_data['data']:
        video = item['video']
        episode = CreateEpisode()
        episode.name = stream_name
        episode.id = self.url_base + '/watch/' + str(video['id'])
        episode.description = 'Episode: ' + str(video['episode_number']) + ' - ' + str(video['title'])
        episode.thumbnails = 'http://ib1.huluim.com/video/' + str(video['content_id']) + '?size=220x124'
        episode.date = 'Season: ' + str(video['season_number'])
        episode.page = page
        episode.totalpage = totalpage
        episodelist.append(episode)
    return episodelist
示例2: Play
def Play(self, stream_name, stream_id, subtitle):
    """Resolve an Uitzending Gemist (omroep.nl) episode page into a play object.

    Flow: scrape the episode id from the page, fetch a security key from
    pi.omroep.nl, derive an MD5 token from "<episode id>|<key part>", then
    ask the stream-info service for the WVC1 stream URL.  Optionally
    attaches a SAMI subtitle.  Returns None when the key cannot be fetched.
    """
    data = tools.urlopen(self.app, stream_id, {'cache':3600})
    streamid = re.compile('data-episode-id="(.*?)"', re.DOTALL + re.IGNORECASE).search(str(data)).group(1)
    # NOTE(review): this only shows a notification ("no stream available" in
    # Dutch) and then continues with the empty id — confirm whether an early
    # return was intended here.
    if streamid == "": mc.ShowDialogNotification("Geen stream beschikbaar...")
    # Fresh (uncached) security key; it is session-bound.
    data = tools.urlopen(self.app, 'http://pi.omroep.nl/info/security', {'cache':0})
    soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")
    try:
        key = soup.session.key.contents[0]
    except:
        # Dutch: "cannot fetch the security key".
        mc.ShowDialogNotification("Kan de security key niet ophalen")
        return
    # The decoded key looks like "<...>|<secret>"; the second field is mixed
    # with the episode id to form the request token.
    security = base64.b64decode(key)
    securitystr = str(security).split('|')[1]
    md5code = streamid + '|' + securitystr
    md5code = md5.md5(md5code).hexdigest()
    streamdataurl = 'http://pi.omroep.nl/info/stream/aflevering/' + str(streamid) + '/' + str(md5code).upper()
    data = tools.urlopen(self.app, streamdataurl, {'cache':0}).decode('utf-8')
    print data
    xmlSoup = BeautifulSoup(data)
    # Pick the WVC1-compressed variant and strip all whitespace from its URL.
    streamurl = xmlSoup.find(attrs={"compressie_formaat" : "wvc1"})
    url_play = streamurl.streamurl.contents[0].replace(" ","").replace("\n","").replace("\t","")
    play = CreatePlay()
    play.path = url_play
    if subtitle:
        play.subtitle = self.GetSubtitle(security, streamid)
        play.subtitle_type = 'sami'
    return play
示例3: Episode
def Episode(self, stream_name, stream_id, page, totalpage):
    """Scrape the show page for its RSS feed link and build episodes
    from the feed's <item> entries."""
    page_data = tools.urlopen(self.app, self.url_base + stream_id, {'cache':3600})
    if page_data == "":
        mc.ShowDialogNotification("No episode found for " + str(stream_name))
        return []
    # The show page links to its RSS feed; follow that link.
    feed_path = re.compile('</a> <a href="(.*?)">RSS</a>').search(page_data).group(1)
    feed_data = tools.urlopen(self.app, self.url_base + feed_path, {'cache':3600})
    feed = BeautifulStoneSoup(feed_data, convertEntities="xml", smartQuotesTo="xml")
    episodes = []
    for item in feed.findAll('item'):
        entry = CreateEpisode()
        entry.name = item.title.contents[0]
        entry.id = item.link.contents[0]
        entry.description = item.description.contents[0]
        entry.thumbnails = item.thumbnailimage.contents[0]
        entry.date = item.pubdate.contents[0]
        entry.page = page
        entry.totalpage = totalpage
        episodes.append(entry)
    return episodes
示例4: GetPath
def GetPath(self, stream_id):
    """Resolve a stream id to a playable media path.

    A plain id is returned unchanged.  An id of the form
    "<page_url>|<processor_url>" is resolved by fetching the processor
    description, which tells us how to scrape the real path:
      * a payload whose first entry is 'v2' carries keyed settings
        (s_url=, s_cookie=, regex=, s_postdata=);
      * a payload whose first entry contains 'http' is a bare
        [url, regex] pair.
    Returns "" when scraping fails or the payload format is unknown, and
    None when the payload is too short.

    Fix: the mode variable (`id` in the original, shadowing the builtin)
    was left unbound when neither probe matched, raising NameError; it
    now defaults to 0 and falls through to the empty-path result.
    """
    parts = stream_id.split('|')
    if len(parts) <= 1:
        # No processor part: the id already is the path.
        return stream_id

    data = self.ParseProcessor(parts[1] + '?url=' + parts[0])
    keys = data.keys()
    if len(data) < 2:
        return

    def match_path(pattern, page):
        # First capture group of `pattern` in `page`, or "" on no match.
        try:
            return re.compile(str(pattern), re.DOTALL + re.IGNORECASE).search(str(page)).group(1)
        except Exception:
            return ""

    # Probe the payload flavour.  Both probes run, matching the original
    # order (a later 'http' match overrides an earlier 'v2' match).
    mode = 0
    try:
        if data[0] == 'v2':
            mode = 1
    except Exception:
        pass
    try:
        if 'http' in data[0]:
            mode = 2
    except Exception:
        pass

    if mode == 1:
        id_url = data['s_url='] if 's_url=' in keys else ''
        id_cookie = data['s_cookie='] if 's_cookie=' in keys else ''
        id_regex = data['regex='] if 'regex=' in keys else ''
        id_postdata = data['s_postdata='] if 's_postdata=' in keys else ''
        if not id_url:
            # Fall back to the page URL carried in the id itself.
            id_url = parts[0]
        params = {'cookie': str(id_cookie), 'post': str(id_postdata)}
        page = tools.urlopen(self.app, str(id_url), params)
        return match_path(id_regex, page)
    elif mode == 2:
        page = tools.urlopen(self.app, str(data[0]))
        return match_path(data[1], page)
    return ""
示例5: Play
def Play(self, stream_name, stream_id, subtitle):
    """Build a play object for a BrightCove-hosted video, routing the
    highest-bitrate RTMP stream through the bartsidee flowplayer proxy."""
    page = tools.urlopen(self.app, str(stream_id), {"cache": 3600})
    contentId = (
        re.compile('videoPlayer\\\\" value=\\\\"(.*?)\\\\"', re.DOTALL + re.MULTILINE).search(str(page)).group(1)
    )
    playerKey = (
        re.compile('playerKey\\\\" value=\\\\"(.*?)\\\\"', re.DOTALL + re.MULTILINE).search(str(page)).group(1)
    )
    seed = "61773bc7479ab4e69a5214f17fd4afd21fe1987a"
    helper = BrightCoveHelper(playerKey, contentId, str(stream_id), seed)
    # Map bitrate -> stream uri, then keep the best quality.
    streams = {}
    for uri, bitrate in helper.GetStreamInfo():
        streams[bitrate] = {"uri": uri}
    best = max(int(rate) for rate in streams)
    rtmp = streams[int(best)]["uri"]
    # The uri is "<rtmp domain>/&<clip path>".
    domain, clip = rtmp.split("/&")
    proxy_url = "http://www.bartsidee.nl/flowplayer/player.php?url=" + str(domain) + "&clip=" + str(clip)
    play = CreatePlay()
    play.content_type = "video/x-flv"
    play.path = quote_plus(proxy_url)
    play.domain = "bartsidee.nl"
    return play
示例6: Episode
def Episode(self, stream_name, stream_id, page, totalpage):
    """List episodes for a tag page, deriving the page count from the pager."""
    listing_url = self.url_base + '/tag/' + str(stream_id) + '?page=' + str(page)
    html = tools.urlopen(self.app, listing_url, {'cache':3600})
    soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES, smartQuotesTo="xml")
    container = soup.findAll('div', {'class':'videoContainer'})[0]
    # "pager-next" links plus the current page give the total; a missing
    # pager means there is a single page.
    try:
        pager = soup.findAll('span', {'class':'pager-list'})[0]
        totalpage = len(pager.findAll(attrs={'class': re.compile("^pager-next")})) + 1
    except:
        totalpage = 1
    episodes = []
    for item in container.findAll('li'):
        ep = CreateEpisode()
        ep.name = item.h5.a.contents[0]
        ep.id = item.a['href']
        ep.thumbnails = item.a.img['src']
        ep.page = page
        ep.totalpage = totalpage
        episodes.append(ep)
    return episodes
示例7: Genre
def Genre(self, genre, filter, page, totalpage):
    """Return the video teasers broadcast on the day that `genre` maps to
    in self.genre (ZDF 'sendungVerpasst' service)."""
    day = self.genre[genre]
    url = self.url_base + '/ZDFmediathek/xmlservice/web/sendungVerpasst?startdate=' + day + '&enddate=' + day + '&maxLength=50'
    data = tools.urlopen(self.app, url, {'cache':2400})
    soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")
    genrelist = list()
    if len(data) < 20:
        # Response too short to contain any teaser XML.
        mc.ShowDialogNotification("No episode found for " + str(genre))
        return []
    for info in soup.findAll('teaser'):
        if info.type.contents[0] != 'video':
            continue
        airtime = info.find('airtime').contents[0]
        entry = CreateEpisode()
        entry.name = info.find('title').contents[0]
        entry.id = info.find('assetid').contents[0]
        # Last five characters of the airtime (presumably HH:MM — confirm).
        entry.date = airtime[-5:]
        entry.page = page
        entry.totalpage = totalpage
        genrelist.append(entry)
    if len(genrelist) < 1:
        mc.ShowDialogNotification("No episode found for " + str(genre))
    return genrelist
示例8: Episode
def Episode(self, stream_name, stream_id, page, totalpage):
    """List the most recent episodes for a ZDF show id.

    Queries the 'aktuellste' XML service and converts each video teaser
    into an episode object.  Returns [] when the response is (nearly)
    empty.

    Fix: removed the duplicated `title = info.find('title')` lookup and
    renamed the local `id` so it no longer shadows the builtin.
    """
    url = self.url_base + '/ZDFmediathek/xmlservice/web/aktuellste?id=' + stream_id + '&maxLength=50'
    data = tools.urlopen(self.app, url, {'cache':3600})
    soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")
    if len(data) < 5:
        mc.ShowDialogNotification("No episode found for " + str(stream_name))
        return []
    episodelist = list()
    for info in soup.findAll('teaser'):
        if info.type.contents[0] != 'video':
            continue
        title = info.find('title')
        detail = info.find('detail')
        asset_id = info.find('assetid')
        airtime = info.find('airtime').contents[0]
        # Thumbnail is served from a fixed-size content blob per asset.
        thumb = self.url_base + '/ZDFmediathek/contentblob/' + str(asset_id.contents[0]) + '/timg276x155blob'
        episode = CreateEpisode()
        episode.name = title.contents[0]
        episode.id = asset_id.contents[0]
        episode.description = stream_name + ': ' + encodeUTF8(detail.contents[0])
        episode.thumbnails = thumb
        episode.date = airtime
        episode.page = page
        episode.totalpage = totalpage
        episodelist.append(episode)
    return episodelist
示例9: Play
def Play(self, stream_name, stream_id, subtitle):
    """Resolve a JW-player style page into a play object.

    When a `streamer` (RTMP domain) is present the clip is routed through
    the bartsidee flowplayer proxy; otherwise the file URL plays directly.
    """
    html = tools.urlopen(self.app, str(stream_id), {"cache": 3600})
    clip = re.compile('file \: "(.*?)"').search(html).group(1)
    try:
        streamer = re.compile('streamer \: "(.*?)"').search(html).group(1)
    except:
        streamer = False
    play = CreatePlay()
    if streamer:
        proxy_url = (
            "http://www.bartsidee.nl/flowplayer/player.php?url="
            + str(streamer)
            + "&clip="
            + str(clip).replace(".flv", "")
        )
        play.content_type = "video/x-flv"
        play.path = quote_plus(proxy_url)
        play.domain = "bartsidee.nl"
    else:
        play.path = clip
    return play
示例10: Episode
def Episode(self, stream_name, stream_id, page, totalpage):
    """List episodes from a wp-pagenavi video gallery page."""
    html = tools.urlopen(self.app, str(stream_id) + "/page/" + str(page), {"cache": 3600})
    soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES, smartQuotesTo="xml")
    gallery = soup.findAll("div", {"id": "videogallery"})[0]
    # Pager links ("page…") plus the current page give the total; a
    # missing pager means a single page.
    try:
        pager = soup.findAll("div", {"class": "wp-pagenavi"})[0]
        totalpage = len(pager.findAll(attrs={"class": re.compile("^page")})) + 1
    except:
        totalpage = 1
    results = []
    for item in gallery.findAll("div", {"class": "videoitem"}):
        thumb_div = item.findAll("div", {"class": "thumbnail"})[0]
        # Thumbnail URL is embedded in the inline CSS of the item.
        thumb = re.compile("background-image\: url\((.*?)\)").search(thumb_div.div["style"]).group(1)
        episode = CreateEpisode()
        episode.name = thumb_div.a["title"]
        episode.id = thumb_div.a["href"]
        episode.description = " ".join(item.p.a.contents[0].split())
        episode.thumbnails = thumb
        episode.page = page
        episode.totalpage = totalpage
        results.append(episode)
    return results
示例11: Play
def Play(self, stream_name, stream_id, subtitle):
    """Play an eredivisielive video through the flowplayer proxy at the
    highest listed bitrate."""
    video_id = re.compile('video\/(.*?)-').search(str(stream_id)).group(1)
    playlist_url = 'http://eredivisielive.nl/content/playlist/website/%s_ere_lr.xml' % (video_id,)
    xml = tools.urlopen(self.app, playlist_url)
    soup = BeautifulStoneSoup(xml, convertEntities=BeautifulSoup.XML_ENTITIES, smartQuotesTo="xml")
    streamer = soup.findAll('videodock:streamer')[0].contents[0]
    # Collect bitrate -> url, then keep the highest bitrate.
    files = {}
    for entry in soup.findAll('media:content'):
        files[int(entry['bitrate'])] = entry['url']
    best = max(files)
    proxy_url = 'http://www.bartsidee.nl/flowplayer/player.php?url=' + str(streamer) + '&clip=mp4:' + str(files[best])
    play = CreatePlay()
    play.content_type = 'video/x-flv'
    play.path = quote_plus(proxy_url)
    play.domain = 'bartsidee.nl'
    return play
示例12: Episode
def Episode(self, stream_name, stream_id, page, totalpage):
    """List free episodes from a paginated video overview, skipping
    entries that require payment."""
    listing_url = self.url_base + str(stream_id) + 'pagina/' + str(page) + '/'
    html = tools.urlopen(self.app, listing_url, {'cache':3600})
    soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES, smartQuotesTo="xml")
    overview = soup.findAll('div', {'id':'video-overview'})[0]
    # Pagination anchor count plus the current page gives the total.
    try:
        pagination = soup.findAll('div', {'id':'pagination-pages'})[0]
        totalpage = len(pagination.findAll('a')) + 1
    except:
        totalpage = 1
    episodes = []
    for item in overview.findAll('li'):
        # A payment button marks the entry as not freely playable.
        if item.findAll('span', {'class':'video-payment-noprice-button'}):
            continue
        ep = CreateEpisode()
        ep.name = item.findAll('span', {'class':'title'})[0].contents[0]
        ep.id = item.a['href']
        ep.thumbnails = item.a.img['src']
        ep.date = item.findAll('span', {'class':'date'})[0].contents[0]
        ep.page = page
        ep.totalpage = totalpage
        episodes.append(ep)
    return episodes
示例13: ParsePlaylist
def ParsePlaylist(self, url, max=False):
    """Parse a key=value playlist into a list of dicts.

    The playlist at `url` consists of blocks of "key=value" lines
    separated by blank lines (or lines containing just "#"); each block
    becomes one dict keyed by the text before the first '='.  `max`
    limits the number of lines read (False/0 means effectively
    unlimited).

    Fixes over the original: values containing three or more '='
    characters are no longer truncated (the old `range(2, len(line)-2)`
    dropped the trailing fields), and a final block without a trailing
    separator line is no longer silently discarded.
    """
    raw = tools.urlopen(self.app, url)
    buf = cStringIO.StringIO()
    buf.write(raw)
    buf.seek(0, 0)
    reader = csv.reader(buf, delimiter="=", quoting=csv.QUOTE_NONE, quotechar='|')
    limit = max if max != 0 else 10000
    item = {}
    datalist = []
    for i, line in enumerate(reader):
        if i >= limit:
            break
        if line == [] or line == ['#']:
            # Block separator: flush the block collected so far.
            if item:
                datalist.append(item)
            item = {}
        elif len(line) >= 2:
            # Re-join any '=' characters that belong to the value itself.
            item[line[0]] = '='.join(line[1:])
    # Flush a final block that wasn't followed by a separator line.
    if item:
        datalist.append(item)
    buf.close()
    return datalist
示例14: Episode
def Episode(self, stream_name, stream_id, page, totalpage):
    """Collect episodes from a WordPress video gallery listing page."""
    html = tools.urlopen(self.app, str(stream_id) + '/page/' + str(page), {'cache':3600})
    soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES, smartQuotesTo="xml")
    gallery = soup.findAll('div', {'id':'videogallery'})[0]
    try:
        # wp-pagenavi links ("page…") plus the current page give the total.
        nav = soup.findAll('div', {'class':'wp-pagenavi'})[0]
        totalpage = len(nav.findAll(attrs={'class': re.compile("^page")})) + 1
    except:
        totalpage = 1
    found = []
    for block in gallery.findAll('div', {'class':'videoitem'}):
        thumb_holder = block.findAll('div', {'class':'thumbnail'})[0]
        # Thumbnail URL lives in the item's inline CSS.
        thumb = re.compile('background-image\: url\((.*?)\)').search(thumb_holder.div['style']).group(1)
        ep = CreateEpisode()
        ep.name = thumb_holder.a['title']
        ep.id = thumb_holder.a['href']
        ep.description = ' '.join(block.p.a.contents[0].split())
        ep.thumbnails = thumb
        ep.page = page
        ep.totalpage = totalpage
        found.append(ep)
    return found
示例15: Episode
def Episode(self, stream_name, stream_id, page, totalpage):
    """Build episodes from ITV's programme episode table (one <tr> per
    episode)."""
    api_url = 'http://mercury.itv.com/api/html/dotcom/Episode/Programme/' + quote(stream_id)
    html = tools.urlopen(self.app, api_url, {'cache':3600})
    soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES, smartQuotesTo="xml")
    if len(html) < 10:
        # Response too short to contain the episode table.
        mc.ShowDialogNotification("No episode found for " + str(stream_name))
        return []
    body = soup.find('tbody')
    episodes = []
    for row in body.findAll('tr'):
        time_cell = row.find('td', {'class':'t_time'})
        duration_cell = row.find('td', {'class':'t_duration'})
        details_cell = row.find('td', {'class':'t_details'})
        ep = CreateEpisode()
        ep.name = stream_name
        ep.id = self.url_base + details_cell.a['href']
        ep.description = duration_cell.contents[0] + ' - ' + details_cell.span.contents[0]
        ep.thumbnails = details_cell.a.img['src']
        ep.date = time_cell.contents[2]
        ep.page = page
        ep.totalpage = totalpage
        episodes.append(ep)
    return episodes