本文整理汇总了Python中xbmctorrent.utils.url_get函数的典型用法代码示例。如果您正苦于以下问题:Python url_get函数的具体用法?Python url_get怎么用?Python url_get使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了url_get函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: proc_close
def proc_close():
    """Ask the running torrent2http process to shut down via its HTTP endpoint.

    Best effort: any failure is logged (with traceback) and swallowed so that
    add-on teardown always continues.
    """
    # poll() returns None only while the process is still alive; the previous
    # test `not proc.poll()` also matched a clean exit (return code 0) and
    # would then try to contact a process that is already gone.
    if proc.poll() is None:
        plugin.log.info("Trying to stop torrent2http at http://%s/shutdown" % proc.bind_address)
        try:
            url_get("http://%s/shutdown" % proc.bind_address, with_immunicity=False)
        except Exception as e:
            plugin.log.info('Failed to stop torrent2http')
            # format_exc() takes an optional line *limit*, not the exception
            # object — passing `e` only worked by accident under Python 2.
            # Log each line separately so the XBMC log stays readable.
            for line in traceback.format_exc().split('\n'):
                plugin.log.info(line)
示例2: torrents3d_play
def torrents3d_play(article):
    """Resolve a torrents3d release page into a magnet URI and start playback.

    Shows a progress dialog while fetching; if the torrent link is hidden
    behind authentication, logs in with the configured credentials first.
    On any failure a localized error dialog is shown and the call returns.
    """
    import xbmcgui
    from bs4 import BeautifulSoup
    from contextlib import closing
    from urlparse import urljoin
    from xbmctorrent.magnet import generate_magnet
    from xbmctorrent.utils import SafeDialogProgress

    article = int(article)
    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1=u"Получение информации о релизе...", line2="", line3="")
        url = urljoin(BASE_URL, "article/%d" % article)
        try:
            page = url_get(url, headers=HEADERS)
            soup = BeautifulSoup(page, "html5lib")
            torrent_href = soup.find("a", class_="genmed")
            if not torrent_href:
                # Download link is hidden behind authentication — log in and retry.
                dialog.update(percent=50, line2=u"Требуется авторизация. Авторизация...")
                if not plugin.get_setting("t3d_login") and not plugin.get_setting("t3d_passwd"):
                    plugin.notify("Проверьте настройки авторизации.", delay=15000)
                    return
                page = _torrents3d_login(url)
                soup = BeautifulSoup(page, "html5lib")
                torrent_href = soup.find("a", class_="genmed")
                if not torrent_href:
                    xbmcgui.Dialog().ok(plugin.name, "Авторизация неудалась. Проверьте настройки авторизации.")
                    return
            dialog.update(percent=100, line2=u"Обработка данных.")
            from bencode import bdecode
            # _torrents3d_cleantitle returns a (tag, title) pair for the format string.
            title = "[%s] %s" % _torrents3d_cleantitle(soup.find("a", class_="tt-text").text)
            metadata = bdecode(url_get(torrent_href["href"], headers=HEADERS))
            plugin.redirect(plugin.url_for("play", uri=generate_magnet(metadata, uenc(title))))
        except Exception:
            plugin.log.error("Cannot get data from remote server")
            xbmcgui.Dialog().ok(plugin.name, u"Не удалось получить данные от сервера")
            return
示例3: search
def search(name, complete=False):
    """Look up a TVDB series by name, caching the result in a shelf.

    With ``complete=False`` returns lightweight metadata (images resolved);
    with ``complete=True`` caches and returns the full show record.
    """
    from xbmctorrent.caching import shelf
    import hashlib

    # Cache key is derived from the query so repeated searches are free.
    search_hash = hashlib.sha1(name).hexdigest()
    with shelf("com.thetvdb.search.%s" % search_hash) as show:
        if not show:
            import re
            import xml.etree.ElementTree as ET
            from xbmctorrent.utils import url_get
            xml_data = url_get(
                "%s/api/GetSeries.php" % BASE_URL,
                params={"seriesname": name},
                headers=HEADERS,
                with_immunicity=False,
            )
            dom = ET.fromstring(xml_data)
            if not len(dom):
                # No series matched — nothing to cache, nothing to return.
                return
            meta = dom2dict(dom[0])
            if not complete:
                return update_image_urls(meta)
            show.update(get(meta["id"]))
        return show
示例4: ilcorsaronero_page
def ilcorsaronero_page(root, page):
    """Yield directory items for one ilCorsaroNero listing page.

    A ">> Next page" item is appended for category listings; search result
    feeds are single-page and get no pager.
    """
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from xbmctorrent.utils import url_get

    page = int(page)
    html_data = url_get(urljoin(BASE_URL, "%s&page=%d" % (root, page)), headers=HEADERS)
    soup = BeautifulSoup(html_data, "html5lib")
    for anchor in soup.findAll("a", class_="tab"):
        # The last five table cells of the row hold size/-/date/seeds/peers.
        cells = anchor.parent.parent.findAll("td")[-5:]
        size, _, date, seeds, peers = [
            cell.font.text if cell.font is not None else None for cell in cells
        ]
        yield {
            "label": "%s (%s S:%s P:%s %s)" % (anchor.text, size, seeds, peers, date),
            "path": plugin.url_for("ilcorsaronero_play", uri=anchor["href"]),
            "is_playable": True,
        }
    if root.find('search') == -1:
        yield {
            "label": ">> Next page",
            "path": plugin.url_for("ilcorsaronero_page", root=root, page=page + 1),
            "is_playable": False,
        }
示例5: kat_page
def kat_page(root, page, sort_field, sort_order):
    """Yield one RSS-backed page of Kickass listings followed by a pager item."""
    from urlparse import urljoin
    from xbmctorrent.scrapers import rss
    from xbmctorrent.utils import url_get

    content_type = plugin.request.args_dict.get("content_type")
    if content_type:
        plugin.set_content(content_type)

    page = int(page)
    # Request the RSS flavour of the listing with the chosen sort applied.
    feed = url_get(
        urljoin(BASE_URL, "%s/%d" % (root, page)),
        headers=HEADERS,
        params={"rss": "1", "field": sort_field, "sorder": sort_order},
    )
    for entry in rss.parse(feed, content_type):
        yield entry
    yield {
        "label": ">> Next page",
        "path": plugin.url_for(
            "kat_page",
            root=root,
            page=page + 1,
            sort_field=sort_field,
            sort_order=sort_order,
            **plugin.request.args_dict
        ),
    }
示例6: iplay_page
def iplay_page(cat, page):
    """Yield directory items for one iPlay category page, then a pager item."""
    import re
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from xbmctorrent.utils import url_get

    page = int(page)
    url = urljoin(BASE_URL, "test/proxy.php")
    html_data = url_get(url, params={"cat": cat, "page": page}, headers=HEADERS)
    soup = BeautifulSoup(html_data, "html5lib")
    for node in soup.findAll('a', 'torrent'):
        # The download link lives on the parent of the "dld" icon image.
        torrent_node = node.parent.find('img', {"class": "dld"}).parent
        yield {
            "label": "%s" % node.get('title'),
            "path": plugin.url_for("play", uri=torrent_node["href"]),
            "is_playable": False,
        }
    yield {
        "label": "Next page...",
        "path": plugin.url_for("iplay_page", cat=cat, page=page + 1),
        "is_playable": False,
    }
示例7: get_torrent_info
def get_torrent_info(url):
    """Scrape a torrent detail page into a metadata dict.

    Returns movie/serie flags, seed/leech counts, title, quality, genres,
    the magnet URI, poster, cast and IMDB id, pulled from schema.org
    ``itemprop`` markup on the page.
    """
    from bs4 import BeautifulSoup
    from xbmctorrent.utils import first, url_get

    response = url_get(url)
    soup = BeautifulSoup(response, "html5lib")
    # Category breadcrumb decides whether this is a movie or a TV page.
    movie = first(soup.select('td[itemtype="http://schema.org/Product"] div.font11px a[href="/movies/"]')) is not None
    serie = first(soup.select('td[itemtype="http://schema.org/Product"] div.font11px a[href="/tv/"]')) is not None

    def itemprop(prop, default=None):
        # Text of the first node carrying the given itemprop, or `default`.
        node = first(soup.select('[itemprop="%s"]' % prop))
        return node and node.text or default

    def itemprops(prop, default=None):
        # Texts of every node carrying the itemprop. `default=None` replaces
        # the original mutable-default `default=[]`, which handed the same
        # shared list to every caller.
        nodes = soup.select('[itemprop="%s"]' % prop)
        if default is None:
            default = []
        return [node.text for node in nodes] or default

    return {
        "movie": movie and not serie,
        "serie": serie and not movie,
        "seeders": itemprop("seeders"),
        "leechers": itemprop("leechers"),
        "title": itemprop("name"),
        "quality": itemprop("quality"),
        "genres": itemprops("genre"),
        "magnet_url": first(soup.select(".magnetlinkButton"))["href"],
        "poster": first(soup.select('a.movieCover img'), {}).get("href"),
        "cast": [cast.text for cast in soup.select('[itemprop="name"] a')],
        "imdb_id": find_imdb_id(soup),
    }
示例8: btdigg_page
def btdigg_page(query, sort, page):
    """Yield playable items for one BTDigg search result page, then a pager."""
    from bs4 import BeautifulSoup
    from xbmctorrent.utils import url_get

    params = {"order": sort, "q": query, "p": page}
    soup = BeautifulSoup(url_get("%s/search" % BASE_URL, headers=HEADERS, params=params), "html5lib")
    names = soup.findAll("td", "torrent_name")
    # Attribute tables alternate with layout tables; every second one is real.
    attrs_tables = soup.findAll("table", "torrent_name_tbl")[1::2]
    for name_node, attrs_table in zip(names, attrs_tables):
        values = attrs_table.findAll("span", "attr_val")
        label = "%s (%s, DLs:%s)" % (name_node.find("a").text, values[0].text, values[2].text)
        yield {
            "label": label,
            "path": plugin.url_for("play", uri=attrs_table.find("a")["href"]),
            "is_playable": True,
        }
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("btdigg_page", query=query, sort=sort, page=int(page) + 1),
        "is_playable": False,
    }
示例9: cpb_listAll
def cpb_listAll(cat, page):
    """Yield directory items for one cpasbien category page, plus a pager.

    Torrent and artwork URLs are derived from the HTML page's basename
    (``foo.html`` -> ``/_torrents/foo.torrent`` and ``/_pictures/foo.jpg``).
    """
    from bs4 import BeautifulSoup
    from xbmctorrent.utils import url_get

    # e.g. view_cat.php?categorie=films&page=1
    url_adr = "%s/view_cat.php?categorie=%s&page=%s" % (BASE_URL, cat, page)
    html_data = url_get(url_adr)
    soup = BeautifulSoup(html_data, "html5lib")
    # Rows alternate between classes "ligne0" and "ligne1".
    for name_node in soup.findAll('div', re.compile('ligne[0,1]')):
        title = name_node.find("a").text
        # Removed dead local `tds`: the row's <td> list was fetched but never used.
        seed = name_node.find("div", "up").text
        leech = name_node.find("div", "down").text
        color = getColor(seed, leech)
        title = title + " [COLOR %s][S:%s|L:%s][/COLOR]" % (color, seed, leech)
        html_uri = name_node.find("a")["href"]
        torrent_basename = os.path.basename(html_uri)
        uri_addr = BASE_URL + "/_torrents/" + torrent_basename.replace(".html", ".torrent")
        img_addr = BASE_URL + "/_pictures/" + torrent_basename.replace(".html", ".jpg")
        yield {
            "label": title,
            "path": plugin.url_for("play", uri=uri_addr),
            "is_playable": True,
            "thumbnail": img_addr,
            "properties": {
                "fanart_image": img_addr,
            },
        }
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("cpb_listAll", cat=cat, page=int(page) + 1),
        "is_playable": False,
    }
示例10: cpb_last
def cpb_last(type):
    """Yield directory items for cpasbien's "latest torrents" page.

    ``type`` filters by category unless it equals TYPE_A (all). This listing
    deliberately has no "next page" item.
    """
    from bs4 import BeautifulSoup
    from xbmctorrent.utils import url_get

    url_adr = "%s/derniers-torrents.php" % BASE_URL
    if type != TYPE_A:
        url_adr = url_adr + "?filtre=%s" % type
    html_data = url_get(url_adr)
    soup = BeautifulSoup(html_data, "html5lib")
    # Rows alternate between classes "ligne0" and "ligne1".
    for name_node in soup.findAll('div', re.compile('ligne[0,1]')):
        title = name_node.find("a").text
        # Removed dead local `tds`: the row's <td> list was fetched but never used.
        seed = name_node.find("div", "up").text
        leech = name_node.find("div", "down").text
        color = getColor(seed, leech)
        title = title + " [COLOR %s][S:%s|L:%s][/COLOR]" % (color, seed, leech)
        html_uri = name_node.find("a")["href"]
        torrent_basename = os.path.basename(html_uri)
        uri_addr = BASE_URL + "/_torrents/" + torrent_basename.replace(".html", ".torrent")
        img_addr = BASE_URL + "/_pictures/" + torrent_basename.replace(".html", ".jpg")
        yield {
            "label": title,
            "path": plugin.url_for("play", uri=uri_addr),
            "is_playable": True,
            "thumbnail": img_addr,
            "properties": {
                "fanart_image": img_addr,
            },
        }
示例11: cpb_page
def cpb_page(cat, query, sort, page):
    """Yield items for a cpasbien search or category page, then a pager.

    Real user queries hit the pretty search URL; the sentinel "cpasbien"
    query falls back to the raw category view with a sort parameter.
    """
    log.debug("THE CAT = " + cat + " & THE Q = " + query)
    if "cpasbien" not in query:
        url_adr = BASE_URL + "/" + cat + "/" + query.replace(" ", "+") + "/page-" + str(page) + "," + sort
    else:
        # sort is prefixed (e.g. "trie-xxx"); the query string wants only the suffix.
        url_adr = BASE_URL + "/view_cat.php?categorie=" + cat + "&page=" + str(page) + "&trie=" + sort[5:]
    log.debug("THE URL = " + url_adr)
    soup = BeautifulSoup(url_get(url_adr), "html5lib")
    for name_node in soup.findAll("td", "torrent-aff"):
        anchor = name_node.find("a")
        torrent_basename = os.path.basename(anchor["href"])
        uri_addr = BASE_URL + "/_torrents/" + torrent_basename.replace(".html", ".torrent")
        img_addr = BASE_URL + "/_pictures/" + torrent_basename.replace(".html", ".jpg")
        yield {
            "label": anchor.text,
            "path": plugin.url_for("play", uri=uri_addr),
            "is_playable": True,
            "thumbnail": img_addr,
            "properties": {"fanart_image": img_addr},
        }
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("cpb_page", cat=cat, query=query, sort=sort, page=int(page) + 1),
        "is_playable": False,
    }
示例12: piratebay_page
def piratebay_page(root, page):
    """Yield playable items for one Pirate Bay listing page, then a pager."""
    import re
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from xbmctorrent.utils import url_get

    page = int(page)
    # "/7/100,200,500" = sort by seeders, restricted to video categories.
    listing_url = urljoin(BASE_URL, "%s/%d/7/100,200,500" % (root, page))
    soup = BeautifulSoup(url_get(listing_url, headers=HEADERS), "html5lib")
    for node in soup.findAll("div", "detName"):
        seeds, peers = [td.text for td in node.parent.parent.findAll("td")[2:]]
        magnet_node = node.parent.findAll("a")[1]
        desc_node = node.parent.findAll("font", "detDesc")[0]
        size = re.search("Size (.*?),", desc_node.text).group(1)
        label = "%s (%s S:%s P:%s)" % (node.a.text, size.replace("&nbsp;", " "), seeds, peers)
        yield {
            "label": label,
            "path": plugin.url_for("play", uri=magnet_node["href"]),
            "is_playable": True,
        }
    yield {
        "label": ">> Next page",
        "path": plugin.url_for("piratebay_page", root=root, page=page + 1),
        "is_playable": False,
    }
示例13: get_banners
def get_banners(show_id):
    """Fetch and return the TVDB banner list for a show.

    Returns None when the banners feed has no entries.
    """
    import xml.etree.ElementTree as ET
    from xbmctorrent.utils import url_get

    xml_data = url_get("%s/banners.xml" % show_base_url(show_id), headers=HEADERS)
    root = ET.fromstring(xml_data)
    if not len(root):
        return
    return update_image_urls(dom2dict(root))["banner"]
示例14: ilcorsaronero_play
def ilcorsaronero_play(uri):
    """Extract the magnet link from an ilCorsaroNero detail page and play it."""
    import re
    from bs4 import BeautifulSoup
    from xbmctorrent.utils import url_get

    soup = BeautifulSoup(url_get(uri, headers=HEADERS), "html5lib")
    # The magnet button is a "forbtn" anchor whose href contains "magnet".
    magnet_links = soup.findAll("a", class_="forbtn", href=re.compile("magnet"))
    plugin.redirect(plugin.url_for("play", uri=magnet_links[0]["href"]))
示例15: proc_close
def proc_close():
    """Shut torrent2http down, escalating to kill() after a 10-second grace period.

    Asks the process to exit via its HTTP /shutdown endpoint (best effort),
    closes the log pipe, then polls for up to 10 seconds before force-killing.
    Always reaps the child with wait() so no zombie is left behind.
    """
    if proc.poll() is None:
        # Process still alive: request a graceful shutdown over HTTP.
        try:
            url_get("http://%s/shutdown" %
                    proc.bind_address, with_immunicity=False)
        except Exception:
            pass  # best effort — the kill timeout below is the fallback
    finished = False
    os.close(log_thread.write_fd)
    start = time.time()
    # Give the process up to 10 seconds to exit on its own.
    while (time.time() - start) < 10:
        time.sleep(0.2)
        if proc.poll() is not None:
            finished = True
            break
    if not finished:
        # Fixed log-message typo: "occured" -> "occurred".
        logger("timeout occurred while shutting down")
        proc.kill()
    proc.wait()