本文整理汇总了Python中supybot.utils.web.getUrl函数的典型用法代码示例。如果您正苦于以下问题:Python getUrl函数的具体用法?Python getUrl怎么用?Python getUrl使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了getUrl函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _query_freebase
def _query_freebase(self, work_type, thing):
    """Resolve *thing* via the Freebase (Google APIs) search + MQL endpoints.

    Returns a dict with 'props', 'url', 'title' and 'characters' keys,
    or None when the search yields no hit or the MQL read comes back empty.
    """
    key = conf.get(conf.supybot.plugins.Cast.FreebaseApiKey)
    props = FREEBASE_TYPES[work_type]
    search_url = "https://www.googleapis.com/freebase/v1/search?query=%s&type=%s&key=%s" % (web.urlquote(thing), props['type'], key)
    search = simplejson.loads(web.getUrl(search_url, headers=HEADERS))
    if not search['result']:
        return None
    fbid = search['result'][0]['id']
    # Base MQL query; the per-type subquery may override any of these keys.
    mql = {
        "id": fbid,
        "type": props['type'],
        "name": None,
        "limit": 1
    }
    mql.update(props['subquery'])
    read_url = "https://www.googleapis.com/freebase/v1/mqlread?query=%s&key=%s" % (web.urlquote(simplejson.dumps(mql)), key)
    answer = simplejson.loads(web.getUrl(read_url, headers=HEADERS))
    record = answer['result']
    if record is None:
        return None
    return {
        'props': props,
        'url': "http://www.freebase.com" + record['id'],
        'title': record['name'],
        'characters': props['extractor'](record)
    }
示例2: _query_freebase
def _query_freebase(self, work_type, thing):
    """Resolve *thing* via the legacy api.freebase.com search + mqlread services.

    Returns a dict with 'props', 'url', 'title' and 'characters' keys,
    or None when the search yields no hit or the MQL read comes back empty.
    """
    props = FREEBASE_TYPES[work_type]
    search_url = "https://api.freebase.com/api/service/search?query=%s&type=%s" % (web.urlquote(thing), props['type'])
    search = simplejson.loads(web.getUrl(search_url, headers=HEADERS))
    if not search['result']:
        return None
    fbid = search['result'][0]['id']
    # Inner MQL query; the per-type subquery may override any of these keys.
    inner = {
        "id": fbid,
        "type": props['type'],
        "name": None,
        "limit": 1
    }
    inner.update(props['subquery'])
    envelope = {'escape': False, 'query': inner}
    read_url = "https://api.freebase.com/api/service/mqlread?query=%s" % web.urlquote(simplejson.dumps(envelope))
    answer = simplejson.loads(web.getUrl(read_url, headers=HEADERS))
    record = answer['result']
    if record is None:
        return None
    return {
        'props': props,
        'url': "http://www.freebase.com" + record['id'],
        'title': record['name'],
        'characters': props['extractor'](record)
    }
示例3: loadByName
def loadByName(self, name):
    """Resolve a citizen by name through the eRepublik API and load it by id.

    Numeric strings are treated as ids and loaded directly.  Returns None
    when no citizen matches *name*.
    """
    if name.isdigit():
        return self.loadById(name)
    import supybot.utils.web as web
    # BUG FIX: the original concatenation was missing a '+' between `name`
    # and the trailing path segment, which is a SyntaxError.
    url = 'http://api.erpk.org/citizen/search/' + name + '/1.xml?key=nIKh0F7U'
    lines = [line for line in web.getUrl(url).split('\n') if '<id>' in line]
    if not lines:
        return None
    # Extract the text between '<id>' and '</id>' of the first match.
    citizen_id = lines[0].split('>')[1].split('<')[0]
    return self.loadById(citizen_id)
示例4: urbandict
def urbandict(self, irc, msg, args, opts, words):
    """<phrase>
    Returns the definition and usage of <phrase> from UrbanDictionary.com.
    """
    # Optional --def N picks the Nth definition (1-based).
    use_definition = None
    for (opt, arg) in opts:
        if opt == 'def':
            use_definition = int(arg)
    terms = ' '.join(words)
    url = 'http://www.urbandictionary.com/define.php?term=%s' \
        % web.urlquote(terms)
    html = web.getUrl(url)
    doc = fromstring(html)
    if len(doc.xpath('//div[@id="not_defined_yet"]')):
        irc.error('No definition found.', Raise=True)
    definitions = []
    for div in doc.xpath('//div[@class="definition"]'):
        text = div.text_content()
        # Cache the sibling lookup instead of calling getnext() three times.
        sibling = div.getnext()
        if sibling.tag == 'div' \
                and sibling.attrib.get('class', None) == 'example':
            text += ' [example] ' + sibling.text_content() + ' [/example] '
        # NOTE(review): this character class also strips literal backslashes,
        # not just CR/LF; preserved as-is — confirm before tightening to [\r\n]+.
        text = re.sub(r'[\\\r\\\n]+', ' ', text)
        definitions.append(text)
    if use_definition is not None:  # idiom fix: was `!= None`
        definitions = [definitions[use_definition - 1]]
    reply_msg = '%s: %s' % (terms, '; '.join(definitions))
    irc.reply(reply_msg.encode('utf8'))
示例5: _fetch_json
def _fetch_json(self, url):
    """Download *url* and parse the body as JSON; return None on a parse error."""
    raw = web.getUrl(url, headers=HEADERS)
    try:
        return simplejson.loads(raw)
    except ValueError:
        return None
示例6: trends
def trends(self, irc, msg, args, timeframe):
    """@trends [current|daily|weekly]
    Return top trending Twitter topics for one of three timeframes:
    current, daily or weekly. Default is current.
    """
    if not timeframe:
        timeframe = "current"
    if timeframe not in ["current", "daily", "weekly"]:
        irc.reply("Invalid timeframe. Must be one of 'current', 'daily' or 'weekly'")
        return
    url = "http://search.twitter.com/trends/%s.json" % timeframe
    try:
        doc = web.getUrl(url, headers=HEADERS)
        json = simplejson.loads(doc)
    # FIX: was a bare `except:`, which also swallows SystemExit/KeyboardInterrupt.
    except Exception:
        irc.reply("uh-oh, something went awry")
        return
    # NOTE(review): .values()[0] relies on Python 2 dicts returning a list here.
    trends = json["trends"].values()[0]
    tnames = [x["name"] for x in trends]
    # enumerate(..., 1) replaces the manual zip(range(...)) construction.
    resp = ", ".join(["%d. %s" % (i, name) for i, name in enumerate(tnames, 1)])
    irc.reply(resp.encode("utf8", "ignore").replace("\n", " ").strip(" "))
示例7: _query_tmdb
def _query_tmdb(self, cmd, args):
    """Call a TMDB 2.1 API method *cmd* with *args*; return parsed JSON or None."""
    request_url = "http://api.themoviedb.org/2.1/%s/en/json/%s/%s" % (cmd, TMDBK, urllib.quote(str(args)))
    raw = web.getUrl(request_url, headers=HEADERS)
    try:
        return simplejson.loads(raw)
    except ValueError:
        return None
示例8: _chefjivevalleypig
def _chefjivevalleypig(self, irc, type, s):
    """Run *s* through the Bork dialect CGI translator *type* and reply with the result."""
    query = urlencode(dict(input=s, type=type))
    page = web.getUrl('http://www.cs.utexas.edu/users/jbc/bork/bork.cgi?' + query, headers=HEADERS)
    # Undo the translator's mangled ampersand entities before entity decoding.
    page = re.sub('&(ampway|emp);', '&', page)
    decoded = BS.BeautifulStoneSoup(page, convertEntities=BS.BeautifulStoneSoup.HTML_ENTITIES).contents[0]
    decoded = re.sub('\n', ' ', decoded)
    irc.reply(decoded.encode('utf-8', 'ignore').strip())
示例9: drunk
def drunk(self, irc, msg, s):
    """Translate *s* through thevoicesofmany.com's "drunk" voice and reply."""
    params = urlencode(dict(text=s, voice="drunk"))
    url = "http://www.thevoicesofmany.com/text.php?" + params
    resp = web.getUrl(url, headers=HEADERS)
    soup = BS.BeautifulSoup(resp)
    try:
        translated = soup.find("td", id="top").blockquote.string
    except AttributeError:  # page layout changed / element missing
        irc.reply("oops, didn't work")
        # BUG FIX: the original fell through and sent a second reply.
        return
    # BUG FIX: the original replied with the raw HTML `resp` instead of
    # the extracted translation.
    irc.reply(translated.encode("utf-8", "ignore").strip())
示例10: _yelp_api
def _yelp_api(self, params):
    """Query the Yelp business review search API; return parsed JSON or None.

    *params* is not mutated — the API key is added to a copy.
    """
    query = dict(params)
    query["ywsid"] = YWSID
    raw = web.getUrl("http://api.yelp.com/business_review_search?" + urlencode(query), headers=HEADERS)
    try:
        return simplejson.loads(raw)
    except ValueError:
        return None
示例11: drunk
def drunk(self, irc, msg, s):
    """Translate *s* through thevoicesofmany.com's 'drunk' voice and reply."""
    params = urlencode(dict(text=s, voice='drunk'))
    url = 'http://www.thevoicesofmany.com/text.php?' + params
    resp = web.getUrl(url, headers=HEADERS)
    soup = BS.BeautifulSoup(resp)
    try:
        translated = soup.find('td', id='top').blockquote.string
    except AttributeError:  # page layout changed / element missing
        irc.reply("oops, didn't work")
        # BUG FIX: the original fell through and sent a second reply.
        return
    # BUG FIX: the original replied with the raw HTML `resp` instead of
    # the extracted translation.
    irc.reply(translated.encode('utf-8', 'ignore').strip())
示例12: _search
def _search(self, term):
    """Query the lookup service at SERVICE_URL for *term*.

    Returns a list of (label, category, uri) tuples, one per ns:Result node.
    """
    xml = web.getUrl(SERVICE_URL % urlencode({"QueryString": term}), headers=HEADERS)
    parser = etree.XMLParser(ns_clean=True, remove_blank_text=True)
    tree = etree.parse(StringIO(xml), parser)

    def first(node, path):
        # Convenience wrapper: first match of a namespaced xpath, or whatever
        # self._xpath returns for an empty result.
        return self._xpath(node, path, 0)

    results = []
    for hit in self._xpath(tree, "//ns:Result"):
        label = first(hit, "ns:Label/text()")
        uri = first(hit, "ns:URI/text()")
        category = first(hit, "ns:Categories/ns:Category/ns:Label/text()")
        results.append((label, category, uri))
    return results
示例13: _getJsonResponse
def _getJsonResponse(self, url, retries=2):
    """Fetch *url* and decode the body as JSON, retrying on web errors.

    Retries up to *retries* additional times on web.Error; returns the
    decoded object, or None once all retries are exhausted.
    """
    try:
        log.debug('Retrieving: %s' % (url))
        doc = web.getUrl(url, headers=HEADERS)
        log.debug('Response: %s' % (doc))
        return simplejson.loads(doc)
    # FIX: `except web.Error, e` is Python-2-only syntax; the `as` form
    # works on Python 2.6+ and 3.x.
    except web.Error as e:
        log.warning('Error: %s', str(e))
        if retries > 0:
            log.warning('Retries left: %d' % (retries))
            return self._getJsonResponse(url, retries=retries - 1)
        # Explicit: the original fell off the end and returned None implicitly.
        return None
示例14: lyricsmania_urls
def lyricsmania_urls(artist, title):
    """Return (song page url, print-view url) for a song on lyricsmania.com.

    Raises LyricsNotFound when the site reports the song is not archived.
    """
    title_norm = normalize(title)
    artist_norm = normalize(artist)
    url = 'http://www.lyricsmania.com/%s_lyrics_%s.html' % \
        (title_norm, artist_norm)
    logger.info("Fetching %s" % url)
    html = web.getUrl(url, headers=HEADERS)
    if 'not in our archive' in html:  # idiom fix: was .find(...) != -1
        raise LyricsNotFound
    doc = fromstring(html)
    link = doc.xpath('//a[starts-with(@href, "/print")]')[0]
    # BUG FIX: href already starts with '/', so the original '/%s' template
    # produced 'lyricsmania.com//print...' with a doubled slash.
    return (url, 'http://www.lyricsmania.com%s' % link.attrib['href'])
示例15: sabram
def sabram(self, irc, msg, args):
    """ [<text>]
    Get @sabram to falsely attribute a quote to Cliff!
    """
    template = '<sabram> Cliff said: "%s"'
    if args:
        irc.reply(template % ' '.join(args))
        return
    url = "http://www.ivyjoy.com/quote.shtml"
    try:
        resp = web.getUrl(url, headers={'User-agent':'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.0.13) Gecko/2009073022 Firefox/3.0.13'})
        soup = BS.BeautifulSoup(resp)
        quotation = soup.find('font').contents[0].strip()
    except Exception:
        # BUG FIX: the original fell through with `quotation` unbound and
        # raised NameError on the final reply.
        irc.reply(template % "Some stupid error occurred")
        return
    irc.reply(template % quotation, prefixNick=False)