本文整理汇总了Python中web.get函数的典型用法代码示例。如果您正苦于以下问题:Python get函数的具体用法?Python get怎么用?Python get使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了get函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: dictionary_search
def dictionary_search(query, phenny):
    """Look up a word's definition via the Wordnik API.

    Tries the query as given, then lowercased, then with capitalized
    words (string.capwords), returning the first variant Wordnik knows.

    Returns a "word - part of speech - definition - attribution" string,
    None when no definition is found, or an error message when the
    wordnik_api_key config option is missing.
    """
    if not hasattr(phenny.config, 'wordnik_api_key'):
        return 'Sorry but you need to set your wordnik_api_key in the config file.'

    def _lookup(term):
        # Single API round-trip; raises on network/JSON/shape errors.
        uri = ('http://api.wordnik.com/v4/word.json/' + term +
               '/definitions?limit=1&includeRelated=false'
               '&sourceDictionaries=wiktionary&useCanonical=false'
               '&includeTags=false&api_key=' + phenny.config.wordnik_api_key)
        return json.loads(web.get(uri))[0]

    query = web.quote(query.replace('!', ''))
    entry = None
    # Same fallback order as the original three copy-pasted attempts:
    # exact term, lowercase, then capitalized words.
    for variant in (query, query.lower(), string.capwords(query.lower())):
        try:
            candidate = _lookup(variant)
            if candidate['word']:
                entry = candidate
                break
        except Exception:
            continue
    if entry is None:
        return None
    dpart = string.capwords(entry['partOfSpeech'].replace('-', ' '))
    return (entry['word'] + ' - ' + dpart + ' - ' +
            entry['text'] + ' - ' + entry['attributionText'])
示例2: scrape_wiki_codes
def scrape_wiki_codes():
    """Scrape Wikipedia's ISO 639-1/-2 tables into a {code: name} dict."""
    codes = {}
    prefix = 'http://en.wikipedia.org/wiki/List_of_ISO_639'
    # ISO 639-1 (two-letter codes): name in column 2, code in column 4.
    doc = html.document_fromstring(web.get(prefix + '-1_codes'))
    for tr in doc.find_class('wikitable')[0].findall('tr')[1:]:
        cells = tr.findall('td')
        codes[cells[4].text] = cells[2].find('a').text
    # ISO 639-2 (three-letter codes): name link in column 3, code(s) in column 0.
    doc = html.document_fromstring(web.get(prefix + '-2_codes'))
    for tr in doc.find_class('wikitable')[0].findall('tr')[1:]:
        cells = tr.findall('td')
        anchor = cells[3].find('a')
        if not anchor:
            # No linked name for this row; skip it.
            continue
        name = anchor.text
        candidates = cells[0].text.split(' ')
        if len(candidates) == 1:
            code = candidates[0]
        else:
            # Several codes listed; the starred one is the preferred code.
            for candidate in candidates:
                if '*' in candidate:
                    code = candidate.replace('*', '')
                    break
        codes[code] = name
    return codes
示例3: randomreddit
def randomreddit(phenny, input):
    """.random <subreddit> - reply with a random frontpage link from a subreddit.

    Validates the subreddit name, fetches its JSON listing (retrying, since
    reddit frequently times out), and replies with a random post, flagging
    NSFW posts. Raises GrumbleError when reddit stays unreachable.
    """
    subreddit = input.group(2)
    if not subreddit:
        phenny.say(".random <subreddit> - get a random link from the subreddit's frontpage")
        return
    if not re.match('^[A-Za-z0-9_-]*$', subreddit):
        phenny.say(input.nick + ": bad subreddit format.")
        return
    url = "http://www.reddit.com/r/" + subreddit + "/.json"
    # The original nested three try/excepts; a retry loop is equivalent.
    for _attempt in range(3):
        try:
            resp = web.get(url)
            break
        except Exception:
            continue
    else:
        raise GrumbleError('Reddit or subreddit unreachable.')
    reddit = json.loads(resp)
    post = choice(reddit['data']['children'])
    link = post['data']['url']
    title = post['data']['title']
    if post['data']['over_18']:
        phenny.reply("!!NSFW!! " + link + " (" + title + ") !!NSFW!!")
    else:
        phenny.reply(link + " (" + title + ")")
示例4: checkBeacon
def checkBeacon(phenny):
global beaconStatus
beaconStatus = True
while not singleton.beaconPlzStop:
"""check beacon """
f = open(beaconFile)
activated = int(f.read())
f.close()
if (int(activated) + beaconDelay) > int(time.time()):
web.get(beaconURL + 'ACTIVE')
a.digital_write(9, firmata.HIGH)
LEDShift(a, c.currentColor, "00FF00", 2)
time.sleep(2)
LEDShift(a, "00FF00", c.currentColor, 2)
if (beaconStatus == False):
print "Beacon has been activated"
phenny.msg('#hive76', "Beacon has been activated")
beaconStatus = True
else:
web.get(beaconURL + 'INACTIVE')
a.digital_write(9, firmata.LOW)
LEDShift(a, c.currentColor, "FF0000", 2)
time.sleep(2)
LEDShift(a, "FF0000", c.currentColor, 2)
if (beaconStatus == True):
phenny.msg('#hive76', "Beacon has deactivated")
print "Beacon has been deactivated"
beaconStatus = False
time.sleep(30)
示例5: forecast
def forecast(phenny, input):
    """Say the forecast.io summary for a saved nick location or a given place."""
    target = input.group(2)
    # Prefer a location stored for the named nick (or the caller's nick).
    if target:
        stored = readdbattr(target.lower(), 'location')
    else:
        stored = readdbattr(input.nick.lower(), 'location')
    if stored:
        place = stored.split(".")[0]
        if place.isdigit():
            place = stored.split(";")[0]  # fetched something so use location from said nick
        else:
            place = stored.split(".")[0]  # fetched something so use location from said nick
    else:
        place = target  # look up arguments given (place)
    try:
        if not place:
            return phenny.say("Location please?")
    except:
        pass
    # Geocode the place name, then feed the coordinates to forecast.io.
    geo = json.loads(web.get("http://maps.googleapis.com/maps/api/geocode/json?sensor=false&address=" + place))
    pprint.pprint(geo)
    coords = geo['results'][0]['geometry']['location']
    raw = web.get("https://api.forecast.io/forecast/" + phenny.config.forecastapikey + "/" + str(coords['lat']) + ',' + str(coords['lng']))
    data = json.loads(raw)
    pprint.pprint(data)
    # Fall back from current conditions to minutely, then hourly summaries.
    try:
        summary = 'Currently : ' + data['currently']['summary']
    except:
        try:
            summary = 'Next few minutes : ' + data['minutely']['summary']
        except:
            summary = 'Next Hour : ' + data['hourly']['summary']
    return phenny.say(summary)
示例6: office
def office(jenni, input):
    """Reply with the office status line scraped from the OSU OSS page.

    Falls back to the internal mirror when the public host is unreachable.
    """
    try:
        site = web.get("http://opensource.osu.edu/~meinwald/office.php")
    except Exception:  # narrowed from a bare except; still best-effort fallback
        site = web.get("http://web2/~meinwald/office.php")
    lines = site.split("\n")
    # The status text lives on the third line of the page.
    jenni.reply(lines[2])
示例7: search
def search(self, term):
    """Return (html, url) for *term* on the wiki, or None for no results.

    Fetches the exact page first; on an HTTP error falls back to the
    wiki search API and retrieves the top hit instead.

    Raises ContentDecodingError on bad JSON and ServerFault when the
    API reports an error.
    """
    exact = quote(format_term(term))
    exact_url = self.endpoints['url'].format(exact)
    try:
        page = web.get(exact_url)
        return (page, exact_url)
    except HTTPError:
        pass
    # Exact page missing - consult the search API.
    api_url = self.endpoints['api'].format(quote(deformat_term(term)))
    try:
        payload = json.loads(web.get(api_url))
    except ValueError as err:
        raise ContentDecodingError(str(err))
    if 'error' in payload:
        raise ServerFault(payload['error'])
    hits = payload['query']['search']
    if not hits:
        return None
    best = quote(format_term(hits[0]['title']))
    best_url = self.endpoints['url'].format(best)
    return (web.get(best_url), best_url)
示例8: duck_api
def duck_api(query):
    '''Send 'query' to DDG's API and return results as a dictionary'''
    query = web.urllib.quote(query)
    uri = 'https://api.duckduckgo.com/?q=%s&format=json&no_html=1&no_redirect=1&kp=-1' % query
    # Fetch once: the original called web.get(uri) twice and threw away
    # the first response.
    return json.loads(web.get(uri))
示例9: search
def search(self, term):
    """Return (html, url) for *term*, or None when nothing usable is found.

    Tries the exact page URL first; on an HTTP error queries the wiki
    search API and fetches the top result. Bad API JSON yields None.
    """
    exact = quote(format_term(term))
    exact_url = self.endpoints['url'].format(exact)
    try:
        return (web.get(exact_url), exact_url)
    except HTTPError:
        pass
    # No exact page - fall back to the search API.
    api_url = self.endpoints['api'].format(quote(deformat_term(term)))
    try:
        payload = json.loads(web.get(api_url))
    except ValueError:
        return None
    hits = payload['query']['search']
    if not hits:
        return None
    title = quote(format_term(hits[0]['title']))
    page_url = self.endpoints['url'].format(title)
    return (web.get(page_url), page_url)
示例10: getdjia
def getdjia(date):
    """Return the DJIA value for *date* (a 'YYYY-MM-DD' key), or None.

    Lookup order: in-memory cache, database cache, the crox mirrors
    (caching any hit), and finally the peeron.com xkcd archive.
    """
    if date in djiacache:
        return djiacache[date]
    conn = pool.connection()
    cur = conn.cursor()
    # Parameterized queries: the old "... = '%s'" string formatting was
    # SQL-injectable. NOTE(review): assumes a format-paramstyle driver
    # (e.g. MySQLdb), matching the %s placeholders already used here.
    cur.execute("select djia from djiacache where dato = %s", (date,))
    djia = cur.fetchone()
    if djia:
        return djia[0]
    for crox in croxes:
        try:
            djia = web.get(crox % date)
            # Mirrors report failure in the body text, not via HTTP status.
            if not (("error" in djia) or ("available" in djia)):
                djiacache[date] = djia
                cur.execute("insert into djiacache values (%s, %s)", (date, djia))
                conn.commit()
                return djia
        except Exception:
            pass
    try:
        y, m, d = str(date).split("-")
        return web.get("http://irc.peeron.com/xkcd/map/data/%s/%s/%s" % (y, m, d))
    except Exception:
        return None
示例11: ShowWeather
def ShowWeather(self, local, para, nick, todos, detalhes):
    """Search br.weather.com for *local* and report the weather to *para*."""
    host = 'http://br.weather.com'
    uri = host + '/search/search?what=WeatherLocalUndeclared&where=' + local.replace(' ', '+').lower()
    dados = web.get(uri, True)
    pagina = dados[0]
    if Tag.inicio_conteudo in pagina:
        # Direct hit: the search landed straight on a weather page.
        self.weather(pagina, para, dados[1], detalhes)
        return
    if not (Tag.titulo in pagina):
        # Nothing found at all.
        self.Bot.Say(para, self.msg_rand(Frases.nao_encontrado).replace('%cidade', local))
        return
    # Result listing: collect every candidate location link.
    locais = []
    pagina = pagina[pagina.find(Tag.titulo):]
    while Tag.link_inicio in pagina:
        fim = pagina.find(Tag.link_fim)
        trecho = pagina[pagina.find(Tag.link_inicio):fim]
        locais.append(trecho[:trecho.find('"')])
        pagina = pagina[fim + len(Tag.link_fim):]
    if len(locais) == 0:
        return
    if len(locais) == 1:
        todos = True
    if not todos:
        # Several matches and the caller didn't ask for all of them.
        self.Bot.Say(para, Frases.varios_resultados % len(locais))
        return
    for link in locais:
        dados = web.get(host + link, True)
        self.weather(dados[0], para, dados[1], detalhes)
示例12: spotify_track
def spotify_track(uri, phenny, radio):
    """Build an IRC-formatted description line for a Spotify track URI.

    Returns "track - artist - album - m:ss", plus the release date when
    the album lookup and the optional dateutil parse both succeed.
    Returns None when the track fetch fails.
    """
    segments = uri.split('/')
    if radio is False:
        track_id = segments[4]
    else:
        track_id = segments[5]
    try:
        body = web.get('https://api.spotify.com/v1/tracks/' + track_id)
    except:
        return
    info = json.loads(body)
    track = info['name']
    album = info['album']['name']
    credited = info['artists']
    # More than one credited artist collapses to a single label.
    if len(credited) > 1:
        artist = "Various Artists"
    else:
        artist = credited[0]['name']
    album_url = 'https://api.spotify.com/v1/albums/' + info['album']['id']
    try:
        album_info = json.loads(web.get(album_url))
        released = album_info['release_date']
    except:
        isdateutil = False
    # Best-effort release-date formatting; any failure (missing dateutil,
    # failed album fetch leaving names unbound, unknown precision) just
    # drops the "released ..." suffix.
    try:
        import dateutil.parser
        isdateutil = True
        parsed = dateutil.parser.parse(released)
        stamp = time.gmtime(calendar.timegm(parsed.timetuple()))
        precision = album_info['release_date_precision']
        if re.compile('day').match(precision):
            releasedformat = time.strftime('on %A %B %d, %Y', stamp)
        elif re.compile('month').match(precision):
            releasedformat = time.strftime('in %B, %Y', stamp)
        elif re.compile('year').match(precision):
            releasedformat = time.strftime('in %Y', stamp)
        else:
            isdateutil = False
    except:
        isdateutil = False
    milliseconds = info['duration_ms']
    seconds = (milliseconds / 1000) % 60
    minutes = (milliseconds / (1000 * 60)) % 60
    tracktime = str(int(minutes)) + ":" + str(round(seconds)).zfill(2)
    base = '\002\00303,01Spotify\017 ' + track + ' - ' + artist + ' - ' + album + ' - ' + tracktime
    if isdateutil is True:
        return base + ' released ' + releasedformat
    else:
        return base
示例13: nws_lookup
def nws_lookup(jenni, input):
    """ Look up weather watches, warnings, and advisories.

    Accepts either "county, state" or a ZIP code, resolves it to an
    alerts.weather.gov feed, and replies with each active alert (or the
    no-alerts message).
    """
    text = input.group(2)
    if not text:
        return
    bits = text.split(",")
    master_url = False
    if len(bits) == 2:
        ## county given
        url_part1 = "http://alerts.weather.gov"
        state = bits[1].lstrip().rstrip().lower()
        county = bits[0].lstrip().rstrip().lower()
        if state not in states:
            jenni.reply("State not found.")
            return
        url1 = county_list.format(states[state])
        page1 = web.get(url1).split("\n")
        # Fix: url_part2 was unbound (NameError) when no line matched.
        url_part2 = None
        for line in page1:
            mystr = ">" + unicode(county) + "<"
            if mystr in line.lower():
                url_part2 = line[9:36]
                break
        if not url_part2:
            jenni.reply("Could not find county.")
            return
        master_url = url_part1 + url_part2
        location = text
    elif len(bits) == 1:
        ## zip code
        if bits[0]:
            urlz = zip_code_lookup.format(bits[0])
            pagez = web.get(urlz)
            fips = re_fips.findall(pagez)
            if fips:
                state = re_state.findall(pagez)
                if not state:
                    jenni.reply("Could not match ZIP code to a state")
                    return
                location = state[0]
                state = location[-2:]
                # FIPS code for county-level alerts: "<ST>C<fips>".
                fips = unicode(state) + "C" + unicode(fips[0])
                master_url = alerts.format(fips)
            else:
                jenni.reply("ZIP code does not exist.")
                return
    if not master_url:
        jenni.reply("Invalid input. Please enter a ZIP code or a county and state pairing, such as 'Franklin, Ohio'")
        return
    feed = feedparser.parse(master_url)
    for item in feed.entries:
        # The feed's "no alerts" entry matches the canned message prefix.
        if nomsg[:51] == item["title"]:
            jenni.reply(nomsg.format(location))
            break
        else:
            jenni.reply(unicode(item["title"]))
            jenni.reply(unicode(item["summary"]))
示例14: setup
def _patcher_get(verinfo, tail):
    """Fetch patcher/<os>/<arch>/<tail>, trying the primary then the backup
    mirror from *verinfo*; return the raw bytes, or None when both fail."""
    for key in ('url', 'url2'):
        try:
            return web.get('{0}{1}/{2}/{3}'.format(verinfo[key], verinfo['os'], verinfo['arch'], tail))
        except Exception:
            pass
    return None


def setup(bot):
    """Download HoN string tables through the patcher mirrors onto *bot*.

    Fetches the patcher manifest, pulls each wanted string-table zip,
    decodes it (utf8, falling back to cp1251), and fills
    bot.stringtables with its key/value entries.
    """
    # NOTE(review): the original guarded this with `if True:` and a
    # commented-out `not hasattr(bot, 'stringtables')` check.
    bot.stringtable_version = None
    verinfo = bot.masterserver_request({'version': '0.0.0.0', 'os': 'lac', 'arch': 'x86-biarch'}, path='patcher/patcher.php')
    verinfo = verinfo[0]
    if bot.stringtable_version == verinfo['version']:
        print("no need to update stringtables")
        return
    # The url/url2 fallback pair is now one helper instead of two
    # copy-pasted try/except blocks per download.
    manifest = _patcher_get(verinfo, '{0}/manifest.xml.zip'.format(verinfo['version']))
    if manifest is None:
        print("Couldn't get manifest for hon's files")
        return
    bot.stringtables = {}
    manifest = etree.fromstring(zipfile.ZipFile(StringIO.StringIO(manifest)).read('manifest.xml'))
    files = []
    for e in manifest:
        if e.tag == 'file' and e.attrib['path'] in stringtablefiles:
            files.append(e.attrib)
    for f in files:
        # Mirrors store three-part versions; strip a trailing ".0".
        if f['version'].count('.') == 3 and f['version'].endswith('.0'):
            f['version'] = f['version'][:-2]
        table = _patcher_get(verinfo, '{0}/{1}.zip'.format(f['version'], f['path']))
        if table is None:
            print("Wasn't able to fetch {0}".format(f['path']))
            continue
        table = zipfile.ZipFile(StringIO.StringIO(table)).read(basename(f['path']))
        try:
            table = table.decode("utf8")
        except Exception:
            table = table.decode("cp1251")
        for line in table.splitlines():
            m = re_entry.match(line)
            if m:
                bot.stringtables[m.group(1)] = m.group(2)
    bot.stringtable_version = verinfo['version']
示例15: awik
def awik(phenny, input):
    """Search for something on Apertium wiki."""
    origterm = input.groups()[1]
    if not origterm:
        return phenny.say('Perhaps you meant ".wik Zen"?')
    term = format_term(origterm)
    try:
        html = str(web.get(wikiuri % (term)))
    except:
        # No direct page: search by title, then by text, taking the top hit.
        html = None
        for mode in ('title', 'text'):
            response = json.loads(str(web.get(wikisearchuri % (term, mode))))
            hits = response['query']['search']
            if len(hits):
                term = hits[0]['title']
                html = str(web.get(wikiuri % (term)))
                break
        if html is None:
            phenny.reply("No wiki results for that term.")
            return
    doc = lxml.html.fromstring(html)
    if "#" in origterm:
        # A "#section" request: jump to that heading's following element.
        section = format_subsection(origterm.split("#")[1])
        node = doc.find(".//span[@id='%s']" % section)
        if node is None:
            phenny.reply("That subsection does not exist.")
            return
        node = node.getparent().getnext()
    else:
        paragraphs = doc.findall('.//p')
        if len(paragraphs) > 2:
            node = doc.findall('.//p')[1]
        else:
            node = doc.findall(".//*[@id='mw-content-text']")[0]
    snippet = '"' + node.text_content().split(". ")[0] + '"'
    # Keep the whole reply (snippet + link) under the IRC length budget.
    limit = 430 - len((' - ' + wikiuri % (format_term_display(term))).encode('utf-8'))
    if len(snippet.encode('utf-8')) > limit:
        snippet = snippet.encode('utf-8')[:limit].decode('utf-8', 'ignore')
        pieces = snippet[:-5].split(' ')
        pieces.pop()
        snippet = ' '.join(pieces) + ' [...]'
    phenny.say(snippet + ' - ' + wikiuri % (format_term_display(term)))