This page collects typical usage examples of the get_soup function from Python's util.http module. If you have been wondering what get_soup does and how to use it, the curated examples below may help. 15 code examples are shown, sorted by popularity by default.
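The get_soup helper itself is not shown on this page. As a rough sketch of what such a helper usually looks like in these IRC-bot codebases, assuming it simply wraps an HTTP GET and BeautifulSoup parsing (the real util.http module may differ):

# Hypothetical sketch of util.http.get_soup -- an assumption,
# not the actual implementation from util.http.
import requests
from bs4 import BeautifulSoup

def get_soup(url, **kwargs):
    # Fetch the page with a timeout so a slow site cannot hang the bot.
    response = requests.get(url, timeout=10, **kwargs)
    response.raise_for_status()
    # Parse the HTML into a navigable soup object.
    return BeautifulSoup(response.text, 'html.parser')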
Example 1: fourchanthread_url
def fourchanthread_url(match):
    soup = http.get_soup(match)
    title = soup.title.renderContents().strip()
    post = soup.find('div', {'class': 'opContainer'})
    comment = post.find('blockquote', {'class': 'postMessage'}).renderContents().strip()
    author = post.find_all('span', {'class': 'nameBlock'})[1]
    # trimlength is a module-level constant defined elsewhere in the plugin.
    return http.process_text("\x02{}\x02 - posted by \x02{}\x02: {}".format(title, author, comment[:trimlength]))
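A possible invocation, assuming the function is registered as a URL hook and match arrives as the matched thread URL in string form (the hook machinery is not shown on this page):

# Hypothetical call; the thread URL is illustrative only.
print(fourchanthread_url("http://boards.4chan.org/g/thread/12345678"))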
Example 2: animetake
def animetake(inp):
    "animetake <list> | <get [query]> - searches animetake for the latest updates"
    command = None
    query = None
    try:
        inp_array = inp.split(" ")
        command = inp_array[0]
        query = inp_array[1]
    except IndexError:
        pass

    url = "http://www.animetake.com/"
    anime_updates = []
    response = ""
    soup = http.get_soup(url)
    page = soup.find("div", id="mainContent").ul
    for li in page.findAll("li"):
        anime_link = li.find("div", "updateinfo").h4.a
        anime_updates.append("%s : %s" % (anime_link["title"], anime_link["href"]))

    if command == "list":
        count = 1
        response = "Latest Anime Updates: "
        for anime_title in anime_updates:
            response += "%s | " % (anime_title.split(" : ")[0])
            count += 1
            if count == 11:
                break
    elif command == "get" and query:
        indices = [i for i, x in enumerate(anime_updates) if query in x]
        for index in indices:
            response += "%s " % (anime_updates[index])
    return response
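Usage sketch, assuming inp is the raw argument string handed over by the bot's command dispatcher:

# Hypothetical calls; output depends on whatever is listed at the time.
print(animetake("list"))        # first ten of the latest updates
print(animetake("get Naruto"))  # updates whose entry contains the query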
Example 3: horoscope
def horoscope(inp, db=None, notice=None, nick=None):
    """horoscope <sign> [save] -- Get your horoscope."""
    save = False

    database.init(db)

    if '@' in inp:
        nick = inp.split('@')[1].strip()
        sign = database.get(db, 'users', 'horoscope', 'nick', nick)
        if not sign:
            return "No horoscope sign stored for {}.".format(nick)
    else:
        sign = database.get(db, 'users', 'horoscope', 'nick', nick)
        if not inp:
            if not sign:
                notice(horoscope.__doc__)
                return
        else:
            if not sign:
                save = True
            if " save" in inp:
                save = True
            sign = inp.split()[0]

    url = "http://my.horoscope.com/astrology/free-daily-horoscope-%s.html" % sign

    try:
        result = http.get_soup(url)
        title = result.find_all('h1', {'class': 'h1b'})[1].text
        horoscopetxt = result.find('div', {'id': 'textline'}).text
    except Exception:
        return "Could not get the horoscope for {}.".format(sign.encode('utf8'))

    if sign and save:
        database.set(db, 'users', 'horoscope', sign, 'nick', nick)

    return u"\x02{}\x02 {}".format(title, horoscopetxt)
Example 4: animetake
def animetake(inp):
    "animetake <list> | <get [query]> - searches animetake for the latest updates"
    command = None
    query = None
    try:
        inp_array = inp.split(' ')
        command = inp_array[0]
        query = inp_array[1]
    except IndexError:
        pass

    url = "http://www.animetake.com/"
    anime_updates = []
    response = ""
    soup = http.get_soup(url)
    page = soup.find('div', id='mainContent').ul
    for li in page.findAll('li'):
        anime_link = li.find('div', 'updateinfo').h4.a
        anime_updates.append('%s : %s' % (anime_link['title'], anime_link['href']))

    if command == 'list':
        count = 1
        response = "Latest Anime Updates: "
        for anime_title in anime_updates:
            response += '%s | ' % (anime_title.split(' : ')[0])
            count += 1
            if count == 11:
                break
    elif command == 'get' and query:
        indices = [i for i, x in enumerate(anime_updates) if query in x]
        for index in indices:
            response += '%s ' % (anime_updates[index])
    return response
Example 5: fourchan_url
def fourchan_url(match):
    soup = http.get_soup(match)
    title = soup.title.renderContents().strip()
    post = soup.find('div', {'class': 'opContainer'})
    comment = post.find('blockquote', {'class': 'postMessage'})
    author = post.find_all('span', {'class': 'nameBlock'})[1]
    return http.process_text('\x02%s\x02 - posted by \x02%s\x02: %s' % (title, author, comment))
Example 6: get_woots
def get_woots(inp):
    woots = {}
    for k, v in inp.items():
        try:
            w = {}
            # api is a module-level URL prefix defined elsewhere in the plugin.
            soup = http.get_soup(api + v)
            w['product'] = soup.find('woot:product').text
            w['wootoff'] = soup.find('woot:wootoff').text
            w['price'] = soup.find('woot:price').text
            w['pricerange'] = soup.find('woot:pricerange').text
            w['shipping'] = soup.find('woot:shipping').text
            w['url'] = "http://{}".format(v)
            w['soldout'] = soup.find('woot:soldout').text
            w['soldoutpercent'] = soup.find('woot:soldoutpercentage').text
            category = text.capitalize_first(k if k == 'woot' else "%s woot" % k)
            if w['wootoff'] != "false":
                category += "off!"
            woots[category] = w
        except Exception:
            continue
    return woots
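A plausible invocation, assuming inp maps category names to Woot hostnames and the module-level api constant points at an XML API prefix (both are assumptions; neither is shown on this page):

# Hypothetical mapping of category name -> hostname.
sites = {'woot': 'www.woot.com', 'shirt': 'shirt.woot.com'}
print(get_woots(sites))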
Example 7: fact
def fact():
    """fact -- Gets a random fact from OMGFACTS."""
    attempts = 0

    # all of this is because omgfacts is fail
    while True:
        try:
            soup = http.get_soup("http://www.omg-facts.com/random")
        except (http.HTTPError, http.URLError):
            if attempts > 2:
                return "Could not find a fact!"
            else:
                attempts += 1
                continue

        response = soup.find("a", {"class": "surprise"})
        link = response["href"]
        fact_data = "".join(response.find(text=True))

        if fact_data:
            fact_data = fact_data.strip()
            break
        else:
            if attempts > 2:
                return "Could not find a fact!"
            else:
                attempts += 1
                continue

    url = web.try_isgd(link)
    return "{} - {}".format(fact_data, url)
Example 8: fact
def fact(inp, say=False, nick=False):
    """fact -- Gets a random fact from OMGFACTS."""
    attempts = 0

    # all of this is because omgfacts is fail
    while True:
        try:
            soup = http.get_soup('http://www.omg-facts.com/random')
        except Exception:
            if attempts > 2:
                return "Could not find a fact!"
            else:
                attempts += 1
                continue

        response = soup.find('a', {'class': 'surprise'})
        link = response['href']
        fact = ''.join(response.find(text=True))

        if fact:
            fact = fact.strip()
            break
        else:
            if attempts > 2:
                return "Could not find a fact!"
            else:
                attempts += 1
                continue

    url = web.try_isgd(link)
    return "{} - {}".format(fact, url)
Example 9: refresh_cache
def refresh_cache():
    """ gets a page of random FMLs and puts them into a list """
    soup = http.get_soup('http://www.fmylife.com/random/')
    for e in soup.find_all('p', attrs={'class': 'block'}):
        fml_id = int(e.find_all('a', href=True)[0]['href'].split('_')[1].split('.')[0])
        text = e.find_all('a')[0].text.strip()
        fml_cache.append((fml_id, text))
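A sketch of how such a cache is typically consumed, assuming fml_cache is the module-level list the function appends to and a bot command pops a random entry per request (the consuming command is not shown on this page):

# Hypothetical consumer of the cache built above.
import random

fml_cache = []  # module-level cache, refilled by refresh_cache()
refresh_cache()
if fml_cache:
    fml_id, text = fml_cache.pop(random.randrange(len(fml_cache)))
    print("(#{}) {}".format(fml_id, text))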
Example 10: refresh_cache
def refresh_cache():
    """ gets a page of random FMLs and puts them into a list """
    soup = http.get_soup('http://www.fmylife.com/random/')
    for e in soup.find_all('div', {'class': 'post article'}):
        fml_id = int(e['id'])
        text = ''.join(e.find('p').find_all(text=True))
        fml_cache.append((fml_id, text))
Example 11: get_bash_quote
def get_bash_quote(inp):
    try:
        soup = http.get_soup('http://bash.org/?%s' % inp)
        quote_info = soup.find('p', {'class': 'quote'}).text
        quote = soup.find('p', {'class': 'qt'}).text
        return '\x02#{}\x02 ({}): {}'.format(
            quote_info.split()[0].replace('#', ''),
            quote_info.split()[1].split('(')[1].split(')')[0].strip(),
            quote.replace('\n', ' ').replace('\r', ' |'))
    except Exception:
        return "No quote found."
Example 12: get_yandere_tags
def get_yandere_tags(inp):
    url = 'https://yande.re/post?tags=%s' % inp.replace(' ', '_')
    soup = http.get_soup(url)
    imagelist = soup.find('ul', {'id': 'post-list-posts'}).findAll('li')
    image = imagelist[random.randint(0, len(imagelist) - 1)]
    imageid = image['id'].replace('p', '')
    title = image.find('img')['title']
    src = image.find('a', {'class': 'directlink'})['href']
    return u"\x034NSFW\x03: \x02({})\x02 {}: {}".format(imageid, title, web.isgd(http.unquote(src)))
Example 13: refresh_cache
def refresh_cache():
    "gets a page of random MLIAs and puts them into a list"
    url = 'http://mylifeisaverage.com/%s' % random.randint(1, 11000)
    soup = http.get_soup(url)
    for story in soup.find_all('div', {'class': 'story '}):
        mlia_id = story.find('span', {'class': 'left'}).a.text
        mlia_text = story.find('div', {'class': 'sc'}).text.strip()
        mlia_cache.append((mlia_id, mlia_text))
Example 14: get_yandere_tags
def get_yandere_tags(inp):
    url = "https://yande.re/post?tags=%s" % inp.replace(" ", "_")
    soup = http.get_soup(url)
    imagelist = soup.find("ul", {"id": "post-list-posts"}).findAll("li")
    image = imagelist[random.randint(0, len(imagelist) - 1)]
    imageid = image["id"].replace("p", "")
    title = image.find("img")["title"]
    src = image.find("a", {"class": "directlink"})["href"]
    return "\x034NSFW\x03: \x02({})\x02 {}: {}".format(imageid, title, web.isgd(http.unquote(src)))
Example 15: steam
def steam(inp):
    """.steam [search] - Search for specified game/trailer/DLC."""
    soup = http.get_soup("http://store.steampowered.com/search/?term={}".format(inp))
    result = soup.find('a', {'class': 'search_result_row'})
    try:
        # get_steam_info is a helper defined elsewhere in the plugin.
        return (get_steam_info(result['href']) +
                " - " + web.try_googl(result['href']))
    except Exception as e:
        print("Steam search error: {}".format(e))
        return "Steam API error, please try again later."