This article collects and summarizes typical usage examples of the mechanize.Browser.form['q'] method in Python. If you are wondering how exactly Browser.form['q'] is used, or what it looks like in practice, the hand-picked code samples below may help. You can also read further about the class it belongs to, mechanize.Browser.
A total of 7 code examples of the Browser.form['q'] method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
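Before diving into the examples, here is a minimal sketch of the pattern they all share: open a page, select the form that contains the search box, assign the query text through br.form['q'], and submit. The target URL and form position below are placeholder assumptions for illustration only, not taken from any of the examples.

from mechanize import Browser

br = Browser()
br.set_handle_robots(False)                    # ignore robots.txt
br.addheaders = [('User-agent', 'Firefox')]    # present a browser-like user agent
br.open("http://www.example.com/")             # assumed page containing a search form
br.select_form(nr=0)                           # assumed: the first form on the page
br.form['q'] = 'mechanize'                     # fill the control named 'q'
response = br.submit()
print(response.geturl())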
Example 1: __init__
# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import form['q'] [as alias]
def __init__(self):
    '''
    Setting start urls dynamically using mechanize
    '''
    product_list = ['micromax', 'apple']
    br = Browser()
    br.set_handle_robots(False)
    br.addheaders = [('User-agent', 'Firefox')]
    for pl in product_list:
        br.open("http://www.flipkart.com/")
        br.select_form(nr=1)
        br.form['q'] = pl
        self.resp = br.submit()
        self.start_urls.append(self.resp.geturl())
Example 2: get_product_url
# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import form['q'] [as alias]
def get_product_url(item_list):
    """
    Return the flipkart url for each item in item_list.
    """
    url_list = []
    br = Browser()                                 # get the browser instance
    br.set_handle_robots(False)                    # ignore robots.txt
    br.addheaders = [('User-agent', 'Firefox')]    # set the browser user agent
    br.open("http://www.flipkart.com/")            # open the url
    for item in item_list:
        br.select_form(nr=1)                       # select the form by position, as no form name is available
        br.form['q'] = item                        # set the item to be searched
        br.submit()
        url_list.append(br.geturl())
    return url_list
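A hypothetical call of the function above, assuming it is importable from the surrounding module; the product names are made up for illustration:

if __name__ == '__main__':
    for url in get_product_url(['micromax', 'apple']):
        print(url)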
Example 3: searchByGoogle
# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import form['q'] [as alias]
def searchByGoogle(ean=None, short=False):
    # This snippet also relies on re, BeautifulSoup and (Python 2) unicode being imported.
    br = Browser()
    br.set_handle_robots(False)
    br.addheaders = [('User-agent', 'Firefox')]
    br.open('http://www.google.com')
    br.select_form('f')
    query = str(ean)  # " ".join([str(ii) for ii in (ean, title, interpret) if ii])
    br.form['q'] = "%s site:.audio3.cz" % (query,)
    br.submit()
    audio3LinkRe = re.compile(r"http://www.audio3.cz/goods.asp\?.*gid=(.*)")
    searchResults = []
    if short:
        data = br.response().read()
        soup = BeautifulSoup(unicode(data, 'cp1250'))
        resultSection = soup.find('div', {'id': 'res'})
        results = resultSection.findAll('li', {'class': 'g'})
        for result in results:
            links = result.findAll('a', {})
            for ll in links:
                reresult = audio3LinkRe.search(ll.get('href'))
                if reresult:
                    gid = reresult.group(1)
                    desc = "".join(map(str, result.contents))
                    searchResults.append((gid, desc))
    else:
        for link in br.links():
            siteMatch = audio3LinkRe.search(link.url)
            if siteMatch:
                audio3detail = br.follow_link(link).get_data()
                soup = BeautifulSoup(unicode(audio3detail, 'cp1250'))
                img = soup.find('div', {'id': 'img'})
                desc = soup.find('ul', {'id': 'desc'})
                res = (siteMatch.group(1),
                       "<style type='text/css'>.saleprice{color:red;font-weight:bold}</style>"
                       "<div style='float:left;margin-right:3em;'>%s</div><div>%s</div>"
                       "<a href='%s'>original link</a><hr/>" % (str(img), str(desc), link.url))
                searchResults.append(res)
    return (searchResults, None)
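A hypothetical call of searchByGoogle, assuming the re, BeautifulSoup and Python 2 unicode dependencies noted above are in place; the EAN value is invented for illustration:

results, _ = searchByGoogle(ean='8594030332085', short=True)
for gid, desc in results:
    print(gid)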
Example 4: Browser
# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import form['q'] [as alias]
from mechanize import Browser

br = Browser()                                 # get the browser instance
br.set_handle_robots(False)                    # ignore robots.txt
br.addheaders = [('User-agent', 'Firefox')]    # set the browser user agent
br.open("http://www.google.co.in/")            # open the url
br.select_form('f')                            # select the search form
br.form['q'] = 'python'                        # set the input field text to be searched
br.submit()
for link in br.links():
    print(link.text)
Example 5: searchTitle
# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import form['q'] [as alias]
def searchTitle(rawtitle):
    br = Browser()
    # Ignore robots.txt
    br.set_handle_robots(False)
    # Google demands a user-agent that isn't a robot
    br.addheaders = [('User-agent', 'Firefox')]
    br.open("http://www.google.com")
    br.select_form('f')
    s = 'imdb' + ' + ' + ' '.join(re.compile(r'[\.]').split(rawtitle))
    br.form['q'] = s
    br.submit()
    resp = None
    for link in br.links():
        siteMatch = re.compile('www.imdb.com/title/tt[0-9]*/$').search(link.url)
        if siteMatch:
            resp = br.follow_link(link)
            print(link.url)
            break
    soup = BeautifulSoup(resp.get_data())
    title = re.sub(' - IMDb', '', soup.find('title').string)
    title = re.sub(r'\([0-9]*\)', '', title)
    return title
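A hypothetical call of searchTitle, assuming the same re and BeautifulSoup imports as above; the dotted release-style name is made up for illustration:

print(searchTitle('The.Shawshank.Redemption.1994'))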
Example 6: Browser
# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import form['q'] [as alias]
import re
from mechanize import Browser

br = Browser()
# Ignore robots.txt
br.set_handle_robots(False)
# Google demands a user-agent that isn't a robot
br.addheaders = [('User-agent', 'Firefox')]
# Retrieve the Google home page, saving the response
br.open("http://google.co.in")
# Select the search box and fill in the query
br.select_form(nr=0)
br.form['q'] = 'satvik gupta'
# Get the search results
br.submit()
# Print the text of every result link
resp = None
for link in br.links():
    print(link.text)
    print("\n\n")
    # siteMatch = re.compile('www.foofighters.com').search(link.url)
    # if siteMatch:
    #     resp = br.follow_link(link)
    #     break
# Print the site
# content = resp.get_data()
Example 7: Browser
# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import form['q'] [as alias]
#!/usr/bin/python
import re
from mechanize import Browser

br = Browser()
# Ignore robots.txt
br.set_handle_robots(False)
# Google demands a user-agent that isn't a robot
br.addheaders = [('User-agent', 'Firefox')]
# Retrieve the Google home page, saving the response
br.open("http://google.com")
# Select the search box and search for 'foo'
br.select_form('f')
br.form['q'] = 'foo'
# Get the search results
br.submit()
# Find the link to foofighters.com; why did we run a search?
resp = None
for link in br.links():
    siteMatch = re.compile('www.foofighters.com').search(link.url)
    if siteMatch:
        resp = br.follow_link(link)
        break
# Print the site
content = resp.get_data()
print(content)