

Python Browser.set_cookiejar method code examples

This article collects typical usage examples of the mechanize.Browser.set_cookiejar method in Python. If you are wondering how exactly Browser.set_cookiejar works, how to call it, or what real code that uses it looks like, the hand-picked examples below should help. You can also explore further usage examples of mechanize.Browser, the class this method belongs to.


The 15 code examples of Browser.set_cookiejar shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
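Before the individual examples, here is a minimal sketch of the pattern they all share: create a cookie jar, attach it to a Browser with set_cookiejar, and every request made through that Browser then stores and resends cookies automatically. This sketch is not taken from the examples below; it assumes Python 2 (like the examples, which use cookielib) and uses a placeholder URL.

# A minimal usage sketch (assumption: Python 2; on Python 3 mechanize uses
# http.cookiejar instead of cookielib). http://example.com is a placeholder URL.
import cookielib
from mechanize import Browser

br = Browser()
cj = cookielib.LWPCookieJar()    # any cookielib CookieJar subclass works
br.set_cookiejar(cj)             # all requests made through br now share this jar
br.set_handle_robots(False)      # the examples below also skip robots.txt handling
br.open("http://example.com")    # cookies set by the server end up in cj
for cookie in cj:                # the jar can then be inspected or saved
    print cookie.name, cookie.value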

Example 1: TPB

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import set_cookiejar [as alias]
class TPB(Torrent):
    def __init__(self, otherFilters, minSize, debug):
        self._urlBase = "https://pirateproxy.one"
        self._urlSearch = u"https://pirateproxy.one/search/{name} {episode}"
        self._languageDict = {"english": 2, "spanish": 14}
        # Convert the minimum size from bytes to MiB
        self._minSize = int(minSize) / 1048576
        self._debug = debug
        extraFilters = u"{otherFilters}"
        if otherFilters != "":
            self._otherFilers = u" "+otherFilters
            
        else:
            self._otherFilers = ""
            
        self._urlSearch = ''.join([self._urlSearch,extraFilters.format(otherFilters=self._otherFilers)])
        self._browser = Browser()
        self._browser.set_handle_robots(False)
        self._cookieJar = cookielib.LWPCookieJar()
        self._browser.set_cookiejar(self._cookieJar)
        self._browser.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0'), ('Accept', '*/*'), ('Accept-Encoding',"gzip,deflate")]
        self._browser.open(self._urlBase)
        
    def episodeSearch(self, serie, episode):
        searchQuery = self._urlSearch.format(name=serie, episode=episode["number"])
        logging.debug(u"searchURL: {}".format(searchQuery))
        try:
            self._browser.open(searchQuery)
            gzipContent = self._browser.response().read()
            html = gzip.GzipFile(fileobj=StringIO.StringIO(gzipContent)).read()
            # Scraping the page.
            soup = BeautifulSoup(html)
            try:
                items = soup.find('table',id="searchResult").findAll('tr')
            
            except AttributeError:
                logging.error(u"There wasn't results for: {}".format(searchQuery))
                return None
                
            # Skip the first tr because it is the header (there is no tbody in the html).
            for item in items[1:]:
                contentLength =  item.find("font" ,{"class": "detDesc"}).text.split(',')[1].replace(' ',' ').strip().split(' ')[1:]
                logging.debug(u"contentLength: {}".format(contentLength))
                logging.debug(u"minSize: {}".format(self._minSize))
                if contentLength[1][:3] != 'GiB' and float(contentLength[0]) < self._minSize:
                    logging.warning(u"Torrent to small: {}".format(' '.join([contentLength[0], contentLength[1] [:3]])))
                    continue
                
                magnetUri = item.findAll('a', {"title": "Download this torrent using magnet"})[0]['href']
                logging.info(u"Going to download: {}".format( item.find("a" ,{"class": "detLink"}).text))
                logging.info(u"File size: {}".format(' '.join([contentLength[0], contentLength[1] [:3]])))
                return magnetUri
                break
                
            return None
            
        except HTTPError, e:
            logging.error( u"There was an error in the URL {}.".format(searchQuery))
            return None
            
Developer: zrrs, Project: script.service.series-downloader, Lines: 61, Source file: tpb.py

Example 2: __init__

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import set_cookiejar [as alias]
    def __init__(self, config):
        self.login_url = 'http://%s.ogame.gameforge.com/' % config.country
        # http://s114-br.ogame.gameforge.com/game/index.php?page=overview
        self.index_url = 'http://s%s-%s.ogame.gameforge.com' % (config.universe, config.country) + '/game/index.php'
        headers = [('User-agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64) \
        AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.80 Safari/537.36')]
        # Authentication data
        self.username = config.username
        self.password = config.password
        self.universe = config.universe
        self.country = config.country

        self.logger = logging.getLogger('ogame-bot')
        # Setting up the browser
        self.cj = cookielib.LWPCookieJar()

        br = Browser()
        br.set_cookiejar(self.cj)
        br.set_handle_robots(False)
        br.addheaders = headers
        # self.path = os.path.dirname(os.path.realpath(__file__))
        # name of the cookies file
        # self.cookies_file_name = os.path.join(self.path, 'cookies.tmp')
        self.cookies_file_name = 'cookies.tmp'
        super(AuthenticationProvider, self).__init__(br, config)
Developer: winiciuscota, Project: OG-Bot, Lines: 27, Source file: authentication.py

Example 3: down_image

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import set_cookiejar [as alias]
 def down_image(self, img):
     print "down image from " + img
     down_br = Browser()
     down_cj = CookieJar()
     down_br.set_cookiejar(down_cj)
     fn = tempfile.mktemp(suffix='.png')
     return down_br.retrieve(img, filename = fn)[0]
Developer: lite, Project: yebob_utils, Lines: 9, Source file: Yebob.py

Example 4: login_to_kaggle

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import set_cookiejar [as alias]
    def login_to_kaggle(self):  
        """ Login to Kaggle website
        Parameters:
        -----------
        None
        
        Returns:
        browser: Browser
            a mechanizer Browser object to be used for further access to site
        """          
        
        if self.verbose:
            print("Logging in to Kaggle..."),

        br = Browser()
        cj = cookielib.LWPCookieJar()
        br.set_cookiejar(cj)
        
        br.open(self.kag_login_url)
        
        br.select_form(nr=0)
        br['UserName'] = self.kag_username
        br['Password'] = self.kag_password
        br.submit(nr=0)
        
        if br.title() == "Login | Kaggle":
            raise KaggleError("Unable to login Kaggle with username %s (response title: %s)" % (self.kag_username,br.title()))
        
        if self.verbose:
            print("done!")
        
        return br
Developer: joostgp, Project: ml_toolbox, Lines: 34, Source file: kaggle.py

Example 5: login_url

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import set_cookiejar [as alias]
def login_url(
                url,
                login,
                passwd,
                form_nomber,
                login_name,
                paswd_name,
                submit_nomber
            ):
    br = Browser(); showMessage('Creating the browser interface')
    cj = cookielib.LWPCookieJar()
    br.set_cookiejar(cj)
    br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]

    br.open(url); showMessage('Loading the site and logging in')
    br.select_form(nr = form_nomber)
    br[login_name] = login
    br[paswd_name] = passwd

    res = br.submit(nr = submit_nomber)
    content = res.read()
    # determine the number of pages
    maxPage = int(max_page(content)); showMessage('Determining the number of pages and jumping to the last one')
    curPage = 84
    while curPage < maxPage:
        res = br.open('http://forum.rsload.net/cat-kryaki-seriyniki-varez/topic-4820-page-%d.html' % (maxPage))
        curPage = maxPage
        maxPage = int(max_page(content))
        content = res.read()
    # parse the keys
    if get_all_keys(content):
        webbrowser.open_new_tab('http://forum.rsload.net/cat-kryaki-seriyniki-varez/topic-4820-page-%d.html' % (maxPage)) # Returns True and opens a new tab
Developer: wiom, Project: keys_grabber, Lines: 34, Source file: test.py

Example 6: T1337x

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import set_cookiejar [as alias]
class T1337x(Torrent):
    def __init__(self, otherFilters, minSize, debug):
        self._urlBase = "http://1337x.to"
        self._urlSearch = u"http://1337x.to/search/{name} {episode}"
        self._languageDict = {"english": 2, "spanish": 14}
        # Convert the minimum size from bytes to MiB
        self._minSize = int(minSize) / 1048576
        self._debug = debug
        extraFilters = u"{otherFilters}"
        if otherFilters != "":
            self._otherFilers = u" "+otherFilters
            
        else:
            self._otherFilers = ""
            
        self._urlSearch = ''.join([self._urlSearch, extraFilters.format(otherFilters=self._otherFilers), "/1/"])
        self._browser = Browser()
        self._browser.set_handle_robots(False)
        self._cookieJar = cookielib.LWPCookieJar()
        self._browser.set_cookiejar(self._cookieJar)
        self._browser.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0'), ('Accept', '*/*'), ('Accept-Encoding',"gzip,deflate")]
        self._browser.open(self._urlBase)
        
    def episodeSearch(self, serie, episode):
        searchQuery = self._urlSearch.format(name=serie,episode=episode["number"]).replace(" ","+")
        logging.debug( u"searchURL: {}".format(searchQuery))
        try:
            self._browser.open(searchQuery)
            gzipContent = self._browser.response().read()
            html = gzip.GzipFile(fileobj=StringIO.StringIO(gzipContent)).read()
            # Scraping the page.
            soup = BeautifulSoup(html)
            if (soup.body.findAll(text=' No results were returned. ')):
                logging.error(u"There wasn't results for: {}".format(searchQuery))
                return None
                
            items = soup.find('div', attrs={"class": "table-list-wrap"}).find('tbody').findAll('tr')
            for item in items:
                contentLength =  item.find("td" ,{"class": re.compile("coll-4.*")}).text.split(' ')
                if contentLength[1][:2] != 'GB' and float(contentLength[0]) < self._minSize:
                    logging.warning(u"Torrent to small: {}".format(' '.join([contentLength[0], contentLength[1] [:2]])))
                    continue
                
                infoUrl = item.find("td", attrs={"class": re.compile("coll-1.*")}).findAll('a')[1]['href']
                logging.info(u"Going to download: {}".format(infoUrl.split('/')[-1]))
                logging.info(u"File size: {}".format(' '.join([contentLength[0], contentLength[1] [:2]])))
                self._browser.open(''.join([self._urlBase, infoUrl]) )
                gzipContent = self._browser.response().read()
                html = gzip.GzipFile(fileobj=StringIO.StringIO(gzipContent)).read()
                soup2 = BeautifulSoup(html)
                magnetUri = soup2.find('a', attrs={"class": re.compile(".*btn-magnet")})['href']
                return magnetUri
                
            return None
        except HTTPError, e:
            logging.error( u"There was an error in the URL {}.".format(searchQuery))
            return None
            
Developer: zrrs, Project: script.service.series-downloader, Lines: 60, Source file: t1337x.py

Example 7: main

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import set_cookiejar [as alias]
def main(userid, password):    
    br = Browser()
    br.set_cookiejar(cookielib.LWPCookieJar())
    set_browser_options(br)    
    amazon_signin(br, userid, password)
    books = {}
    get_highlights(br, books)
    dump_books(books)
Developer: dnene, Project: pykindle, Lines: 10, Source file: pykindle.py

Example 8: kickAss

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import set_cookiejar [as alias]
class kickAss(Torrent):
    def __init__(self, language, otherFilters, verified, minSize, debug):
        self._urlBase = "https://kat.cr/"
        self._urlSearch = u"https://kat.cr/usearch/{name} {episode}"
        self._languageDict = {"english": 2, "spanish": 14}
        self._minSize = int(minSize)
        self._debug = debug
        extraFilters = u"{otherFilters}{language}{verified}"        
        if self._languageDict.has_key(language):
            self._language = u" lang_id:{0}".format(self._languageDict[language])
        else:
            self._language = ""
        
        if otherFilters != "":
            self._otherFilers = u" "+otherFilters
        else:
            self._otherFilers = ""
        
        if verified:
            self._verified = u" verified:1"
        else:
            self._verified = ""
            
        self._urlSearch = self._urlSearch+extraFilters.format(otherFilters=self._otherFilers,language=self._language,verified=self._verified)+u"/?field=seeders&sorder=desc&rss=1"
        self._browser = Browser()
        self._browser.set_handle_robots(False)
        self._cookieJar = cookielib.LWPCookieJar()
        self._browser.set_cookiejar(self._cookieJar)
        self._browser.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0'), ('Accept', '*/*'), ('Accept-Encoding',"gzip,deflate")]
        self._browser.open(self._urlBase)
        
    def episodeSearch(self, serie, episode):
        searchQuery = self._urlSearch.format(name=serie,episode=episode["number"])
        logging.debug(u"searchURL: {}".format(searchQuery))
        try:
            self._browser.open(searchQuery)
            gzipContent = self._browser.response().read()
            xml = gzip.GzipFile(fileobj=StringIO.StringIO(gzipContent)).read()
            xmldoc = minidom.parseString(xml)
            items = xmldoc.getElementsByTagName('item')
            for item in items:
                contentLength = int(item.getElementsByTagName("torrent:contentLength")[0].firstChild.data)
                if contentLength < self._minSize:
                    logging.warning(u"Torrent too small: {}".format(contentLength))
                    continue
                
                return item.getElementsByTagName("torrent:magnetURI")[0].firstChild.data
                
            return None
        except HTTPError, e:
            logging.error( u"There was an error in the URL {}.".format(searchQuery))
            return None
Developer: zrrs, Project: script.service.series-downloader, Lines: 55, Source file: kickass.py

Example 9: __init__

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import set_cookiejar [as alias]
class GooglePlayApp:
    def __init__(self):
        self.br = Browser()
        self.cj = CookieJar()
        self.br.set_cookiejar(self.cj)
       	
    def get_app_links(self, uri):
        parts = urlparse.urlparse(uri)
        if "/store/apps/details" in parts.path:
            return [uri]

        res = self.br.open(uri)
        data = res.get_data()
        soup = BeautifulSoup(data, "html5lib")
        snippet = soup.find('div', attrs={"class" : "num-pagination-page"})
        urls = snippet.find_all("a", attrs={"class" : "title"})
		
        return [ parts._replace(path=url.get('href'), query="").geturl() for url in urls ]
			
    def get_app_info(self, uri):
        res = self.br.open(uri)
        data = res.get_data() 
        soup = BeautifulSoup(data, "html5lib", from_encoding="utf-8")
        info = AppInfo()
        # info.name = soup.find('h1', attrs={"class" : "doc-banner-title"}).text
        info.name = soup.find('div', attrs={"class" : "document-title"}).text
        info.category = ""
        info.version = ""
        info.size = ""
        info.updated = ""
        info.price = ""
        info.os = "Android"
        # info.developer = soup.find('a', attrs={"class" : "doc-header-link"}).text
        info.developer = soup.find('a', attrs={"class" : "document-subtitle"}).text
        info.language = ""
        # desc = soup.find(id="doc-original-text").prettify()
        desc = soup.find('div', attrs={"class" : "show-more-content"}).prettify()
        
        info.description = desc

        # artwork = soup.find('div', attrs={"class" : "doc-banner-icon"}).img["src"]
        artwork = soup.find('div', attrs={"class" : "cover-container"}).img["src"]
        info.artwork = artwork

        # div_images = soup.find('div', attrs={"class" : "screenshot-carousel-content-container"})
        div_images = soup.find('div', attrs={"class" : "thumbnails"})
        images = div_images.find_all('img')
        info.images = [img["src"] for img in images]

        info.debug()
        return info
Developer: lite, Project: yebob_utils, Lines: 53, Source file: GooglePlayApp.py

Example 10: lockNloadBrowser

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import set_cookiejar [as alias]
def lockNloadBrowser():
    br = Browser()
    cj = cookielib.LWPCookieJar()
    br.set_cookiejar(cj)
    br.set_handle_equiv(True)
    br.set_handle_gzip(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(True)
    br.set_handle_robots(False)

    # Follows refresh 0 but does not hang on refresh > 0
    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
    br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
    return br
Developer: Daiver, Project: jff, Lines: 16, Source file: parsing.py

Example 11: criteria

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import set_cookiejar [as alias]
    def criteria(self):
        '''
        Return a dict with three entries:
        'server_is_up': True if the server is up, otherwise False
        'can_log_in': True if logging in produces no errors, otherwise False
        'username_as_discerned': the username if it can be discerned, otherwise False
        '''
        criteria = {
        'server_is_up': False,
        'can_log_in': False,
        'username_as_discerned': False,
        }            
            
        login_url = resources.LOGIN

        browser = Browser()
        cookies = LWPCookieJar()
        browser.set_cookiejar(cookies)
        
        #First we'll see if the server is up at all.
        try:
            browser.open(login_url)
        except URLError:
            return criteria #Return all three as False         
        criteria['server_is_up'] = True #We got past the first block; we know that the server is up.
        
        
        #Second, we'll see if the login page properly accepts our login attempt.
        try:
            browser.select_form(name='login')
            browser['username'] = account_credentials.HEARTBEAT_USERNAME
            browser["password"] = account_credentials.HEARTBEAT_PASSWORD        
            response_to_login = browser.submit()
        except:
            return criteria #Server_is_up will be True, the other two are False.        
        criteria['can_log_in'] = True #We were able to login in the above block.


        #Last, we'll see if we are recognized as a logged-in user.            
        try:
            response_to_whoami = browser.open(resources.WHOAMI_API)
            user_info_dict = json.loads(response_to_whoami.read())
            username = user_info_dict["username"]
        except:
            return criteria #Only username_as_discerned is False.        
        criteria['username_as_discerned'] = username #We figured out the username

        
        #Everything went OK!
        return criteria
Developer: SlashRoot, Project: WHAT, Lines: 52, Source file: heartbeat.py

Example 12: __init__

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import set_cookiejar [as alias]
class Api:
    appid = '5415093'
    token = None
    query_pattern = 'https://api.vk.com/method/%s?%s&access_token='

    ua = 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1'
    br = None
    cl = None

    def __init__(self, login, password, scope, testmode=False):
        self.br = Browser()
        self.cl = cookielib.LWPCookieJar()
        self.br.set_cookiejar(self.cl)

        self.br.set_handle_equiv(True)
        self.br.set_handle_redirect(True)
        self.br.set_handle_referer(True)
        self.br.set_handle_robots(False)
        self.br.set_handle_refresh(_http.HTTPRefreshProcessor(), max_time=1)
        self.br.addheaders = [('User-agent', self.ua)]

        self.br.open('https://oauth.vk.com/authorize?client_id=' + self.appid +
                     '&scope=' + scope + '&redirect_uri=http://oauth.vk.com/blank.html' +
                     '&display=mobile&response_type=token')

        self.br.select_form(nr=0)
        self.br.form['email'] = login
        self.br.form['pass'] = password
        self.br.submit()

        if len(list(self.br.forms())) > 0:
            self.br.select_form(nr=0)
            self.br.submit()

        params = urlparse.urlparse(self.br.geturl()).fragment
        params = params.split('&')

        for val in params:
            tp = val.split('=')
            if tp[0] == 'access_token':
                self.token = tp[1]
                self.query_pattern += self.token
                if testmode:
                    self.query_pattern += '&test_mode=1'
                break

    def query(self, func, data):
        response = self.br.open(self.query_pattern % (func, data))
        return response.read()
开发者ID:Mu57Di3,项目名称:vkGetNews,代码行数:51,代码来源:vkApi.py

示例13: main

# 需要导入模块: from mechanize import Browser [as 别名]
# 或者: from mechanize.Browser import set_cookiejar [as 别名]
def main():
    cj = cookielib.LWPCookieJar('cookie.txt')
    try:
        cj.load('cookie.txt')
    except IOError:
        print "Кукисы не найдены"
    root_url = 'http://188.134.114.45/store1/admin/'
    admin_url ='http://188.134.114.45/store1/admin/index.php?route=common/home&token='
    suppler_url = "http://188.134.114.45/store1/admin/index.php?route=catalog/suppler/update&token="
    orinon1_url = '&suppler_id=1'
    orinon2_url = '&suppler_id=2'
    username = 'admin'
    userpass = '1q2w'
    br = Browser()
    br.set_handle_robots(False)
    br.set_cookiejar(cj)
    br.addheaders = [('User-agent', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.0.6')]
    home_page = br.open(root_url)

    print br.title()


    br.select_form(nr=0)
    br["username"] = username
    br["password"] = userpass
    br.submit()
    token = getToken(br.geturl())
    cj.save('cookie.txt')

    br.open(suppler_url + token + orinon1_url)
    br.select_form(nr = 1)
    control = br.form.find_control("command")
    control.set_value_by_label(("Удалить товары",))

    res = br.submit()


    br.open(suppler_url + token + orinon2_url)
    br.select_form(nr = 1)
    control = br.form.find_control("command")
    control.set_value_by_label(("Удалить товары",))

    res = br.submit()
Developer: CoderGosha, Project: UpdateDiscounter, Lines: 45, Source file: main.py

Example 14: login

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import set_cookiejar [as alias]
 def login(self):
     br = Browser()
     cj = cookielib.LWPCookieJar()
     br.set_cookiejar(cj)
     
     br.set_handle_equiv(True)
     br.set_handle_redirect(True)
     br.set_handle_referer(True)
     br.set_handle_robots(False)
     br.set_handle_refresh(_http.HTTPRefreshProcessor(), max_time=2)
     
     br.open('http://www.fatsecret.com/Auth.aspx?pa=s')
     br.select_form(nr=0)
     # name attributes of the login form controls
     #PLEASE input your username and password here!!!!
     br['_ctl0:_ctl7:Logincontrol1:Name'] = 'username'
     br['_ctl0:_ctl7:Logincontrol1:Password'] = 'password'
     br.submit()
     return br
Developer: exsonic, Project: FatSecret_Crawler, Lines: 21, Source file: DataExtractor.py

Example 15: get_br

# Required import: from mechanize import Browser [as alias]
# Or: from mechanize.Browser import set_cookiejar [as alias]
def get_br():
    #todo low
    #headers
    #Accept-Encoding: identity
    # Host: _login.weibo.cn
    # Referer: http://weibo.cn/
    # Connection: close
    # User-Agent: Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0)
    br = Browser(factory=RobustFactory(), history=NoHistory(),)
    cj = cookielib.LWPCookieJar()
    br.back = back_func
    br.set_cookiejar(cj)
    br.set_handle_equiv(True)
    # br.set_handle_gzip(True)  # gzip handling is not yet an official feature in mechanize
    br.set_handle_redirect(True)
    br.set_handle_referer(True)
    br.set_handle_robots(False)
    br.set_handle_refresh(HTTPRefreshProcessor(), max_time=10)
    br.addheaders = [('User-agent', USER_AGENT)]
    return br
Developer: hackrole, Project: scrapy-utils, Lines: 22, Source file: mechanize_br.py


Note: the mechanize.Browser.set_cookiejar examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and the copyright of the source code remains with the original authors. Please refer to the corresponding project's License when distributing or using the code; do not reproduce this article without permission.