

Python FormRequest.from_response Method Code Examples

This article collects typical usage examples of the Python method scrapy.FormRequest.from_response. If you are unsure how FormRequest.from_response is used in practice, or are looking for concrete examples of it, the hand-picked code samples below may help. You can also explore further usage examples of scrapy.FormRequest, the class this method belongs to.


The sections below present 15 code examples of the FormRequest.from_response method, sorted by popularity by default.
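
Before working through the examples, here is a minimal, self-contained sketch of the pattern most of them share. It is illustrative only: the URL, form field names, credentials, and the post-login check are placeholders rather than values taken from any example below. FormRequest.from_response parses the form found in the received page, keeps its pre-filled and hidden fields (CSRF tokens, ASP.NET viewstate, and so on), merges in the values passed via formdata, and returns a ready-to-submit request.

import scrapy
from scrapy import FormRequest


class LoginSpider(scrapy.Spider):
    name = 'login_example'                        # hypothetical spider name
    start_urls = ['https://example.com/login']    # placeholder login URL

    def parse(self, response):
        # from_response() locates the <form> on the login page and merges
        # the credentials given in formdata into the fields it found there.
        return FormRequest.from_response(
            response,
            formdata={'username': 'user', 'password': 'pass'},  # placeholder credentials
            callback=self.after_login,
        )

    def after_login(self, response):
        # The success check is site-specific; adjust it for the target site.
        if 'authentication failed' in response.text:
            self.logger.error('Login failed')
            return
        # Continue crawling authenticated pages from here.

Most of the examples that follow are variations of this flow, differing mainly in how they locate the form (formname, formxpath) and in which extra arguments (headers, meta, dont_click, dont_filter) they pass.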

Example 1: parse

# Required module: from scrapy import FormRequest [as alias]
# Or: from scrapy.FormRequest import from_response [as alias]
    def parse(self, response):
        # test_urls = [
        #     "http://ntiaoji.kaoyan.com/tjadm/1.html",
        #     "http://ntiaoji.kaoyan.com/tjadm/2.html",
        #     "http://ntiaoji.kaoyan.com/tjadm/3.html",
        #     "http://ntiaoji.kaoyan.com/tjadm/4.html",
        #     "http://ntiaoji.kaoyan.com/tjadm/5.html",
        #     "http://ntiaoji.kaoyan.com/tjadm/6.html",
        #     "http://ntiaoji.kaoyan.com/tjadm/7.html"
        # ]
        #
        # for url in test_urls:
        #     print(url)
        #     time.sleep(2)
        #     self.headers['Referer'] = url
        #     yield FormRequest.from_response(response,
        #                                     headers=self.headers,
        #                                     formdata={
        #                                         'username': 'kytj1',
        #                                         'password': '6ujBJ4XQyLeGmJmB'
        #                                     },
        #                                     callback=self.download_page,
        #                                     dont_filter=True)
        return FormRequest.from_response(response,
                                         headers=self.headers,
                                         formdata={
                                             'username': 'kytj1',
                                             'password': '6ujBJ4XQyLeGmJmB'
                                         },
                                         callback=self.after_login,
                                         dont_filter=True)
Developer: TonyDoen, Project: python_code_review, Lines: 35, Source: dmoz_spider.py

Example 2: parse

# Required module: from scrapy import FormRequest [as alias]
# Or: from scrapy.FormRequest import from_response [as alias]
    def parse(self, response):
        """
        This is the default callback. Once the response arrives:
        1. If login is required, log in to the forum first via FormRequest;
        2. If login is not required, continue crawling with a plain Request;
        :param response:
        :return:
        """
        # Login is required: simulate it with FormRequest.from_response
        # (check response.text, the decoded page, so the substring test also works on Python 3)
        if 'id="lsform"' in response.text:
            logging.info('in parse, need to login, url: {0}'.format(response.url))
            form_data = {'handlekey': 'ls', 'quickforward': 'yes', 'username': 'daniell123', 'password': 'admin123'}
            request = FormRequest.from_response(response=response,
                                                headers=self.headers,
                                                formxpath='//form[contains(@id, "lsform")]',
                                                formdata=form_data,
                                                callback=self.parse_list
                                                )
        else:
            logging.info('in parse, NOT need to login, url: {0}'.format(response.url))
            request = Request(url=response.url,
                              headers=self.headers,
                              callback=self.parse_list,
                              )

        yield request
Developer: allhu, Project: scrapy_in_practice, Lines: 28, Source: xiaochuncnjp_spider.py

Example 3: _request_next_page

# Required module: from scrapy import FormRequest [as alias]
# Or: from scrapy.FormRequest import from_response [as alias]
    def _request_next_page(self, response, date_str, callback):
        current_page = int(response.meta['current_page'])

        total_string = response.css('#LblTotal').xpath('./text()').extract_first(default='')

        total = re.search(r'(\d+)', total_string)

        if total:
            # Deal with the next page.
            total = total.group(1)
            number_of_pages = self._get_number_of_pages(int(total))

            if current_page < number_of_pages:
                current_page += 1

                formdata = {
                    'TxtFecha': date_str,
                    'BtnBuscar': 'Buscar',
                    'LwVisitasCR$DpVisitasCR$ctl02$ctl00.x': '1',
                    'LwVisitasCR$DpVisitasCR$ctl02$ctl00.y': '1'
                }

                request = FormRequest.from_response(response,
                                                    formdata=formdata,
                                                    dont_click=True,
                                                    dont_filter=True,
                                                    callback=callback,
                                                    )

                request.meta['date'] = date_str
                request.meta['current_page'] = current_page

                return request
Developer: andyfires, Project: manolo_scraper, Lines: 35, Source: congreso.py

Example 4: parse

# Required module: from scrapy import FormRequest [as alias]
# Or: from scrapy.FormRequest import from_response [as alias]
    def parse(self, response):
        form_data = {'username': '[email protected]', 'password': '123456', 'remember_me': '1'}
        return FormRequest.from_response(response,
                                         headers=self.headers,
                                         formxpath='//form[@class="form-login"]',
                                         formdata=form_data,
                                         callback=self.after_login,
                                         )
Developer: allhu, Project: scrapy_in_practice, Lines: 10, Source: fishsaying_spider.py

Example 5: parse

# Required module: from scrapy import FormRequest [as alias]
# Or: from scrapy.FormRequest import from_response [as alias]
    def parse(self, response):
        yield FormRequest.from_response(
            response,
            formname='aspnetForm',
            formdata={'Skin$body$FundingSourceChoices$0': '1',
                      'Skin$body$FundingSourceChoices$1': '0'},
            meta={'curr_listing_page': 1, 'flag': False},
            callback=self.after_login)
Developer: jorbecalona, Project: umichemploymentscrape, Lines: 10, Source: job_listing_spider.py

Example 6: parse

# Required module: from scrapy import FormRequest [as alias]
# Or: from scrapy.FormRequest import from_response [as alias]
    def parse(self, response):
        login_form = {
            'login': self.username,
            'password': self.password,
        }
        return FormRequest.from_response(
            response,
            formdata=login_form,
            callback=self.after_login
        )
Developer: jessamynsmith, Project: mentor, Lines: 12, Source: scrape_codementor_payouts.py

Example 7: parse

# Required module: from scrapy import FormRequest [as alias]
# Or: from scrapy.FormRequest import from_response [as alias]
    def parse(self, response):
        return FormRequest.from_response(response,
                                         headers=self.headers,
                                         formdata={
                                             'username': 'kytj1',
                                             'password': '6ujBJ4XQyLeGmJmB'
                                         },
                                         callback=self.after_login,
                                         dont_filter=True)
Developer: TonyDoen, Project: python_code_review, Lines: 12, Source: kytjgo_spider.py

Example 8: parse

# Required module: from scrapy import FormRequest [as alias]
# Or: from scrapy.FormRequest import from_response [as alias]
    def parse(self, response):
        yield FormRequest.from_response(response,
                                        formdata={
                                            'tanggal': '20160817#Rabu, 17 Agustus 2016',
                                            'origination': 'KAC#KIARACONDONG',
                                            'destination': 'MN#MADIUN',
                                            'adult': '1',
                                            'infant': '0'
                                        },
                                        callback=self.parseInfo)
Developer: adityamahesa, Project: SpiderKai, Lines: 12, Source: Spiders.py

Example 9: login

# Required module: from scrapy import FormRequest [as alias]
# Or: from scrapy.FormRequest import from_response [as alias]
    def login(self,response):
        # login = requests.post(response.url,
        #                       headers = self.headers,
        #                       data={
        #                              'source':'None',
        #                              'redir':'https://www.douban.com/people/60012975/',
        #                              'form_email':'[email protected]',
        #                              'form_password':'123456789',
        #
        #                              'remember':'on',
        #                              'login':u'登錄'
        #                       })

        hxs = Selector(response)
        if hxs.xpath('//*[@name="captcha-id"]/@value').extract():
            captchaID = hxs.xpath('//*[@name="captcha-id"]/@value').extract()[0]
            captchAdd = hxs.xpath('//*[@id="captcha_image"]/@src').extract()[0]
            urllib.urlretrieve(captchAdd, 'captcha.jpg')  # Python 2 API; on Python 3 use urllib.request.urlretrieve
            captch = raw_input('please input the captcha:')  # raw_input is Python 2; use input() on Python 3
            yield FormRequest.from_response(response,
                                            meta =response.meta,
                                            # headers = self.headers,
                                            formdata={'source':'None',
                                                      'redir':'https://www.douban.com/people/unlucky_strike/',
                                                      'form_email':'[email protected]',
                                                      'form_password':'123456789',
                                                      'captcha-solution':captch,
                                                      'captcha-id':captchaID,
                                                      'remember':'on',
                                                      'login':u'登錄'},
                                            callback=self.parse)
        else:
            yield FormRequest.from_response(response,
                                            meta ={'cookiejar':response.meta['cookiejar']},
                                            # headers = self.headers,
                                            formdata={'source':'None',
                                                      'redir':'https://www.douban.com/people/unlucky_strike/',
                                                      'form_email':'[email protected]',
                                                      'form_password':'123456789',
                                                      'remember':'on',
                                                      'login':u'登錄'},
                                            callback=self.parse)
Developer: Suluo, Project: spider-Scrapy, Lines: 44, Source: doubanmovie.py

Example 10: parse

# Required module: from scrapy import FormRequest [as alias]
# Or: from scrapy.FormRequest import from_response [as alias]
    def parse(self, response):
        '''Parse login page'''
        return FormRequest.from_response(
            response,
            formxpath='//form[contains(@action, "login")]',
            formdata={
                'email': self.username,
                'pass': self.password,
            },
            callback=self.parse_home,
        )
Developer: talhashraf, Project: major-scrapy-spiders, Lines: 13, Source: login.py

Example 11: _get_page_request

# Required module: from scrapy import FormRequest [as alias]
# Or: from scrapy.FormRequest import from_response [as alias]
    def _get_page_request(self, response, page, date):

        request = FormRequest.from_response(
            response,
            formdata={"txtDesde": date, "__EVENTTARGET": "gvwConsulta", "__EVENTARGUMENT": "Page${}".format(page)},
            dont_filter=True,
            callback=self.parse,
        )

        request.meta["date"] = date

        return request
Developer: matiskay, Project: manolo_scraper, Lines: 14, Source: ambiente.py

Example 12: currency_form

# Required module: from scrapy import FormRequest [as alias]
# Or: from scrapy.FormRequest import from_response [as alias]
    def currency_form(self, response):
        """
        Currency form viewed and change to USD posted.
        """
        self.log('currency_form', level=logging.INFO)
        formdata = {
            'ddlCountry1': 'United States',
            'ddlCurrency': '503329C6-40CB-47E6-91D1-9F11AF63F706'
        }
        return FormRequest.from_response(response,
                                         formdata=formdata,
                                         callback=self.currency_changed)
Developer: ercchy, Project: oxygendemo, Lines: 14, Source: oxygen.py

Example 13: parse_initial_request

# Required module: from scrapy import FormRequest [as alias]
# Or: from scrapy.FormRequest import from_response [as alias]
    def parse_initial_request(self, response):
        date = response.meta["date"]

        request = FormRequest.from_response(
            response,
            formdata={"txtDesde": date, "btnBuscar.x": "1", "btnBuscar.y": "1"},
            dont_filter=True,
            callback=self.parse_page,
        )

        request.meta["date"] = date

        yield request
Developer: matiskay, Project: manolo_scraper, Lines: 15, Source: ambiente.py

Example 14: parse_home

# Required module: from scrapy import FormRequest [as alias]
# Or: from scrapy.FormRequest import from_response [as alias]
    def parse_home(self, response):
        '''Parse user news feed page'''
        if response.css('#approvals_code'):
            # Handle 'Approvals Code' checkpoint (ask user to enter code).
            if not self.code:
                # Show facebook messages via logs
                # and request user for approval code.
                message = response.css('._50f4::text').extract()[0]
                self.log(process_string(message))
                message = response.css('._3-8y._50f4').xpath('string()').extract()[0]
                self.log(process_string(message))
                self.code = input('Enter the code: ')
            self.code = str(self.code)
            if not (self.code and self.code.isdigit()):
                self.log('Bad approvals code detected.')
                return
            return FormRequest.from_response(
                response,
                formdata={'approvals_code': self.code},
                callback=self.parse_home,
            )
        elif response.css('input#u_0_1'):
            # Handle 'Save Browser' checkpoint.
            return FormRequest.from_response(
                response,
                formdata={'name_action_selected': 'dont_save'},
                callback=self.parse_home,
                dont_filter=True,
            )
        elif response.css('button#checkpointSubmitButton'):
            # Handle `Someone tried to log into your account` warning.
            return FormRequest.from_response(
                response, callback=self.parse_home, dont_filter=True,)
        # Else go to the user profile.
        href = response.css('a[title="Profile"]::attr(href)').extract()[0]
        return Request(
            response.urljoin(href),
            callback=self.parse_profile,
        )
Developer: talhashraf, Project: major-scrapy-spiders, Lines: 41, Source: login.py

Example 15: parse

# Required module: from scrapy import FormRequest [as alias]
# Or: from scrapy.FormRequest import from_response [as alias]
    def parse(self, response):
        """
        Overrides Spider's parse method. Fills the login details into the login form and submits it.
        :return:
        """
        print('custom settings:')
        print(self._settings)
        return FormRequest.from_response(
            response,
            formxpath='//div[contains(concat(" ", normalize-space(@class), " "), " main-container ")]/descendant::form',
            formdata={'EmailOrUsername': self._settings['username'], 'Password': self._settings['password']},
            callback=self.go_to_search_site
        )
Developer: Datafable, Project: epu-index, Lines: 15, Source: destandaard_spider.py


Note: The scrapy.FormRequest.from_response examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not reproduce without permission.