

Python MockServer.__exit__ Method Code Examples

This article collects typical usage examples of the Python method tests.mockserver.MockServer.__exit__. If you are wondering how MockServer.__exit__ is used in practice, the curated examples below should help. You can also explore further usage examples of the containing class, tests.mockserver.MockServer.


Twelve code examples of the MockServer.__exit__ method are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
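
Every example below follows the same pattern: MockServer is a context manager, so each test case calls MockServer.__enter__() in setUp() to start the mock HTTP server and MockServer.__exit__(None, None, None) in tearDown() to shut it down, mirroring what a with statement would do around the whole test. The following is a minimal sketch of that pattern in isolation. It assumes Scrapy's test suite is on the import path so that tests.mockserver is importable, and it assumes the url() helper and the /status?n=200 endpoint seen in Examples 7, 10 and 11 are available; the test method itself is only illustrative.

import unittest
from urllib.request import urlopen

# Assumption: Scrapy's test suite is installed or available on sys.path.
from tests.mockserver import MockServer


class MockServerLifecycleTest(unittest.TestCase):
    def setUp(self):
        # Equivalent to entering "with MockServer() as mockserver:", but split
        # across setUp/tearDown so every test method runs against a live server.
        self.mockserver = MockServer()
        self.mockserver.__enter__()

    def tearDown(self):
        # Passing (None, None, None) mirrors a "with" block that exits without an exception.
        self.mockserver.__exit__(None, None, None)

    def test_server_responds(self):
        # Illustrative check: /status?n=200 is the status endpoint used in Examples 11 and 12.
        response = urlopen(self.mockserver.url("/status?n=200"))
        self.assertEqual(response.getcode(), 200)

The same lifecycle could be written as "with MockServer() as mockserver: ...", but the setUp/tearDown form used throughout this article keeps the server alive for the duration of each individual test method.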

Example 1: TestHttpErrorMiddlewareIntegrational

# Required import: from tests.mockserver import MockServer [as alias]
# Or: from tests.mockserver.MockServer import __exit__ [as alias]
class TestHttpErrorMiddlewareIntegrational(TrialTestCase):
    def setUp(self):
        self.mockserver = MockServer()
        self.mockserver.__enter__()

    def tearDown(self):
        self.mockserver.__exit__(None, None, None)

    @defer.inlineCallbacks
    def test_middleware_works(self):
        crawler = get_crawler(_HttpErrorSpider)
        yield crawler.crawl()
        assert not crawler.spider.skipped, crawler.spider.skipped
        self.assertEqual(crawler.spider.parsed, {"200"})
        self.assertEqual(crawler.spider.failed, {"404", "402", "500"})

    @defer.inlineCallbacks
    def test_logging(self):
        crawler = get_crawler(_HttpErrorSpider)
        with LogCapture() as log:
            yield crawler.crawl(bypass_status_codes={402})
        self.assertEqual(crawler.spider.parsed, {"200", "402"})
        self.assertEqual(crawler.spider.skipped, {"402"})
        self.assertEqual(crawler.spider.failed, {"404", "500"})

        self.assertIn("Ignoring response <404", str(log))
        self.assertIn("Ignoring response <500", str(log))
        self.assertNotIn("Ignoring response <200", str(log))
        self.assertNotIn("Ignoring response <402", str(log))
Developer: pasdoy, Project: scrapy, Lines of code: 31, Source file: test_spidermiddleware_httperror.py

Example 2: TestHttpErrorMiddlewareIntegrational

# Required import: from tests.mockserver import MockServer [as alias]
# Or: from tests.mockserver.MockServer import __exit__ [as alias]
class TestHttpErrorMiddlewareIntegrational(TrialTestCase):
    def setUp(self):
        self.mockserver = MockServer()
        self.mockserver.__enter__()

    def tearDown(self):
        self.mockserver.__exit__(None, None, None)

    @defer.inlineCallbacks
    def test_middleware_works(self):
        crawler = get_crawler(_HttpErrorSpider)
        yield crawler.crawl()
        assert not crawler.spider.skipped, crawler.spider.skipped
        self.assertEqual(crawler.spider.parsed, {'200'})
        self.assertEqual(crawler.spider.failed, {'404', '402', '500'})

    @defer.inlineCallbacks
    def test_logging(self):
        crawler = get_crawler(_HttpErrorSpider)
        yield crawler.crawl(bypass_status_codes={402})
        # print(get_testlog())
        self.assertEqual(crawler.spider.parsed, {'200', '402'})
        self.assertEqual(crawler.spider.skipped, {'402'})
        self.assertEqual(crawler.spider.failed, {'404', '500'})

        log = get_testlog()
        self.assertIn('Ignoring response <404', log)
        self.assertIn('Ignoring response <500', log)
        self.assertNotIn('Ignoring response <200', log)
        self.assertNotIn('Ignoring response <402', log)
Developer: 0326, Project: scrapy, Lines of code: 32, Source file: test_spidermiddleware_httperror.py

Example 3: Http11MockServerTestCase

# Required import: from tests.mockserver import MockServer [as alias]
# Or: from tests.mockserver.MockServer import __exit__ [as alias]
class Http11MockServerTestCase(unittest.TestCase):
    """HTTP 1.1 test case with MockServer"""

    if twisted_version < (11, 1, 0):
        skip = "HTTP1.1 not supported in twisted < 11.1.0"

    def setUp(self):
        self.mockserver = MockServer()
        self.mockserver.__enter__()

    def tearDown(self):
        self.mockserver.__exit__(None, None, None)

    @defer.inlineCallbacks
    def test_download_with_content_length(self):
        crawler = get_crawler(SingleRequestSpider)
        # http://localhost:8998/partial sets Content-Length to 1024; use download_maxsize=1000
        # to avoid downloading it
        yield crawler.crawl(seed=Request(url="http://localhost:8998/partial", meta={"download_maxsize": 1000}))
        failure = crawler.spider.meta["failure"]
        self.assertIsInstance(failure.value, defer.CancelledError)

    @defer.inlineCallbacks
    def test_download(self):
        crawler = get_crawler(SingleRequestSpider)
        yield crawler.crawl(seed=Request(url="http://localhost:8998"))
        failure = crawler.spider.meta.get("failure")
        self.assertTrue(failure == None)
        reason = crawler.spider.meta["close_reason"]
        self.assertTrue(reason, "finished")

    @defer.inlineCallbacks
    def test_download_gzip_response(self):

        if twisted_version > (12, 3, 0):

            crawler = get_crawler(SingleRequestSpider)
            body = b"1" * 100  # PayloadResource requires body length to be 100
            request = Request("http://localhost:8998/payload", method="POST", body=body, meta={"download_maxsize": 50})
            yield crawler.crawl(seed=request)
            failure = crawler.spider.meta["failure"]
            # download_maxsize < 100, hence the CancelledError
            self.assertIsInstance(failure.value, defer.CancelledError)

            if six.PY2:
                request.headers.setdefault(b"Accept-Encoding", b"gzip,deflate")
                request = request.replace(url="http://localhost:8998/xpayload")
                yield crawler.crawl(seed=request)
                # download_maxsize = 50 is enough for the gzipped response
                failure = crawler.spider.meta.get("failure")
                self.assertTrue(failure == None)
                reason = crawler.spider.meta["close_reason"]
                self.assertTrue(reason, "finished")
            else:
                # See issue https://twistedmatrix.com/trac/ticket/8175
                raise unittest.SkipTest("xpayload only enabled for PY2")
        else:
            raise unittest.SkipTest("xpayload and payload endpoint only enabled for twisted > 12.3.0")
Developer: shengcanxu, Project: scrapy, Lines of code: 60, Source file: test_downloader_handlers.py

Example 4: TestCloseSpider

# Required import: from tests.mockserver import MockServer [as alias]
# Or: from tests.mockserver.MockServer import __exit__ [as alias]
class TestCloseSpider(TestCase):

    def setUp(self):
        self.mockserver = MockServer()
        self.mockserver.__enter__()

    def tearDown(self):
        self.mockserver.__exit__(None, None, None)

    @defer.inlineCallbacks
    def test_closespider_itemcount(self):
        close_on = 5
        crawler = get_crawler(ItemSpider, {'CLOSESPIDER_ITEMCOUNT': close_on})
        yield crawler.crawl()
        reason = crawler.spider.meta['close_reason']
        self.assertEqual(reason, 'closespider_itemcount')
        itemcount = crawler.stats.get_value('item_scraped_count')
        self.assertTrue(itemcount >= close_on)

    @defer.inlineCallbacks
    def test_closespider_pagecount(self):
        close_on = 5
        crawler = get_crawler(FollowAllSpider, {'CLOSESPIDER_PAGECOUNT': close_on})
        yield crawler.crawl()
        reason = crawler.spider.meta['close_reason']
        self.assertEqual(reason, 'closespider_pagecount')
        pagecount = crawler.stats.get_value('response_received_count')
        self.assertTrue(pagecount >= close_on)

    @defer.inlineCallbacks
    def test_closespider_errorcount(self):
        close_on = 5
        crawler = get_crawler(ErrorSpider, {'CLOSESPIDER_ERRORCOUNT': close_on})
        yield crawler.crawl(total=1000000)
        self.flushLoggedErrors(crawler.spider.exception_cls)
        reason = crawler.spider.meta['close_reason']
        self.assertEqual(reason, 'closespider_errorcount')
        key = 'spider_exceptions/{name}'\
                .format(name=crawler.spider.exception_cls.__name__)
        errorcount = crawler.stats.get_value(key)
        self.assertTrue(errorcount >= close_on)

    @defer.inlineCallbacks
    def test_closespider_timeout(self):
        close_on = 0.1
        crawler = get_crawler(FollowAllSpider, {'CLOSESPIDER_TIMEOUT': close_on})
        yield crawler.crawl(total=1000000)
        reason = crawler.spider.meta['close_reason']
        self.assertEqual(reason, 'closespider_timeout')
        stats = crawler.stats
        start = stats.get_value('start_time')
        stop = stats.get_value('finish_time')
        diff = stop - start
        total_seconds = diff.seconds + diff.microseconds
        self.assertTrue(total_seconds >= close_on)
Developer: 0326, Project: scrapy, Lines of code: 57, Source file: test_closespider.py

Example 5: TestHttpErrorMiddlewareIntegrational

# Required import: from tests.mockserver import MockServer [as alias]
# Or: from tests.mockserver.MockServer import __exit__ [as alias]
class TestHttpErrorMiddlewareIntegrational(TrialTestCase):
    def setUp(self):
        self.mockserver = MockServer()
        self.mockserver.__enter__()

    def tearDown(self):
        self.mockserver.__exit__(None, None, None)

    @defer.inlineCallbacks
    def test_middleware_works(self):
        crawler = get_crawler(_HttpErrorSpider)
        yield crawler.crawl()
        assert not crawler.spider.skipped, crawler.spider.skipped
        self.assertEqual(crawler.spider.parsed, {'200'})
        self.assertEqual(crawler.spider.failed, {'404', '402', '500'})

    @defer.inlineCallbacks
    def test_logging(self):
        crawler = get_crawler(_HttpErrorSpider)
        with LogCapture() as log:
            yield crawler.crawl(bypass_status_codes={402})
        self.assertEqual(crawler.spider.parsed, {'200', '402'})
        self.assertEqual(crawler.spider.skipped, {'402'})
        self.assertEqual(crawler.spider.failed, {'404', '500'})

        self.assertIn('Ignoring response <404', str(log))
        self.assertIn('Ignoring response <500', str(log))
        self.assertNotIn('Ignoring response <200', str(log))
        self.assertNotIn('Ignoring response <402', str(log))

    @defer.inlineCallbacks
    def test_logging_level(self):
        # HttpError logs ignored responses with level INFO
        crawler = get_crawler(_HttpErrorSpider)
        with LogCapture(level=logging.INFO) as log:
            yield crawler.crawl()
        self.assertEqual(crawler.spider.parsed, {'200'})
        self.assertEqual(crawler.spider.failed, {'404', '402', '500'})

        self.assertIn('Ignoring response <402', str(log))
        self.assertIn('Ignoring response <404', str(log))
        self.assertIn('Ignoring response <500', str(log))
        self.assertNotIn('Ignoring response <200', str(log))

        # with level WARNING, we shouldn't capture anything from HttpError
        crawler = get_crawler(_HttpErrorSpider)
        with LogCapture(level=logging.WARNING) as log:
            yield crawler.crawl()
        self.assertEqual(crawler.spider.parsed, {'200'})
        self.assertEqual(crawler.spider.failed, {'404', '402', '500'})

        self.assertNotIn('Ignoring response <402', str(log))
        self.assertNotIn('Ignoring response <404', str(log))
        self.assertNotIn('Ignoring response <500', str(log))
        self.assertNotIn('Ignoring response <200', str(log))
Developer: arush0311, Project: scrapy, Lines of code: 57, Source file: test_spidermiddleware_httperror.py

Example 6: Http11MockServerTestCase

# Required import: from tests.mockserver import MockServer [as alias]
# Or: from tests.mockserver.MockServer import __exit__ [as alias]
class Http11MockServerTestCase(unittest.TestCase):
    """HTTP 1.1 test case with MockServer"""
    if 'http11' not in optional_features:
        skip = 'HTTP1.1 not supported in twisted < 11.1.0'

    def setUp(self):
        self.mockserver = MockServer()
        self.mockserver.__enter__()

    def tearDown(self):
        self.mockserver.__exit__(None, None, None)

    @defer.inlineCallbacks
    def test_download_with_content_length(self):
        crawler = get_crawler(SingleRequestSpider)
        # http://localhost:8998/partial sets Content-Length to 1024; use download_maxsize=1000
        # to avoid downloading it
        yield crawler.crawl(seed=Request(url='http://localhost:8998/partial', meta={'download_maxsize': 1000}))
        failure = crawler.spider.meta['failure']
        self.assertIsInstance(failure.value, defer.CancelledError)

    @defer.inlineCallbacks
    def test_download(self):
        crawler = get_crawler(SingleRequestSpider)
        yield crawler.crawl(seed=Request(url='http://localhost:8998'))
        failure = crawler.spider.meta.get('failure')
        self.assertTrue(failure == None)
        reason = crawler.spider.meta['close_reason']
        self.assertTrue(reason, 'finished')

    @defer.inlineCallbacks
    def test_download_gzip_response(self):

        if six.PY2 and twisted_version > (12, 3, 0):

            crawler = get_crawler(SingleRequestSpider)
            body = '1'*100 # PayloadResource requires body length to be 100
            request = Request('http://localhost:8998/payload', method='POST', body=body, meta={'download_maxsize': 50})
            yield crawler.crawl(seed=request)
            failure = crawler.spider.meta['failure']
            # download_maxsize < 100, hence the CancelledError
            self.assertIsInstance(failure.value, defer.CancelledError)

            request.headers.setdefault('Accept-Encoding', 'gzip,deflate')
            request = request.replace(url='http://localhost:8998/xpayload')
            yield crawler.crawl(seed=request)

            # download_maxsize = 50 is enough for the gzipped response
            failure = crawler.spider.meta.get('failure')
            self.assertTrue(failure == None)
            reason = crawler.spider.meta['close_reason']
            self.assertTrue(reason, 'finished')
        else:
            raise unittest.SkipTest("xpayload and payload endpoint only enabled for twisted > 12.3.0 and python 2.x")
Developer: 0326, Project: scrapy, Lines of code: 56, Source file: test_downloader_handlers.py

Example 7: Http11MockServerTestCase

# Required import: from tests.mockserver import MockServer [as alias]
# Or: from tests.mockserver.MockServer import __exit__ [as alias]
class Http11MockServerTestCase(unittest.TestCase):
    """HTTP 1.1 test case with MockServer"""

    def setUp(self):
        self.mockserver = MockServer()
        self.mockserver.__enter__()

    def tearDown(self):
        self.mockserver.__exit__(None, None, None)

    @defer.inlineCallbacks
    def test_download_with_content_length(self):
        crawler = get_crawler(SingleRequestSpider)
        # the /partial endpoint sets Content-Length to 1024; use download_maxsize=1000
        # to avoid downloading it
        yield crawler.crawl(seed=Request(url=self.mockserver.url('/partial'), meta={'download_maxsize': 1000}))
        failure = crawler.spider.meta['failure']
        self.assertIsInstance(failure.value, defer.CancelledError)

    @defer.inlineCallbacks
    def test_download(self):
        crawler = get_crawler(SingleRequestSpider)
        yield crawler.crawl(seed=Request(url=self.mockserver.url('')))
        failure = crawler.spider.meta.get('failure')
        self.assertTrue(failure == None)
        reason = crawler.spider.meta['close_reason']
        self.assertTrue(reason, 'finished')

    @defer.inlineCallbacks
    def test_download_gzip_response(self):
        crawler = get_crawler(SingleRequestSpider)
        body = b'1' * 100  # PayloadResource requires body length to be 100
        request = Request(self.mockserver.url('/payload'), method='POST',
                          body=body, meta={'download_maxsize': 50})
        yield crawler.crawl(seed=request)
        failure = crawler.spider.meta['failure']
        # download_maxsize < 100, hence the CancelledError
        self.assertIsInstance(failure.value, defer.CancelledError)

        if six.PY2:
            request.headers.setdefault(b'Accept-Encoding', b'gzip,deflate')
            request = request.replace(url=self.mockserver.url('/xpayload'))
            yield crawler.crawl(seed=request)
            # download_maxsize = 50 is enough for the gzipped response
            failure = crawler.spider.meta.get('failure')
            self.assertTrue(failure == None)
            reason = crawler.spider.meta['close_reason']
            self.assertTrue(reason, 'finished')
        else:
            # See issue https://twistedmatrix.com/trac/ticket/8175
            raise unittest.SkipTest("xpayload only enabled for PY2")
Developer: elacuesta, Project: scrapy, Lines of code: 53, Source file: test_downloader_handlers.py

Example 8: FileDownloadCrawlTestCase

# Required import: from tests.mockserver import MockServer [as alias]
# Or: from tests.mockserver.MockServer import __exit__ [as alias]
class FileDownloadCrawlTestCase(TestCase):
    pipeline_class = 'scrapy.pipelines.files.FilesPipeline'
    store_setting_key = 'FILES_STORE'
    media_key = 'files'
    media_urls_key = 'file_urls'
    expected_checksums = set([
        '5547178b89448faf0015a13f904c936e',
        'c2281c83670e31d8aaab7cb642b824db',
        'ed3f6538dc15d4d9179dae57319edc5f'])

    def setUp(self):
        self.mockserver = MockServer()
        self.mockserver.__enter__()

        # prepare a directory for storing files
        self.tmpmediastore = self.mktemp()
        os.mkdir(self.tmpmediastore)
        self.settings = {
            'ITEM_PIPELINES': {self.pipeline_class: 1},
            self.store_setting_key: self.tmpmediastore,
        }
        self.runner = CrawlerRunner(self.settings)
        self.items = []

    def tearDown(self):
        shutil.rmtree(self.tmpmediastore)
        self.items = []
        self.mockserver.__exit__(None, None, None)

    def _on_item_scraped(self, item):
        self.items.append(item)

    def _create_crawler(self, spider_class, **kwargs):
        crawler = self.runner.create_crawler(spider_class, **kwargs)
        crawler.signals.connect(self._on_item_scraped, signals.item_scraped)
        return crawler

    def _assert_files_downloaded(self, items, logs):
        self.assertEqual(len(items), 1)
        self.assertIn(self.media_key, items[0])

        # check that logs show the expected number of successful file downloads
        file_dl_success = 'File (downloaded): Downloaded file from'
        self.assertEqual(logs.count(file_dl_success), 3)

        # check that the images/files checksums are what we know they should be
        if self.expected_checksums is not None:
            checksums = set(
                i['checksum']
                    for item in items
                        for i in item[self.media_key])
            self.assertEqual(checksums, self.expected_checksums)

        # check that the image files were actually written to the media store
        for item in items:
            for i in item[self.media_key]:
                self.assertTrue(
                    os.path.exists(
                        os.path.join(self.tmpmediastore, i['path'])))

    def _assert_files_download_failure(self, crawler, items, code, logs):

        # check that the item does NOT have the "images/files" field populated
        self.assertEqual(len(items), 1)
        self.assertIn(self.media_key, items[0])
        self.assertFalse(items[0][self.media_key])

        # check that there was 1 successful fetch and 3 other responses with non-200 code
        self.assertEqual(crawler.stats.get_value('downloader/request_method_count/GET'), 4)
        self.assertEqual(crawler.stats.get_value('downloader/response_count'), 4)
        self.assertEqual(crawler.stats.get_value('downloader/response_status_count/200'), 1)
        self.assertEqual(crawler.stats.get_value('downloader/response_status_count/%d' % code), 3)

        # check that logs do show the failure on the file downloads
        file_dl_failure = 'File (code: %d): Error downloading file from' % code
        self.assertEqual(logs.count(file_dl_failure), 3)

        # check that no files were written to the media store
        self.assertEqual(os.listdir(self.tmpmediastore), [])

    @defer.inlineCallbacks
    def test_download_media(self):
        crawler = self._create_crawler(MediaDownloadSpider)
        with LogCapture() as log:
            yield crawler.crawl("http://localhost:8998/files/images/",
                media_key=self.media_key,
                media_urls_key=self.media_urls_key)
        self._assert_files_downloaded(self.items, str(log))

    @defer.inlineCallbacks
    def test_download_media_wrong_urls(self):
        crawler = self._create_crawler(BrokenLinksMediaDownloadSpider)
        with LogCapture() as log:
            yield crawler.crawl("http://localhost:8998/files/images/",
                media_key=self.media_key,
                media_urls_key=self.media_urls_key)
        self._assert_files_download_failure(crawler, self.items, 404, str(log))

    @defer.inlineCallbacks
    def test_download_media_redirected_default_failure(self):
#......... (remaining code omitted) .........
Developer: LMKight, Project: scrapy, Lines of code: 103, Source file: test_pipeline_crawl.py

Example 9: CrawlTestCase

# Required import: from tests.mockserver import MockServer [as alias]
# Or: from tests.mockserver.MockServer import __exit__ [as alias]
class CrawlTestCase(TestCase):

    def setUp(self):
        self.mockserver = MockServer()
        self.mockserver.__enter__()

    def tearDown(self):
        self.mockserver.__exit__(None, None, None)

    @defer.inlineCallbacks
    def test_follow_all(self):
        spider = FollowAllSpider()
        yield docrawl(spider)
        self.assertEqual(len(spider.urls_visited), 11)  # 10 + start_url

    @defer.inlineCallbacks
    def test_delay(self):
        # short to long delays
        yield self._test_delay(0.2, False)
        yield self._test_delay(1, False)
        # randoms
        yield self._test_delay(0.2, True)
        yield self._test_delay(1, True)

    @defer.inlineCallbacks
    def _test_delay(self, delay, randomize):
        settings = {"DOWNLOAD_DELAY": delay, 'RANDOMIZE_DOWNLOAD_DELAY': randomize}
        spider = FollowAllSpider(maxlatency=delay * 2)
        yield docrawl(spider, settings)
        t = spider.times
        totaltime = t[-1] - t[0]
        avgd = totaltime / (len(t) - 1)
        tolerance = 0.6 if randomize else 0.2
        self.assertTrue(avgd > delay * (1 - tolerance),
                        "download delay too small: %s" % avgd)

    @defer.inlineCallbacks
    def test_timeout_success(self):
        spider = DelaySpider(n=0.5)
        yield docrawl(spider)
        self.assertTrue(spider.t1 > 0)
        self.assertTrue(spider.t2 > 0)
        self.assertTrue(spider.t2 > spider.t1)

    @defer.inlineCallbacks
    def test_timeout_failure(self):
        spider = DelaySpider(n=0.5)
        yield docrawl(spider, {"DOWNLOAD_TIMEOUT": 0.35})
        self.assertTrue(spider.t1 > 0)
        self.assertTrue(spider.t2 == 0)
        self.assertTrue(spider.t2_err > 0)
        self.assertTrue(spider.t2_err > spider.t1)
        # server hangs after receiving response headers
        spider = DelaySpider(n=0.5, b=1)
        yield docrawl(spider, {"DOWNLOAD_TIMEOUT": 0.35})
        self.assertTrue(spider.t1 > 0)
        self.assertTrue(spider.t2 == 0)
        self.assertTrue(spider.t2_err > 0)
        self.assertTrue(spider.t2_err > spider.t1)

    @defer.inlineCallbacks
    def test_retry_503(self):
        spider = SimpleSpider("http://localhost:8998/status?n=503")
        yield docrawl(spider)
        self._assert_retried()

    @defer.inlineCallbacks
    def test_retry_conn_failed(self):
        spider = SimpleSpider("http://localhost:65432/status?n=503")
        yield docrawl(spider)
        self._assert_retried()

    @defer.inlineCallbacks
    def test_retry_dns_error(self):
        with mock.patch('socket.gethostbyname',
                        side_effect=socket.gaierror(-5, 'No address associated with hostname')):
            spider = SimpleSpider("http://example.com/")
            yield docrawl(spider)
            self._assert_retried()

    @defer.inlineCallbacks
    def test_start_requests_bug_before_yield(self):
        spider = BrokenStartRequestsSpider(fail_before_yield=1)
        yield docrawl(spider)
        errors = self.flushLoggedErrors(ZeroDivisionError)
        self.assertEqual(len(errors), 1)

    @defer.inlineCallbacks
    def test_start_requests_bug_yielding(self):
        spider = BrokenStartRequestsSpider(fail_yielding=1)
        yield docrawl(spider)
        errors = self.flushLoggedErrors(ZeroDivisionError)
        self.assertEqual(len(errors), 1)

    @defer.inlineCallbacks
    def test_start_requests_lazyness(self):
        settings = {"CONCURRENT_REQUESTS": 1}
        spider = BrokenStartRequestsSpider()
        yield docrawl(spider, settings)
        #self.assertTrue(False, spider.seedsseen)
#......... (remaining code omitted) .........
Developer: elkingtowa, Project: pyrake, Lines of code: 103, Source file: test_crawl.py

Example 10: CrawlTestCase

# Required import: from tests.mockserver import MockServer [as alias]
# Or: from tests.mockserver.MockServer import __exit__ [as alias]
class CrawlTestCase(TestCase):

    def setUp(self):
        self.mockserver = MockServer()
        self.mockserver.__enter__()
        self.runner = CrawlerRunner()

    def tearDown(self):
        self.mockserver.__exit__(None, None, None)

    @defer.inlineCallbacks
    def test_follow_all(self):
        crawler = self.runner.create_crawler(FollowAllSpider)
        yield crawler.crawl(mockserver=self.mockserver)
        self.assertEqual(len(crawler.spider.urls_visited), 11)  # 10 + start_url

    @defer.inlineCallbacks
    def test_delay(self):
        # short to long delays
        yield self._test_delay(0.2, False)
        yield self._test_delay(1, False)
        # randoms
        yield self._test_delay(0.2, True)
        yield self._test_delay(1, True)

    @defer.inlineCallbacks
    def _test_delay(self, delay, randomize):
        settings = {"DOWNLOAD_DELAY": delay, 'RANDOMIZE_DOWNLOAD_DELAY': randomize}
        crawler = CrawlerRunner(settings).create_crawler(FollowAllSpider)
        yield crawler.crawl(maxlatency=delay * 2, mockserver=self.mockserver)
        t = crawler.spider.times
        totaltime = t[-1] - t[0]
        avgd = totaltime / (len(t) - 1)
        tolerance = 0.6 if randomize else 0.2
        self.assertTrue(avgd > delay * (1 - tolerance),
                        "download delay too small: %s" % avgd)

    @defer.inlineCallbacks
    def test_timeout_success(self):
        crawler = self.runner.create_crawler(DelaySpider)
        yield crawler.crawl(n=0.5, mockserver=self.mockserver)
        self.assertTrue(crawler.spider.t1 > 0)
        self.assertTrue(crawler.spider.t2 > 0)
        self.assertTrue(crawler.spider.t2 > crawler.spider.t1)

    @defer.inlineCallbacks
    def test_timeout_failure(self):
        crawler = CrawlerRunner({"DOWNLOAD_TIMEOUT": 0.35}).create_crawler(DelaySpider)
        yield crawler.crawl(n=0.5, mockserver=self.mockserver)
        self.assertTrue(crawler.spider.t1 > 0)
        self.assertTrue(crawler.spider.t2 == 0)
        self.assertTrue(crawler.spider.t2_err > 0)
        self.assertTrue(crawler.spider.t2_err > crawler.spider.t1)
        # server hangs after receiving response headers
        yield crawler.crawl(n=0.5, b=1, mockserver=self.mockserver)
        self.assertTrue(crawler.spider.t1 > 0)
        self.assertTrue(crawler.spider.t2 == 0)
        self.assertTrue(crawler.spider.t2_err > 0)
        self.assertTrue(crawler.spider.t2_err > crawler.spider.t1)

    @defer.inlineCallbacks
    def test_retry_503(self):
        crawler = self.runner.create_crawler(SimpleSpider)
        with LogCapture() as l:
            yield crawler.crawl(self.mockserver.url("/status?n=503"), mockserver=self.mockserver)
        self._assert_retried(l)

    @defer.inlineCallbacks
    def test_retry_conn_failed(self):
        crawler = self.runner.create_crawler(SimpleSpider)
        with LogCapture() as l:
            yield crawler.crawl("http://localhost:65432/status?n=503", mockserver=self.mockserver)
        self._assert_retried(l)

    @defer.inlineCallbacks
    def test_retry_dns_error(self):
        crawler = self.runner.create_crawler(SimpleSpider)
        with LogCapture() as l:
            # try to fetch the homepage of a non-existent domain
            yield crawler.crawl("http://dns.resolution.invalid./", mockserver=self.mockserver)
        self._assert_retried(l)

    @defer.inlineCallbacks
    def test_start_requests_bug_before_yield(self):
        with LogCapture('scrapy', level=logging.ERROR) as l:
            crawler = self.runner.create_crawler(BrokenStartRequestsSpider)
            yield crawler.crawl(fail_before_yield=1, mockserver=self.mockserver)

        self.assertEqual(len(l.records), 1)
        record = l.records[0]
        self.assertIsNotNone(record.exc_info)
        self.assertIs(record.exc_info[0], ZeroDivisionError)

    @defer.inlineCallbacks
    def test_start_requests_bug_yielding(self):
        with LogCapture('scrapy', level=logging.ERROR) as l:
            crawler = self.runner.create_crawler(BrokenStartRequestsSpider)
            yield crawler.crawl(fail_yielding=1, mockserver=self.mockserver)

        self.assertEqual(len(l.records), 1)
#......... (remaining code omitted) .........
Developer: ArturGaspar, Project: scrapy, Lines of code: 103, Source file: test_crawl.py

Example 11: ProxyConnectTestCase

# Required import: from tests.mockserver import MockServer [as alias]
# Or: from tests.mockserver.MockServer import __exit__ [as alias]
class ProxyConnectTestCase(TestCase):

    def setUp(self):
        self.mockserver = MockServer()
        self.mockserver.__enter__()
        self._oldenv = os.environ.copy()
        self._proxy = HTTPSProxy(8888)
        self._proxy.start()
        # Wait for the proxy to start.
        time.sleep(1.0)
        os.environ['http_proxy'] = 'http://scrapy:scrapy@localhost:8888'
        os.environ['https_proxy'] = 'http://scrapy:scrapy@localhost:8888'

    def tearDown(self):
        self.mockserver.__exit__(None, None, None)
        self._proxy.shutdown()
        os.environ = self._oldenv

    @defer.inlineCallbacks
    def test_https_connect_tunnel(self):
        crawler = get_crawler(SimpleSpider)
        with LogCapture() as l:
            yield crawler.crawl("https://localhost:8999/status?n=200")
        self._assert_got_response_code(200, l)

    @defer.inlineCallbacks
    def test_https_noconnect(self):
        os.environ['https_proxy'] = 'http://scrapy:scrapy@localhost:8888?noconnect'
        crawler = get_crawler(SimpleSpider)
        with LogCapture() as l:
            yield crawler.crawl("https://localhost:8999/status?n=200")
        self._assert_got_response_code(200, l)
        os.environ['https_proxy'] = 'http://scrapy:scrapy@localhost:8888'

    @defer.inlineCallbacks
    def test_https_connect_tunnel_error(self):
        crawler = get_crawler(SimpleSpider)
        with LogCapture() as l:
            yield crawler.crawl("https://localhost:99999/status?n=200")
        self._assert_got_tunnel_error(l)

    @defer.inlineCallbacks
    def test_https_tunnel_auth_error(self):
        os.environ['https_proxy'] = 'http://wrong:wronger@localhost:8888'
        crawler = get_crawler(SimpleSpider)
        with LogCapture() as l:
            yield crawler.crawl("https://localhost:8999/status?n=200")
        # The proxy returns a 407 error code, but it does not reach the client;
        # the client just sees a TunnelError.
        self._assert_got_tunnel_error(l)
        os.environ['https_proxy'] = 'http://scrapy:scrapy@localhost:8888'

    @defer.inlineCallbacks
    def test_https_tunnel_without_leak_proxy_authorization_header(self):
        request = Request("https://localhost:8999/echo")
        crawler = get_crawler(SingleRequestSpider)
        with LogCapture() as l:
            yield crawler.crawl(seed=request)
        self._assert_got_response_code(200, l)
        echo = json.loads(crawler.spider.meta['responses'][0].body)
        self.assertTrue('Proxy-Authorization' not in echo['headers'])

    @defer.inlineCallbacks
    def test_https_noconnect_auth_error(self):
        os.environ['https_proxy'] = 'http://wrong:wronger@localhost:8888?noconnect'
        crawler = get_crawler(SimpleSpider)
        with LogCapture() as l:
            yield crawler.crawl("https://localhost:8999/status?n=200")
        self._assert_got_response_code(407, l)

    def _assert_got_response_code(self, code, log):
        self.assertEqual(str(log).count('Crawled (%d)' % code), 1)

    def _assert_got_tunnel_error(self, log):
        self.assertEqual(str(log).count('TunnelError'), 1)
Developer: 01-, Project: scrapy, Lines of code: 77, Source file: test_proxy_connect.py

Example 12: ProxyConnectTestCase

# Required import: from tests.mockserver import MockServer [as alias]
# Or: from tests.mockserver.MockServer import __exit__ [as alias]
class ProxyConnectTestCase(TestCase):

    def setUp(self):
        self.mockserver = MockServer()
        self.mockserver.__enter__()
        self._oldenv = os.environ.copy()
        self._proxy = HTTPSProxy(8888)
        self._proxy.start()
        # Wait for the proxy to start.
        time.sleep(1.0)
        os.environ['http_proxy'] = 'http://scrapy:scrapy@localhost:8888'
        os.environ['https_proxy'] = 'http://scrapy:scrapy@localhost:8888'

    def tearDown(self):
        self.mockserver.__exit__(None, None, None)
        self._proxy.shutdown()
        os.environ = self._oldenv

    @defer.inlineCallbacks
    def test_https_connect_tunnel(self):
        spider = SimpleSpider("https://localhost:8999/status?n=200")
        yield docrawl(spider)
        self._assert_got_response_code(200)

    @defer.inlineCallbacks
    def test_https_noconnect(self):
        os.environ['https_proxy'] = 'http://scrapy:scrapy@localhost:8888?noconnect'
        spider = SimpleSpider("https://localhost:8999/status?n=200")
        yield docrawl(spider)
        self._assert_got_response_code(200)
        os.environ['https_proxy'] = 'http://scrapy:scrapy@localhost:8888'

    @defer.inlineCallbacks
    def test_https_connect_tunnel_error(self):
        spider = SimpleSpider("https://localhost:99999/status?n=200")
        yield docrawl(spider)
        self._assert_got_tunnel_error()

    @defer.inlineCallbacks
    def test_https_tunnel_auth_error(self):
        os.environ['https_proxy'] = 'http://wrong:wronger@localhost:8888'
        spider = SimpleSpider("https://localhost:8999/status?n=200")
        yield docrawl(spider)
        # The proxy returns a 407 error code, but it does not reach the client;
        # the client just sees a TunnelError.
        self._assert_got_tunnel_error()
        os.environ['https_proxy'] = 'http://scrapy:scrapy@localhost:8888'

    @defer.inlineCallbacks
    def test_https_noconnect_auth_error(self):
        os.environ['https_proxy'] = 'http://wrong:wronger@localhost:8888?noconnect'
        spider = SimpleSpider("https://localhost:8999/status?n=200")
        yield docrawl(spider)
        self._assert_got_response_code(407)

    def _assert_got_response_code(self, code):
        log = get_testlog()
        self.assertEqual(log.count('Crawled (%d)' % code), 1)

    def _assert_got_tunnel_error(self):
        log = get_testlog()
        self.assertEqual(log.count('TunnelError'), 1)
Developer: hellobill, Project: scrapy, Lines of code: 64, Source file: test_proxy_connect.py


Note: the tests.mockserver.MockServer.__exit__ method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.