当前位置: 首页>>代码示例>>Python>>正文


Python HTTPConnectionPool.closeCachedConnections方法代码示例

本文整理汇总了Python中scrapy.xlib.tx.HTTPConnectionPool.closeCachedConnections方法的典型用法代码示例。如果您正苦于以下问题:Python HTTPConnectionPool.closeCachedConnections方法的具体用法?Python HTTPConnectionPool.closeCachedConnections怎么用?Python HTTPConnectionPool.closeCachedConnections使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在scrapy.xlib.tx.HTTPConnectionPool的用法示例。


在下文中一共展示了HTTPConnectionPool.closeCachedConnections方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: HTTP11DownloadHandler

# 需要导入模块: from scrapy.xlib.tx import HTTPConnectionPool [as 别名]
# 或者: from scrapy.xlib.tx.HTTPConnectionPool import closeCachedConnections [as 别名]
class HTTP11DownloadHandler(object):
    """Download handler for HTTP/1.1 built on a persistent connection pool."""

    def __init__(self, settings):
        # One persistent pool shared by every agent created by this handler.
        self._pool = HTTPConnectionPool(reactor, persistent=True)
        self._pool.maxPersistentPerHost = settings.getint('CONCURRENT_REQUESTS_PER_DOMAIN')
        self._pool._factory.noisy = False
        self._contextFactoryClass = load_object(settings['DOWNLOADER_CLIENTCONTEXTFACTORY'])
        self._contextFactory = self._contextFactoryClass()
        # Seconds to wait for the pool to drain before forcing close() to finish.
        self._disconnect_timeout = 1

    def download_request(self, request, spider):
        """Return a deferred for the HTTP download"""
        downloader = ScrapyAgent(contextFactory=self._contextFactory, pool=self._pool)
        return downloader.download_request(request)

    def close(self):
        """Close all cached pool connections, bounded by a manual timeout."""
        d = self._pool.closeCachedConnections()
        # closeCachedConnections can hang forever on network or server
        # trouble (Twisted ticket: https://twistedmatrix.com/trac/ticket/7738)
        # and it does not honour external errbacks, so we fire the deferred
        # ourselves after `_disconnect_timeout` seconds.
        timeout_call = reactor.callLater(self._disconnect_timeout, d.callback, [])

        def _cancel_timeout(result):
            # The pool drained in time; the forced callback is unnecessary.
            if timeout_call.active():
                timeout_call.cancel()
            return result

        d.addBoth(_cancel_timeout)
        return d
开发者ID:amogh14,项目名称:fintra,代码行数:36,代码来源:http11.py

示例2: HTTP11DownloadHandler

# 需要导入模块: from scrapy.xlib.tx import HTTPConnectionPool [as 别名]
# 或者: from scrapy.xlib.tx.HTTPConnectionPool import closeCachedConnections [as 别名]
class HTTP11DownloadHandler(object):
    """HTTP/1.1 download handler with a method-aware TLS context factory."""

    def __init__(self, settings):
        # Persistent pool shared across all requests from this handler.
        self._pool = HTTPConnectionPool(reactor, persistent=True)
        self._pool.maxPersistentPerHost = settings.getint('CONCURRENT_REQUESTS_PER_DOMAIN')
        self._pool._factory.noisy = False

        self._sslMethod = openssl_methods[settings.get('DOWNLOADER_CLIENT_TLS_METHOD')]
        self._contextFactoryClass = load_object(settings['DOWNLOADER_CLIENTCONTEXTFACTORY'])
        try:
            # Preferred path: a context factory that accepts the TLS method.
            self._contextFactory = self._contextFactoryClass(method=self._sslMethod)
        except TypeError:
            # Legacy factory without a `method` parameter: fall back to its
            # defaults and warn so the user can upgrade their class.
            self._contextFactory = self._contextFactoryClass()
            msg = """
 '%s' does not accept `method` argument (type OpenSSL.SSL method,\
 e.g. OpenSSL.SSL.SSLv23_METHOD).\
 Please upgrade your context factory class to handle it or ignore it.""" % (
                settings['DOWNLOADER_CLIENTCONTEXTFACTORY'],)
            warnings.warn(msg)
        self._default_maxsize = settings.getint('DOWNLOAD_MAXSIZE')
        self._default_warnsize = settings.getint('DOWNLOAD_WARNSIZE')
        # Seconds before close() forces its deferred to fire.
        self._disconnect_timeout = 1

    def download_request(self, request, spider):
        """Return a deferred for the HTTP download"""
        maxsize = getattr(spider, 'download_maxsize', self._default_maxsize)
        warnsize = getattr(spider, 'download_warnsize', self._default_warnsize)
        agent = ScrapyAgent(contextFactory=self._contextFactory, pool=self._pool,
            maxsize=maxsize, warnsize=warnsize)
        return agent.download_request(request)

    def close(self):
        """Close cached pool connections, bounded by a manual timeout."""
        d = self._pool.closeCachedConnections()
        # closeCachedConnections can hang on network or server issues
        # (see https://twistedmatrix.com/trac/ticket/7738) and ignores
        # external errbacks, so fire the deferred ourselves after
        # `_disconnect_timeout` seconds.
        timeout_call = reactor.callLater(self._disconnect_timeout, d.callback, [])

        def _cancel_timeout(result):
            # Pool closed before the deadline; drop the forced callback.
            if timeout_call.active():
                timeout_call.cancel()
            return result

        d.addBoth(_cancel_timeout)
        return d
开发者ID:Manuel4131,项目名称:scrapy,代码行数:53,代码来源:http11.py

示例3: HTTP11DownloadHandler

# 需要导入模块: from scrapy.xlib.tx import HTTPConnectionPool [as 别名]
# 或者: from scrapy.xlib.tx.HTTPConnectionPool import closeCachedConnections [as 别名]
class HTTP11DownloadHandler(object):
    """Minimal HTTP/1.1 download handler backed by a persistent pool."""

    def __init__(self, settings):
        self._pool = HTTPConnectionPool(reactor, persistent=True)
        self._contextFactoryClass = load_object(settings['DOWNLOADER_CLIENTCONTEXTFACTORY'])
        self._contextFactory = self._contextFactoryClass()
        # Seconds before close() forces its deferred to fire (see close()).
        self._disconnect_timeout = 1

    def download_request(self, request, spider):
        """Return a deferred for the HTTP download"""
        agent = ScrapyAgent(contextFactory=self._contextFactory, pool=self._pool)
        return agent.download_request(request)

    def close(self):
        """Close cached pool connections, bounded by a manual timeout.

        Fix: closeCachedConnections can hang indefinitely on network or
        server issues (https://twistedmatrix.com/trac/ticket/7738) and does
        not handle external errbacks, so we issue the callback ourselves
        after `_disconnect_timeout` seconds instead of returning the raw
        deferred.
        """
        d = self._pool.closeCachedConnections()
        delayed_call = reactor.callLater(self._disconnect_timeout, d.callback, [])

        def cancel_delayed_call(result):
            # Pool drained in time; the forced callback is no longer needed.
            if delayed_call.active():
                delayed_call.cancel()
            return result

        d.addBoth(cancel_delayed_call)
        return d
开发者ID:HolyDays,项目名称:scrapy,代码行数:16,代码来源:http11.py

示例4: HTTP11DownloadHandler

# 需要导入模块: from scrapy.xlib.tx import HTTPConnectionPool [as 别名]
# 或者: from scrapy.xlib.tx.HTTPConnectionPool import closeCachedConnections [as 别名]
class HTTP11DownloadHandler(object):
    """HTTP/1.1 download handler using a shared persistent connection pool."""

    def __init__(self, settings):
        self._pool = HTTPConnectionPool(reactor, persistent=True)
        self._pool.maxPersistentPerHost = settings.getint("CONCURRENT_REQUESTS_PER_DOMAIN")
        self._pool._factory.noisy = False
        self._contextFactoryClass = load_object(settings["DOWNLOADER_CLIENTCONTEXTFACTORY"])
        self._contextFactory = self._contextFactoryClass()
        # Seconds before close() forces its deferred to fire (see close()).
        self._disconnect_timeout = 1

    def download_request(self, request, spider):
        """Return a deferred for the HTTP download"""
        agent = ScrapyAgent(contextFactory=self._contextFactory, pool=self._pool)
        return agent.download_request(request)

    def close(self):
        """Close cached pool connections, bounded by a manual timeout.

        Fix: closeCachedConnections can hang indefinitely on network or
        server issues (https://twistedmatrix.com/trac/ticket/7738) and does
        not handle external errbacks, so we issue the callback ourselves
        after `_disconnect_timeout` seconds instead of returning the raw
        deferred.
        """
        d = self._pool.closeCachedConnections()
        delayed_call = reactor.callLater(self._disconnect_timeout, d.callback, [])

        def cancel_delayed_call(result):
            # Pool drained in time; the forced callback is no longer needed.
            if delayed_call.active():
                delayed_call.cancel()
            return result

        d.addBoth(cancel_delayed_call)
        return d
开发者ID:alvarofierroclavero,项目名称:scrapy,代码行数:17,代码来源:http11.py

示例5: MyDownloadHandler

# 需要导入模块: from scrapy.xlib.tx import HTTPConnectionPool [as 别名]
# 或者: from scrapy.xlib.tx.HTTPConnectionPool import closeCachedConnections [as 别名]
class MyDownloadHandler(object):
    """Download interface invoked by the upper (Scrapy downloader) layer."""

    def __init__(self, settings):
        self._pool = HTTPConnectionPool(reactor, persistent=True)
        self._pool.maxPersistentPerHost = settings.getint('CONCURRENT_REQUESTS_PER_DOMAIN')
        self._pool._factory.noisy = False
        self._contextFactoryClass = load_object(settings['DOWNLOADER_CLIENTCONTEXTFACTORY'])
        self._contextFactory = self._contextFactoryClass()
        # Seconds before close() forces its deferred to fire (see close()).
        self._disconnect_timeout = 1

    def download_request(self, request, spider):
        """Main (asynchronous) download entry point; returns a twisted deferred."""
        myDownloader = MyLogicDownloader()
        return myDownloader.download(request)

    def close(self):
        """Close cached pool connections, bounded by a manual timeout.

        Fix: closeCachedConnections can hang indefinitely on network or
        server issues (https://twistedmatrix.com/trac/ticket/7738) and does
        not handle external errbacks, so we issue the callback ourselves
        after `_disconnect_timeout` seconds instead of returning the raw
        deferred.
        """
        d = self._pool.closeCachedConnections()
        delayed_call = reactor.callLater(self._disconnect_timeout, d.callback, [])

        def cancel_delayed_call(result):
            # Pool drained in time; the forced callback is no longer needed.
            if delayed_call.active():
                delayed_call.cancel()
            return result

        d.addBoth(cancel_delayed_call)
        return d
开发者ID:shaobenbin,项目名称:apk-crawler,代码行数:18,代码来源:mydownloader.py

示例6: SeleniumDownloadHandler

# 需要导入模块: from scrapy.xlib.tx import HTTPConnectionPool [as 别名]
# 或者: from scrapy.xlib.tx.HTTPConnectionPool import closeCachedConnections [as 别名]
class SeleniumDownloadHandler(object):
    """
    download interface
    """

    def __init__(self, settings):
        self._pool = HTTPConnectionPool(reactor, persistent=True)
        self._pool.maxPersistentPerHost = settings.getint('CONCURRENT_REQUESTS_PER_DOMAIN')
        self._pool._factory.noisy = False
        self._contextFactoryClass = load_object(settings['DOWNLOADER_CLIENTCONTEXTFACTORY'])
        self._contextFactory = self._contextFactoryClass()
        # Seconds before close() forces its deferred to fire (see close()).
        self._disconnect_timeout = 1

    def download_request(self, request, spider):
        """Delegate the download to the Selenium-based logic downloader."""
        myDownloader = SeleniumLogicDownloader()
        return myDownloader.download(request)

    def close(self):
        """Close cached pool connections, bounded by a manual timeout.

        Fix: `_disconnect_timeout` was assigned in __init__ but never used —
        close() returned the raw closeCachedConnections() deferred, which can
        hang indefinitely on network or server issues
        (https://twistedmatrix.com/trac/ticket/7738) and does not handle
        external errbacks. Fire the callback ourselves after
        `_disconnect_timeout` seconds.
        """
        d = self._pool.closeCachedConnections()
        delayed_call = reactor.callLater(self._disconnect_timeout, d.callback, [])

        def cancel_delayed_call(result):
            # Pool drained in time; the forced callback is no longer needed.
            if delayed_call.active():
                delayed_call.cancel()
            return result

        d.addBoth(cancel_delayed_call)
        return d
开发者ID:fennywhu,项目名称:jstest,代码行数:21,代码来源:mydownloader.py


注:本文中的scrapy.xlib.tx.HTTPConnectionPool.closeCachedConnections方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。