当前位置: 首页>>代码示例>>Python>>正文


Python HTTPHeaders.get方法代码示例

本文整理汇总了Python中tornado.httputil.HTTPHeaders.get方法的典型用法代码示例。如果您正苦于以下问题:Python HTTPHeaders.get方法的具体用法?Python HTTPHeaders.get怎么用?Python HTTPHeaders.get使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在tornado.httputil.HTTPHeaders的用法示例。


在下文中一共展示了HTTPHeaders.get方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: _HTTPRequest

# 需要导入模块: from tornado.httputil import HTTPHeaders [as 别名]
# 或者: from tornado.httputil.HTTPHeaders import get [as 别名]
class _HTTPRequest(object):
    """Adapter exposing a msgpack-serialized HTTP request as parsed pieces.

    Unpacks ``(method, url, version, headers, body)`` from *data* and makes
    headers, metadata, query/body arguments, and uploaded files available
    through read-only properties.
    """

    def __init__(self, request, data):
        self._underlying_request = request
        method, url, version, headers, self._body = msgpack_unpackb(data)
        if six.PY3:
            # msgpack yields bytes; normalize everything to text on Python 3.
            method = method.decode()
            url = url.decode()
            version = version.decode()
            headers = [(name.decode(), value.decode()) for name, value in headers]

        self._headers = HTTPHeaders(headers)
        parsed_url = urlparse.urlparse(url)
        # Prefer X-Real-IP, fall back to X-Forwarded-For, then empty string.
        remote_addr = self._headers.get('X-Real-IP') or self._headers.get('X-Forwarded-For', '')
        self._meta = {
            'method': method,
            'version': version,
            'host': self._headers.get('Host', ''),
            'remote_addr': remote_addr,
            'query_string': parsed_url.query,
            'cookies': dict(),
            'parsed_cookies': http_parse_cookies(self._headers),
        }
        args = urlparse.parse_qs(parsed_url.query)
        self._files = dict()
        # Merge form/multipart body arguments into the query-string arguments.
        parse_body_arguments(self._headers.get("Content-Type", ""), self._body, args, self._files)
        self._request = dict_list_to_single(args)

    @property
    def headers(self):
        """Parsed request headers (``HTTPHeaders``)."""
        return self._headers

    def hpack_headers(self):
        """Headers of the underlying transport-level request."""
        return self._underlying_request.headers

    @property
    def body(self):
        """Raw request body."""
        return self._body

    @property
    def meta(self):
        """Dict of request metadata (method, host, remote_addr, ...)."""
        return self._meta

    @property
    def request(self):
        """Combined query-string and body arguments (single values)."""
        return self._request

    @property
    def files(self):
        """Uploaded files parsed from the request body."""
        return self._files
开发者ID:cocaine,项目名称:cocaine-framework-python,代码行数:50,代码来源:http_dec.py

示例2: _read_body

# 需要导入模块: from tornado.httputil import HTTPHeaders [as 别名]
# 或者: from tornado.httputil.HTTPHeaders import get [as 别名]
    def _read_body(
        self,
        code: int,
        headers: httputil.HTTPHeaders,
        delegate: httputil.HTTPMessageDelegate,
    ) -> Optional[Awaitable[None]]:
        """Choose the body-reading strategy from the message framing headers.

        Validates ``Content-Length`` / ``Transfer-Encoding`` per RFC 7230
        and returns an awaitable that completes when the body has been fed
        to *delegate*, or ``None`` when there is no body to read.

        Raises:
            httputil.HTTPInputError: on contradictory, duplicated-unequal,
                or non-integer framing headers, an over-long body, or a
                body on a 204 response.
        """
        if "Content-Length" in headers:
            if "Transfer-Encoding" in headers:
                # Response cannot contain both Content-Length and
                # Transfer-Encoding headers.
                # http://tools.ietf.org/html/rfc7230#section-3.3.3
                raise httputil.HTTPInputError(
                    "Response with both Transfer-Encoding and Content-Length"
                )
            if "," in headers["Content-Length"]:
                # Proxies sometimes cause Content-Length headers to get
                # duplicated.  If all the values are identical then we can
                # use them but if they differ it's an error.
                pieces = re.split(r",\s*", headers["Content-Length"])
                if any(i != pieces[0] for i in pieces):
                    raise httputil.HTTPInputError(
                        "Multiple unequal Content-Lengths: %r"
                        % headers["Content-Length"]
                    )
                # Collapse the duplicates to a single value for downstream code.
                headers["Content-Length"] = pieces[0]

            try:
                content_length = int(headers["Content-Length"])  # type: Optional[int]
            except ValueError:
                # Handles non-integer Content-Length value.
                raise httputil.HTTPInputError(
                    "Only integer Content-Length is allowed: %s"
                    % headers["Content-Length"]
                )

            if cast(int, content_length) > self._max_body_size:
                raise httputil.HTTPInputError("Content-Length too long")
        else:
            content_length = None

        if code == 204:
            # This response code is not allowed to have a non-empty body,
            # and has an implicit length of zero instead of read-until-close.
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3
            if "Transfer-Encoding" in headers or content_length not in (None, 0):
                raise httputil.HTTPInputError(
                    "Response with code %d should not have body" % code
                )
            content_length = 0

        if content_length is not None:
            return self._read_fixed_body(content_length, delegate)
        if headers.get("Transfer-Encoding", "").lower() == "chunked":
            return self._read_chunked_body(delegate)
        if self.is_client:
            # No framing info on a response: body runs until the connection closes.
            return self._read_body_until_close(delegate)
        return None
开发者ID:rgbkrk,项目名称:tornado,代码行数:59,代码来源:http1connection.py

示例3: _can_keep_alive

# 需要导入模块: from tornado.httputil import HTTPHeaders [as 别名]
# 或者: from tornado.httputil.HTTPHeaders import get [as 别名]
 def _can_keep_alive(
     self, start_line: httputil.RequestStartLine, headers: httputil.HTTPHeaders
 ) -> bool:
     """Return True when the connection may be reused after this message."""
     if self.params.no_keep_alive:
         return False
     conn_token = headers.get("Connection")
     conn_token = conn_token.lower() if conn_token is not None else None
     if start_line.version == "HTTP/1.1":
         # HTTP/1.1 defaults to persistent connections unless told otherwise.
         return conn_token != "close"
     # Pre-1.1: reuse only when the message length is self-delimiting.
     # start_line may be a request or response start line; only the
     # former has a method attribute.
     self_delimiting = (
         "Content-Length" in headers
         or headers.get("Transfer-Encoding", "").lower() == "chunked"
         or getattr(start_line, "method", None) in ("HEAD", "GET")
     )
     return self_delimiting and conn_token == "keep-alive"
开发者ID:rgbkrk,项目名称:tornado,代码行数:21,代码来源:http1connection.py

示例4: test_urllib2

# 需要导入模块: from tornado.httputil import HTTPHeaders [as 别名]
# 或者: from tornado.httputil.HTTPHeaders import get [as 别名]
def test_urllib2(scheme, root_span, install_hooks):
    """Check urllib2 requests are traced, tagged, and get a trace id injected."""
    request = urllib2.Request('%s://localhost:9777/proxy' % scheme,
                              headers={'Remote-LOC': 'New New York',
                                       'Remote-Op': 'antiquing'})

    class Response(object):
        """Minimal stand-in for a urllib2 response object."""

        def __init__(self):
            self.code = 200
            self.msg = ''

        def info(self):
            return None

    if root_span:
        root_span = mock.MagicMock()
        root_span.context = mock.MagicMock()
        root_span.finish = mock.MagicMock()
        root_span.__exit__ = mock.MagicMock()
    else:
        root_span = None

    span = mock.MagicMock()
    span.set_tag = mock.MagicMock()
    span.finish = mock.MagicMock()

    def inject(span_context, format, carrier):
        # Simulate the tracer writing its wire format into the headers.
        carrier['TRACE-ID'] = '123'

    patched_do_open = mock.patch('urllib2.AbstractHTTPHandler.do_open',
                                 return_value=Response())
    patched_start_span = mock.patch.object(opentracing.tracer, 'start_span',
                                           return_value=span)
    patched_inject = mock.patch.object(opentracing.tracer, 'inject',
                                       side_effect=inject)
    current_span_ctx = span_in_context(span=root_span)

    with patched_do_open, patched_start_span as start_call, \
            patched_inject, current_span_ctx:
        resp = urllib2.urlopen(request)
        expected_parent = root_span.context if root_span else None
        start_call.assert_called_once_with(
            operation_name='GET:antiquing',
            child_of=expected_parent,
            tags=None,
        )
    assert resp is not None
    span.set_tag.assert_any_call('span.kind', 'client')
    assert span.__enter__.call_count == 1
    assert span.__exit__.call_count == 1, 'ensure finish() was called'
    if root_span:
        assert root_span.__exit__.call_count == 0, 'do not finish root span'

    # verify trace-id was correctly injected into headers
    norm_headers = HTTPHeaders(request.headers)
    assert norm_headers.get('trace-id') == '123'
开发者ID:uber-common,项目名称:opentracing-python-instrumentation,代码行数:56,代码来源:test_sync_client_hooks.py

示例5: _apply_xheaders

# 需要导入模块: from tornado.httputil import HTTPHeaders [as 别名]
# 或者: from tornado.httputil.HTTPHeaders import get [as 别名]
 def _apply_xheaders(self, headers: httputil.HTTPHeaders) -> None:
     """Rewrite ``remote_ip`` and ``protocol`` from proxy-supplied X- headers."""
     # Squid uses X-Forwarded-For, others use X-Real-Ip.  Walk the
     # X-Forwarded-For chain right-to-left, skipping trusted downstream hosts.
     forwarded = headers.get("X-Forwarded-For", self.remote_ip)
     ip = forwarded
     for candidate in reversed(forwarded.split(",")):
         ip = candidate.strip()
         if ip not in self.trusted_downstream:
             break
     ip = headers.get("X-Real-Ip", ip)
     if netutil.is_valid_ip(ip):
         self.remote_ip = ip
     # AWS uses X-Forwarded-Proto; X-Scheme takes precedence when present.
     proto_header = headers.get(
         "X-Scheme", headers.get("X-Forwarded-Proto", self.protocol)
     )
     if proto_header:
         # use only the last proto entry if there is more than one
         # TODO: support trusting mutiple layers of proxied protocol
         proto_header = proto_header.split(",")[-1].strip()
     if proto_header in ("http", "https"):
         self.protocol = proto_header
开发者ID:bdarnell,项目名称:tornado,代码行数:23,代码来源:httpserver.py

示例6: headers_received

# 需要导入模块: from tornado.httputil import HTTPHeaders [as 别名]
# 或者: from tornado.httputil.HTTPHeaders import get [as 别名]
 def headers_received(
     self,
     start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine],
     headers: httputil.HTTPHeaders,
 ) -> Optional[Awaitable[None]]:
     """Set up transparent gzip decompression, then hand off to the delegate."""
     encoding = headers.get("Content-Encoding")
     if encoding == "gzip":
         self._decompressor = GzipDecompressor()
         # Downstream delegates only ever see uncompressed data, so move the
         # original header aside (note that curl_httpclient doesn't do this).
         headers.add("X-Consumed-Content-Encoding", headers["Content-Encoding"])
         del headers["Content-Encoding"]
     return self._delegate.headers_received(start_line, headers)
开发者ID:rgbkrk,项目名称:tornado,代码行数:15,代码来源:http1connection.py

示例7: ProxyHandler

# 需要导入模块: from tornado.httputil import HTTPHeaders [as 别名]
# 或者: from tornado.httputil.HTTPHeaders import get [as 别名]
class ProxyHandler(tornado.web.StaticFileHandler):
    """Caching proxy handler: serves cache hits from disk via
    StaticFileHandler, fetches misses from the upstream server."""

    # Chunk size used when relaying/streaming response data.
    CHUNK_SIZE = 64 * 1024
    SUPPORTED_METHODS = ['GET', 'CONNECT']

    def initialize(self, path, default_filename=None):
        """Remember the cache directory and URL-mapping hook, then delegate
        to the StaticFileHandler initializer rooted at the cache dir."""
        self.cache_dir = path
        # Application-provided hook that maps a URL to a cache-relative path.
        self.url_transpose = self.application.url_transpose
        tornado.web.StaticFileHandler.initialize(self, str(self.cache_dir))

    def data_received(self, chunk):
        """Streaming request bodies are not supported by this handler."""
        raise NotImplementedError()

    def prepare(self):
        """Reset per-request caching state before the HTTP verb method runs."""
        # File suffixes that identify repository artifacts worth caching.
        self.cacheable_exts = ('.rpm', '.img', '.sqlite.bz2', '.sqlite.gz', '.xml', '.xml.gz', '.qcow2', '.raw.xz',
                               '.iso', 'filelist.gz', 'vmlinuz')

        self.cacheable = False
        self.cache_used = False
        self.cache_url = False
        self.cache_file = self.cache_fd = None

        # Details of the upstream response, filled in while proxying.
        self.req_code = self.req_path = self.req_headers = None

    def is_cacheable(self, path):
        """Return True when *path* ends with one of the cacheable suffixes."""
        return any(path.endswith(suffix) for suffix in self.cacheable_exts)

    @tornado.gen.coroutine
    @tornado.web.asynchronous
    def get(self, path, include_body=True):
        self.req_path = path
        app_log.info('process %s', path)

        url = urlsplit(path)


        self.cache_url = path.replace(url[0] + '://', '')
        self.cacheable = self.is_cacheable(url.path)
        app_log.debug('is cacheable %r', self.cacheable)
        if self.cacheable:
            cache_file = self.url_transpose(path)
            if not cache_file:
                netloc = [x for x in reversed(url.netloc.split('.'))]
                self.cache_file = self.cache_dir / '.'.join(netloc) / url.path[1:]
            else:
                self.cache_file = self.cache_dir / cache_file

        else:
            uri = self.request.uri.encode()
            cache_id = hashlib.sha1(uri).hexdigest()
            cache_path = self.cache_dir / '~' / cache_id[:2]

            cache_info = cache_path / (cache_id + '-url.txt')
            if not cache_info.exists():
                if not cache_info.parent.exists():
                    cache_info.parent.mkdir(parents=True)

                with cache_info.open('w') as f:
                    f.write(uri.decode())

            self.cache_file = cache_path / (cache_id + '-data.txt')

        cache_time = None
        if self.cache_file.exists():
            self.cache_file = self.cache_file.resolve()
            cache_time = self.cache_file.stat().st_mtime

            lifetime = time() - int(self.settings['cache']['lifetime']) * 60 * 60
            app_log.debug('cache time is %r lifetime is %r', cache_time, lifetime)
            if cache_time > lifetime:
                app_log.info('found %s', self.cache_file)

                cache_url = self.cache_file.relative_to(self.cache_dir).as_posix()
                return tornado.web.StaticFileHandler.get(self, cache_url)

            app_log.info('%s lifetime exceeded', self.cache_file)

        args = {k: v[0] for k, v in self.request.arguments.items()}

        app_log.info('fetch %s', self.request.uri)
        if 'Range' in self.request.headers:
            del self.request.headers['Range']

        self.client = AsyncHTTPClient()
        self.client.fetch(self.request.uri,
                          method=self.request.method,
                          body=self.request.body,
                          headers=self.request.headers,
                          follow_redirects=False,
                          if_modified_since=cache_time,
                          allow_nonstandard_methods=True,
                          connect_timeout=int(self.settings['proxy']['timeout']),
                          request_timeout=2 ** 31 - 1,
                          header_callback=self.process_header,
                          streaming_callback=self.process_body,
                          callback=self.process_finish)

#.........这里部分代码省略.........
开发者ID:hurie,项目名称:tyumproxy,代码行数:103,代码来源:handler.py


注:本文中的tornado.httputil.HTTPHeaders.get方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。