当前位置: 首页>>代码示例>>Python>>正文


Python HttpParser.get_query_string方法代码示例

本文整理汇总了Python中http_parser.parser.HttpParser.get_query_string方法的典型用法代码示例。如果您正苦于以下问题:Python HttpParser.get_query_string方法的具体用法?Python HttpParser.get_query_string怎么用?Python HttpParser.get_query_string使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在http_parser.parser.HttpParser的用法示例。


在下文中一共展示了HttpParser.get_query_string方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: inject

# 需要导入模块: from http_parser.parser import HttpParser [as 别名]
# 或者: from http_parser.parser.HttpParser import get_query_string [as 别名]
    def inject(self, dest, to_backend, data, http=False):
        """Forward *data* to *dest*, corrupting it with random bytes first.

        :param dest: socket-like object; its ``sendall`` receives the
            (possibly corrupted) payload.
        :param to_backend: direction flag. When False (traffic going back
            to the client) 100 random bytes are spliced into the middle of
            the payload.
        :param data: raw request/response bytes.
        :param http: when True, parse *data* as HTTP and append random
            bytes to the body, query string or URL -- whichever the
            message actually carries (checked in that order).
        """
        modified_data = data
        if http:
            parser = HttpParser()
            parser.execute(data, len(data))

            query = parser.get_query_string()
            url = parser.get_url()
            body = parser.recv_body()
            # Prefer corrupting the body, then the query string, then the
            # URL, so the injection lands in a part the request really has.
            if body:
                inject_in = body
            elif query:
                inject_in = query
            else:
                inject_in = url
            modified_data = data.replace(
                inject_in, "%s%s" % (inject_in, os.urandom(100))
            )

        if not to_backend:      # back to the client
            # Floor division: len(data) / 2 is a float on Python 3 and
            # cannot be used as a slice index ("//" is identical on Py2).
            middle = len(data) // 2
            modified_data = data[:middle] + os.urandom(100) + data[middle:]

        # sending the data to the backend
        dest.sendall(modified_data)
开发者ID:dhoomakethu,项目名称:vaurien,代码行数:29,代码来源:error.py

示例2: __init__

# 需要导入模块: from http_parser.parser import HttpParser [as 别名]
# 或者: from http_parser.parser.HttpParser import get_query_string [as 别名]
 def __init__(self, raw):
     """Parse the captured HTTP request in *raw* and expose its parts
     (headers, body, url, path, method, query arguments, path slug)."""
     self.raw = raw
     parser = HttpParser()
     parser.execute(raw.request, len(raw.request))
     self.headers = parser.get_headers()
     # NOTE(review): reaches into the parser's private _body buffer --
     # there is no public accessor for the accumulated body chunks here.
     self.body = b"".join(parser._body)
     self.url = parser.get_url()
     self.path = parser.get_path()
     self.method = parser.get_method()
     self.arguments = parser.get_query_string()
     # Non-empty path segments, e.g. "/a/b/" -> ['a', 'b'].
     self.slug = [segment for segment in self.path.split('/') if segment != '']
开发者ID:bearstech,项目名称:packetbeat.py,代码行数:13,代码来源:http.py

示例3: proxy

# 需要导入模块: from http_parser.parser import HttpParser [as 别名]
# 或者: from http_parser.parser.HttpParser import get_query_string [as 别名]
def proxy(data):
    """
    the function called by tproxy to determine where to send traffic

    tproxy will call this function repeatedly for the same connection, as we
    receive more incoming data, until we return something other than None.

    typically our response tells tproxy where to proxy the connection to, but
    may also tell it to hang up, or respond with some error message.
    """

    log = logging.getLogger("proxy")

    bytes_received = len(data)

    parser =  HttpParser()
    bytes_parsed = parser.execute(data, bytes_received)

    if bytes_parsed != bytes_received:
        return { 'close': 
            'HTTP/1.0 400 Bad Request\r\n\r\nParse error' }

    if not parser.is_headers_complete(): 
        if bytes_received > MAX_HEADER_LENGTH:
            return { 'close': 
                'HTTP/1.0 400 Bad Request\r\n'
                '\r\nHeaders are too large' }
        return None

    headers = parser.get_headers()

    # the hostname may be in the form of hostname:port, in which case we want
    # to discard the port, and route just on hostname
    route_host = headers.get('HOST', None)
    if route_host:
        match = _HOST_PORT_REGEXP.match(route_host)
        if match:
            route_host = match.group(1)

    try:
        log.debug("Routing %r" % ( parser.get_url(), ))
        return _ROUTER.route(
            route_host,
            parser.get_method(),
            parser.get_path(),
            parser.get_query_string())
    except Exception, err:
        log.error("error routing %r, %s" % (
            parser.get_url(), traceback.format_exc(), ))
        gevent.sleep(ERROR_DELAY)
        return { 'close': 
            'HTTP/1.0 502 Gateway Error\r\n'
            '\r\nError routing request' }
开发者ID:HackLinux,项目名称:nimbus.io,代码行数:55,代码来源:web_director_main.py

示例4: HttpStream

# 需要导入模块: from http_parser.parser import HttpParser [as 别名]
# 或者: from http_parser.parser.HttpParser import get_query_string [as 别名]
class HttpStream(object):
    """ An HTTP parser providing higher-level access to a readable,
    sequential io.RawIOBase object. You can use implementations of
    http_parser.reader (IterReader, StringReader, SocketReader) or
    create your own.
    """

    def __init__(self, stream, kind=HTTP_BOTH, decompress=False):
        """ constructor of HttpStream.

        :attr stream: an io.RawIOBase object
        :attr kind: Int, could be 0 to parse only requests,
        1 to parse only responses or 2 if we want to let
        the parser detect the type.
        :attr decompress: bool, passed through to HttpParser.
        """
        self.parser = HttpParser(kind=kind, decompress=decompress)
        self.stream = stream

    def _check_headers_complete(self):
        """ Consume data from the stream until the parser has seen the
        complete header block; raise NoMoreData if the stream is exhausted
        before the headers are complete. """
        if self.parser.is_headers_complete():
            return

        while True:
            try:
                # next(self) feeds one more chunk from the stream into the
                # parser (iterator protocol implemented outside this view).
                next(self)
            except StopIteration:
                if self.parser.is_headers_complete():
                    return
                raise NoMoreData("Can't parse headers")

            if self.parser.is_headers_complete():
                return


    def _wait_status_line(self, cond):
        """ Consume data until cond() becomes truthy (i.e. the status or
        request line has been parsed). Raises BadStatusLine with whatever
        was read if the stream ends first; returns True on success. """
        if self.parser.is_headers_complete():
            return True

        data = []
        if not cond():
            while True:
                try:
                    d = next(self)
                    data.append(d)
                except StopIteration:
                    if self.parser.is_headers_complete():
                        return True
                    raise BadStatusLine(b"".join(data))
                if cond():
                    return True
        return True

    def _wait_on_url(self):
        # Requests: the URL is available once the request line is parsed.
        return self._wait_status_line(self.parser.get_url)

    def _wait_on_status(self):
        # Responses: the status code is available once the status line is
        # parsed.
        return self._wait_status_line(self.parser.get_status_code)

    def url(self):
        """ get full url of the request """
        self._wait_on_url()
        return self.parser.get_url()

    def path(self):
        """ get path of the request (url without query string and
        fragment) """
        self._wait_on_url()
        return self.parser.get_path()

    def query_string(self):
        """ get query string of the url """
        self._wait_on_url()
        return self.parser.get_query_string()

    def fragment(self):
        """ get fragment of the url """
        self._wait_on_url()
        return self.parser.get_fragment()

    def version(self):
        """ get the HTTP version parsed from the status/request line """
        self._wait_on_status()
        return self.parser.get_version()

    def status_code(self):
        """ get status code of a response as integer """
        self._wait_on_status()
        return self.parser.get_status_code()

    def status(self):
        """ return complete status with reason """
        status_code = self.status_code()
        # Unknown/non-standard codes fall back to the literal 'unknown'.
        reason = status_reasons.get(int(status_code), 'unknown')
        return "%s %s" % (status_code, reason)


    def method(self):
        """ get HTTP method as string"""
        self._wait_on_status()
        return self.parser.get_method()

#.........这里部分代码省略.........
开发者ID:RedWallaroo,项目名称:Intercept_Proxy_web,代码行数:103,代码来源:http.py

示例5: HTTPProtocol

# 需要导入模块: from http_parser.parser import HttpParser [as 别名]
# 或者: from http_parser.parser.HttpParser import get_query_string [as 别名]
class HTTPProtocol(FlowControlMixin, asyncio.Protocol):
    """ asyncio protocol that parses incoming bytes with HttpParser and
    dispatches each request to a user-supplied callback, exposing the body
    through a StreamReader and the response through a StreamWriter. """

    def __init__(self, stream_reader, callback, loop=None):
        """ :param stream_reader: asyncio.StreamReader that will be fed the
            request body.
        :param callback: coroutine called as callback(request, body_reader)
            and expected to return (status, headers, body) -- see dispatch().
        :param loop: optional event loop. """
        super().__init__(loop=loop)
        self._stream_reader = stream_reader
        self._stream_writer = None

        self._callback = callback
        # Task running dispatch(); stays None until headers are complete,
        # which also guards against dispatching the same request twice.
        self._task = None

        self._server = None

    def connection_made(self, transport):
        # Fresh parser per connection.
        self._parser = HttpParser()

        self._stream_reader.set_transport(transport)
        self._stream_writer = asyncio.StreamWriter(
            transport,
            self,
            self._stream_reader,
            self._loop,
        )

        # Grab the name of our socket if we have it
        self._server = transport.get_extra_info("sockname")

    def connection_lost(self, exc):
        # Propagate either EOF or the error to the body reader so any
        # pending read in the dispatch task wakes up.
        if exc is None:
            self._stream_reader.feed_eof()
        else:
            self._stream_reader.set_exception(exc)

        super().connection_lost(exc)

    def data_received(self, data):
        # Parse our incoming data with our HTTP parser
        self._parser.execute(data, len(data))

        # If we have not already handled the headers and we've gotten all of
        # them, then invoke the callback with the headers in them.
        if self._task is None and self._parser.is_headers_complete():
            coro = self.dispatch(
                {
                    "server": self._server,
                    "protocol": b"HTTP/" + b".".join(
                        str(x).encode("ascii")
                        for x in self._parser.get_version()
                    ),
                    "method": self._parser.get_method().encode("latin1"),
                    "path": self._parser.get_path().encode("latin1"),
                    "query": self._parser.get_query_string().encode("latin1"),
                    "headers": self._parser.get_headers(),
                },
                self._stream_reader,
                self._stream_writer,
            )
            self._task = asyncio.Task(coro, loop=self._loop)

        # Determine if we have any data in the body buffer and if so feed it
        # to our StreamReader
        if self._parser.is_partial_body():
            self._stream_reader.feed_data(self._parser.recv_body())

        # Determine if we've completed the end of the HTTP request, if we have
        # then we should close our stream reader because there is nothing more
        # to read.
        if self._parser.is_message_complete():
            self._stream_reader.feed_eof()

    def eof_received(self):
        # We've gotten an EOF from the client, so we'll propagate this to our
        # StreamReader
        self._stream_reader.feed_eof()

    @asyncio.coroutine
    def dispatch(self, request, request_body, response):
        # Get the status, headers, and body from the callback. The body must
        # be iterable, and each item can either be a bytes object, or an
        # asyncio coroutine, in which case we'll ``yield from`` on it to wait
        # for it's value.
        status, resp_headers, body = yield from self._callback(
            request,
            request_body,
        )

        # Write out the status line to the client for this request
        # TODO: We probably don't want to hard code HTTP/1.1 here
        response.write(b"HTTP/1.1 " + status + b"\r\n")

        # Write out the headers, taking special care to ensure that any
        # mandatory headers are added.
        # TODO: We need to handle some required headers
        for key, values in resp_headers.items():
            # In order to handle headers which need to have multiple values
            # like Set-Cookie, we allow the value of the header to be an
            # iterable instead of a bytes object, in which case we'll write
            # multiple header lines for this header.
            if isinstance(values, (bytes, bytearray)):
                values = [values]

#.........这里部分代码省略.........
开发者ID:dstufft,项目名称:stein,代码行数:103,代码来源:protocol.py


注:本文中的http_parser.parser.HttpParser.get_query_string方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。