本文整理汇总了Python中kafka.protocol.types.Int32.encode方法的典型用法代码示例。如果您正苦于以下问题:Python Int32.encode方法的具体用法?Python Int32.encode怎么用?Python Int32.encode使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类kafka.protocol.types.Int32的用法示例。
在下文中一共展示了Int32.encode方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_decode_fetch_response_partial
# 需要导入模块: from kafka.protocol.types import Int32 [as 别名]
# 或者: from kafka.protocol.types.Int32 import encode [as 别名]
def test_decode_fetch_response_partial():
    """Decode a FetchResponse v0 whose MessageSets end with a truncated
    message, and verify the truncated tail surfaces as PartialMessage.
    """
    encoded = b''.join([
        Int32.encode(1),                # Num Topics (Array)
        String('utf-8').encode('foobar'),
        Int32.encode(2),                # Num Partitions (Array)
        # --- first partition ---
        Int32.encode(0),                # Partition id
        Int16.encode(0),                # Error Code
        Int64.encode(1234),             # Highwater offset
        Int32.encode(52),               # MessageSet size
        Int64.encode(0),                # Msg Offset
        Int32.encode(18),               # Msg Size
        struct.pack('>i', 1474775406),  # CRC
        struct.pack('>bb', 0, 0),       # Magic, flags
        struct.pack('>i', 2),           # Length of key
        b'k1',                          # Key
        struct.pack('>i', 2),           # Length of value
        b'v1',                          # Value
        Int64.encode(1),                # Msg Offset
        struct.pack('>i', 24),          # Msg Size (larger than remaining MsgSet size)
        struct.pack('>i', -16383415),   # CRC
        struct.pack('>bb', 0, 0),       # Magic, flags
        struct.pack('>i', 2),           # Length of key
        b'k2',                          # Key
        struct.pack('>i', 8),           # Length of value
        b'ar',                          # Value (truncated)
        # --- second partition: same message layout as above ---
        Int32.encode(1),                # Partition id
        Int16.encode(0),                # Error Code
        Int64.encode(2345),             # Highwater offset
        Int32.encode(52),               # MessageSet size
        Int64.encode(0),                # Msg Offset
        Int32.encode(18),               # Msg Size
        struct.pack('>i', 1474775406),  # CRC
        struct.pack('>bb', 0, 0),       # Magic, flags
        struct.pack('>i', 2),           # Length of key
        b'k1',                          # Key
        struct.pack('>i', 2),           # Length of value
        b'v1',                          # Value
        Int64.encode(1),                # Msg Offset
        struct.pack('>i', 24),          # Msg Size (larger than remaining MsgSet size)
        struct.pack('>i', -16383415),   # CRC
        struct.pack('>bb', 0, 0),       # Magic, flags
        struct.pack('>i', 2),           # Length of key
        b'k2',                          # Key
        struct.pack('>i', 8),           # Length of value
        b'ar',                          # Value (truncated)
    ])
    resp = FetchResponse[0].decode(io.BytesIO(encoded))
    assert len(resp.topics) == 1
    topic, partitions = resp.topics[0]
    assert topic == 'foobar'
    assert len(partitions) == 2
    # partitions[0][3] holds the raw MessageSet bytes of the first partition.
    m1 = MessageSet.decode(
        partitions[0][3], bytes_to_read=len(partitions[0][3]))
    assert len(m1) == 2
    # The second message was truncated mid-value; it decodes to the
    # PartialMessage sentinel rather than raising.
    assert m1[1] == (None, None, PartialMessage())
示例2: send_request
# 需要导入模块: from kafka.protocol.types import Int32 [as 别名]
# 或者: from kafka.protocol.types.Int32 import encode [as 别名]
def send_request(self, request, correlation_id=None):
    """Encode a kafka api request and queue its framed bytes for sending.

    Arguments:
        request (object): An un-encoded kafka request.
        correlation_id (int, optional): Optionally specify an ID to
            correlate requests with responses. If not provided, an ID will
            be generated automatically.

    Returns:
        correlation_id
    """
    log.debug('Sending request %s', request)
    if correlation_id is None:
        correlation_id = self._next_correlation_id()
    header = RequestHeader(request,
                           correlation_id=correlation_id,
                           client_id=self._client_id)
    payload = header.encode() + request.encode()
    # Kafka wire framing: 4-byte big-endian length prefix, then the payload.
    self.bytes_to_send.append(Int32.encode(len(payload)) + payload)
    if request.expect_response():
        # Remember the request so the response can be matched back to it.
        self.in_flight_requests.append((correlation_id, request))
    return correlation_id
示例3: drain_ready
# 需要导入模块: from kafka.protocol.types import Int32 [as 别名]
# 或者: from kafka.protocol.types.Int32 import encode [as 别名]
def drain_ready(self):
    """Compress batch to be ready for send.

    Finalizes the internal buffer in place: optionally wraps the batch in a
    single compressed wrapper message, then patches the leading 4-byte size
    prefix and rewinds the buffer for reading.
    """
    memview = self._buffer.getbuffer()
    # Signal whoever is waiting that the sender has taken this batch.
    self._drain_waiter.set_result(None)
    if self._compression_type:
        _, compressor, attrs = self._COMPRESSORS[self._compression_type]
        # Compress everything after the 4-byte size prefix into one wrapper
        # Message carrying the compression attribute bits.
        msg = Message(compressor(memview[4:].tobytes()), attributes=attrs)
        encoded = msg.encode()
        # if compressed message is longer than original
        # we should send it as is (not compressed)
        header_size = 16    # 4(all size) + 8(offset) + 4(compressed size)
        if len(encoded) + header_size < len(memview):
            # write compressed message set (with header) to buffer
            # using memory view (for avoid memory copying)
            memview[:4] = Int32.encode(len(encoded) + 12)
            memview[4:12] = Int64.encode(0)  # offset 0
            memview[12:16] = Int32.encode(len(encoded))
            memview[16:16+len(encoded)] = encoded
            self._buffer.seek(0)
            return
    # update batch size (first 4 bytes of buffer)
    memview[:4] = Int32.encode(self._buffer.tell()-4)
    self._buffer.seek(0)
示例4: send
# 需要导入模块: from kafka.protocol.types import Int32 [as 别名]
# 或者: from kafka.protocol.types.Int32 import encode [as 别名]
def send(self, request, expect_response=True):
    """send request, return Future()

    Can block on network if request is larger than send_buffer_bytes
    """
    future = Future()
    # Fail fast if the connection cannot accept a request right now.
    if self.connecting():
        return future.failure(Errors.NodeNotReadyError(str(self)))
    elif not self.connected():
        return future.failure(Errors.ConnectionError(str(self)))
    elif not self.can_send_more():
        return future.failure(Errors.TooManyInFlightRequests(str(self)))
    correlation_id = self._next_correlation_id()
    header = RequestHeader(request,
                           correlation_id=correlation_id,
                           client_id=self.config['client_id'])
    message = b''.join([header.encode(), request.encode()])
    # Kafka wire framing: 4-byte big-endian length prefix before the payload.
    size = Int32.encode(len(message))
    try:
        # In the future we might manage an internal write buffer
        # and send bytes asynchronously. For now, just block
        # sending each request payload
        self._sock.setblocking(True)
        for data in (size, message):
            total_sent = 0
            # socket.send may write fewer bytes than requested; loop until
            # the whole chunk is out.
            while total_sent < len(data):
                sent_bytes = self._sock.send(data[total_sent:])
                total_sent += sent_bytes
            assert total_sent == len(data)
        self._sock.setblocking(False)
    except (AssertionError, ConnectionError) as e:
        log.exception("Error sending %s to %s", request, self)
        error = Errors.ConnectionError("%s: %s" % (str(self), e))
        # Tear down the connection; partial writes leave the stream corrupt.
        self.close(error=error)
        return future.failure(error)
    log.debug('%s Request %d: %s', self, correlation_id, request)
    if expect_response:
        # Track the request until the matching response arrives.
        ifr = InFlightRequest(request=request,
                              correlation_id=correlation_id,
                              response_type=request.RESPONSE_TYPE,
                              future=future,
                              timestamp=time.time())
        self.in_flight_requests.append(ifr)
    else:
        future.success(None)
    return future
示例5: append
# 需要导入模块: from kafka.protocol.types import Int32 [as 别名]
# 或者: from kafka.protocol.types.Int32 import encode [as 别名]
def append(self, key, value):
    """Append message (key and value) to batch.

    Returns:
        None if batch is full
        or
        asyncio.Future that will resolved when message is delivered
    """
    if self._is_full(key, value):
        return None
    body = Message(value, key=key).encode()
    # MessageSet entry layout: 8-byte relative offset, 4-byte length, body.
    entry = b''.join([
        Int64.encode(self._relative_offset),
        Int32.encode(len(body)),
        body,
    ])
    self._buffer.write(entry)
    delivery_future = asyncio.Future(loop=self._loop)
    self._msg_futures.append(delivery_future)
    self._relative_offset += 1
    return delivery_future
示例6: __init__
# 需要导入模块: from kafka.protocol.types import Int32 [as 别名]
# 或者: from kafka.protocol.types.Int32 import encode [as 别名]
def __init__(self, tp, batch_size, compression_type, ttl, loop):
    """Initialize a message batch for topic-partition *tp*.

    Fails fast (assert) if the requested compression codec's library is
    not installed.
    """
    if compression_type:
        is_available, _, _ = self._COMPRESSORS[compression_type]
        assert is_available(), 'Compression Libraries Not Found'

    self._tp = tp
    self._batch_size = batch_size
    self._compression_type = compression_type

    # Reserve the first 4 bytes of the buffer for the batch size prefix;
    # it is patched with the real size when the batch is drained.
    buf = io.BytesIO()
    buf.write(Int32.encode(0))
    self._buffer = buf

    self._relative_offset = 0
    self._loop = loop
    self._ttl = ttl
    self._ctime = loop.time()

    # Waiters
    # Set when messages are delivered to Kafka based on ACK setting
    self._msg_futures = []
    # Set when sender takes this batch
    self._drain_waiter = asyncio.Future(loop=loop)
示例7: _send
# 需要导入模块: from kafka.protocol.types import Int32 [as 别名]
# 或者: from kafka.protocol.types.Int32 import encode [as 别名]
def _send(self, request, expect_response=True):
    """Encode *request* with a correlation header and blocking-send it on
    the socket; returns a Future resolved with the response (or None when
    no response is expected).
    """
    future = Future()
    correlation_id = self._next_correlation_id()
    header = RequestHeader(request,
                           correlation_id=correlation_id,
                           client_id=self.config['client_id'])
    message = b''.join([header.encode(), request.encode()])
    # Kafka wire framing: 4-byte big-endian length prefix before the payload.
    size = Int32.encode(len(message))
    data = size + message
    try:
        # In the future we might manage an internal write buffer
        # and send bytes asynchronously. For now, just block
        # sending each request payload
        self._sock.setblocking(True)
        total_sent = 0
        # socket.send may write fewer bytes than requested; loop until done.
        while total_sent < len(data):
            sent_bytes = self._sock.send(data[total_sent:])
            total_sent += sent_bytes
        assert total_sent == len(data)
        if self._sensors:
            self._sensors.bytes_sent.record(total_sent)
        self._sock.setblocking(False)
    except (AssertionError, ConnectionError) as e:
        log.exception("Error sending %s to %s", request, self)
        error = Errors.ConnectionError("%s: %s" % (str(self), e))
        # Tear down the connection; partial writes leave the stream corrupt.
        self.close(error=error)
        return future.failure(error)
    log.debug('%s Request %d: %s', self, correlation_id, request)
    if expect_response:
        # Track the request until the matching response arrives.
        ifr = InFlightRequest(request=request,
                              correlation_id=correlation_id,
                              response_type=request.RESPONSE_TYPE,
                              future=future,
                              timestamp=time.time())
        self.in_flight_requests.append(ifr)
    else:
        future.success(None)
    return future
示例8: _try_authenticate_plain
# 需要导入模块: from kafka.protocol.types import Int32 [as 别名]
# 或者: from kafka.protocol.types.Int32 import encode [as 别名]
def _try_authenticate_plain(self, future):
    """Perform SASL/PLAIN authentication on the connected socket.

    Sends the RFC-4616 credential message and waits for the broker's
    4-byte success reply. Resolves *future* with True on success or an
    appropriate error on failure; returns the future.
    """
    if self.config['security_protocol'] == 'SASL_PLAINTEXT':
        log.warning('%s: Sending username and password in the clear', str(self))

    data = b''
    try:
        self._sock.setblocking(True)
        # Send PLAIN credentials per RFC-4616: authzid NUL authcid NUL passwd.
        # The username appears twice on purpose — it serves as both the
        # authorization and the authentication identity.
        msg = bytes('\0'.join([self.config['sasl_plain_username'],
                               self.config['sasl_plain_username'],
                               self.config['sasl_plain_password']]).encode('utf-8'))
        size = Int32.encode(len(msg))
        self._sock.sendall(size + msg)

        # The server will send a zero sized message (that is Int32(0)) on success.
        # The connection is closed on failure
        while len(data) < 4:
            fragment = self._sock.recv(4 - len(data))
            if not fragment:
                log.error('%s: Authentication failed for user %s', self, self.config['sasl_plain_username'])
                error = Errors.AuthenticationFailedError(
                    'Authentication failed for user {0}'.format(
                        self.config['sasl_plain_username']))
                future.failure(error)
                raise error
            data += fragment
        self._sock.setblocking(False)
    except (AssertionError, ConnectionError) as e:
        log.exception("%s: Error receiving reply from server", self)
        error = Errors.ConnectionError("%s: %s" % (str(self), e))
        future.failure(error)
        self.close(error=error)
        # BUGFIX: previously control fell through to the success check below
        # with a partial buffer, failing the already-failed future a second
        # time. Stop here instead.
        return future

    if data != b'\x00\x00\x00\x00':
        return future.failure(Errors.AuthenticationFailedError())
    return future.success(True)