本文整理汇总了Python中kafka.protocol.types.Int32类的典型用法代码示例。如果您正苦于以下问题:Python Int32类的具体用法?Python Int32怎么用?Python Int32使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Int32类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_decode_fetch_response_partial
def test_decode_fetch_response_partial():
    """Decode a FetchResponse v0 whose MessageSets each end in a truncated
    message (declared Msg Size exceeds the bytes remaining in the
    MessageSet); the truncated tail must decode as a PartialMessage
    placeholder rather than raising.
    """
    # Hand-built wire bytes: one topic, two partitions, each carrying a
    # 52-byte MessageSet with one complete message followed by one
    # deliberately truncated message.
    encoded = b''.join([
        Int32.encode(1),                # Num Topics (Array)
        String('utf-8').encode('foobar'),
        Int32.encode(2),                # Num Partitions (Array)
        Int32.encode(0),                # Partition id
        Int16.encode(0),                # Error Code
        Int64.encode(1234),             # Highwater offset
        Int32.encode(52),               # MessageSet size
        Int64.encode(0),                # Msg Offset
        Int32.encode(18),               # Msg Size
        struct.pack('>i', 1474775406),  # CRC
        struct.pack('>bb', 0, 0),       # Magic, flags
        struct.pack('>i', 2),           # Length of key
        b'k1',                          # Key
        struct.pack('>i', 2),           # Length of value
        b'v1',                          # Value
        Int64.encode(1),                # Msg Offset
        struct.pack('>i', 24),          # Msg Size (larger than remaining MsgSet size)
        struct.pack('>i', -16383415),   # CRC
        struct.pack('>bb', 0, 0),       # Magic, flags
        struct.pack('>i', 2),           # Length of key
        b'k2',                          # Key
        struct.pack('>i', 8),           # Length of value
        b'ar',                          # Value (truncated)
        Int32.encode(1),                # Partition id (second partition)
        Int16.encode(0),                # Error Code
        Int64.encode(2345),             # Highwater offset
        Int32.encode(52),               # MessageSet size
        Int64.encode(0),                # Msg Offset
        Int32.encode(18),               # Msg Size
        struct.pack('>i', 1474775406),  # CRC
        struct.pack('>bb', 0, 0),       # Magic, flags
        struct.pack('>i', 2),           # Length of key
        b'k1',                          # Key
        struct.pack('>i', 2),           # Length of value
        b'v1',                          # Value
        Int64.encode(1),                # Msg Offset
        struct.pack('>i', 24),          # Msg Size (larger than remaining MsgSet size)
        struct.pack('>i', -16383415),   # CRC
        struct.pack('>bb', 0, 0),       # Magic, flags
        struct.pack('>i', 2),           # Length of key
        b'k2',                          # Key
        struct.pack('>i', 8),           # Length of value
        b'ar',                          # Value (truncated)
    ])
    resp = FetchResponse[0].decode(io.BytesIO(encoded))
    assert len(resp.topics) == 1
    topic, partitions = resp.topics[0]
    assert topic == 'foobar'
    assert len(partitions) == 2
    # partitions[0][3] is the raw MessageSet bytes for the first partition
    m1 = MessageSet.decode(
        partitions[0][3], bytes_to_read=len(partitions[0][3]))
    assert len(m1) == 2
    # the truncated second message must surface as a PartialMessage
    assert m1[1] == (None, None, PartialMessage())
示例2: send_request
def send_request(self, request, correlation_id=None):
    """Encode a kafka api request and queue its bytes for sending.

    Arguments:
        request (object): An un-encoded kafka request.
        correlation_id (int, optional): Optionally specify an ID to
            correlate requests with responses. If not provided, an ID will
            be generated automatically.
    Returns:
        correlation_id
    """
    log.debug('Sending request %s', request)
    if correlation_id is None:
        correlation_id = self._next_correlation_id()

    # Frame layout: Int32 size prefix, then header + request payload
    req_header = RequestHeader(request,
                               correlation_id=correlation_id,
                               client_id=self._client_id)
    payload = req_header.encode() + request.encode()
    self.bytes_to_send.append(Int32.encode(len(payload)) + payload)

    # Only track requests that the broker will answer
    if request.expect_response():
        self.in_flight_requests.append((correlation_id, request))
    return correlation_id
示例3: _process_response
def _process_response(self, read_buffer):
    """Decode one response from read_buffer and resolve the oldest
    in-flight request's future with it.

    Returns:
        the decoded response object, or None if the correlation id
        check failed (in which case the connection is closed).
    Raises:
        Errors.KafkaError: on the Kafka 0.8.2 GroupCoordinator quirk.
    """
    assert not self._processing, 'Recursion not supported'
    self._processing = True
    ifr = self.in_flight_requests.popleft()

    # verify send/recv correlation ids match
    recv_correlation_id = Int32.decode(read_buffer)

    # 0.8.2 quirk: broker echoes correlation id 0 for GroupCoordinator
    # responses until at least one topic exists
    if (self.config['api_version'] == (0, 8, 2) and
        ifr.response_type is GroupCoordinatorResponse and
        recv_correlation_id == 0):
        # NOTE(review): raising here leaves self._processing == True,
        # so a subsequent call would trip the assert above -- confirm
        # callers treat this error as fatal for the connection.
        raise Errors.KafkaError(
            'Kafka 0.8.2 quirk -- try creating a topic first')
    elif ifr.correlation_id != recv_correlation_id:
        error = Errors.CorrelationIdError(
            'Correlation ids do not match: sent %d, recv %d'
            % (ifr.correlation_id, recv_correlation_id))
        # fixed: Future API is failure()/success() (see the success()
        # call below); the original called a nonexistent fail()
        ifr.future.failure(error)
        self.close()
        self._processing = False
        return None

    # decode response
    response = ifr.response_type.decode(read_buffer)
    log.debug('%s Response %d: %s', self, ifr.correlation_id, response)
    ifr.future.success(response)
    self._processing = False
    return response
示例4: decode
def decode(cls, data, bytes_to_read=None):
    """Decode a MessageSet into a list of (offset, size, message) tuples.

    Compressed messages should pass in bytes_to_read (via message size)
    otherwise, we decode from data as Int32
    """
    if isinstance(data, bytes):
        data = io.BytesIO(data)
    if bytes_to_read is None:
        bytes_to_read = Int32.decode(data)

    # if FetchRequest max_bytes is smaller than the available message set
    # the server returns partial data for the final message
    # So create an internal buffer to avoid over-reading
    buf = io.BytesIO(data.read(bytes_to_read))

    messages = []
    remaining = bytes_to_read
    while remaining:
        try:
            offset = Int64.decode(buf)
            payload = Bytes.decode(buf)
            # 8-byte offset + 4-byte length prefix + the payload itself
            remaining -= 8 + 4 + len(payload)
            messages.append((offset, len(payload), Message.decode(payload)))
        except ValueError:
            # PartialMessage to signal that max_bytes may be too small
            messages.append((None, None, PartialMessage()))
            break
    return messages
示例5: _process_response
def _process_response(self, read_buffer):
    """Decode one response from read_buffer and resolve the oldest
    in-flight request's future with it.

    Returns:
        the decoded response object, or None if the correlation id
        check failed (in which case the connection is closed).
    """
    assert not self._processing, 'Recursion not supported'
    self._processing = True
    ifr = self.in_flight_requests.popleft()

    # verify send/recv correlation ids match
    recv_correlation_id = Int32.decode(read_buffer)

    # 0.8.2 quirk: broker echoes correlation id 0 for GroupCoordinator
    # responses until at least one topic exists
    if (self.config['api_version'] == (0, 8, 2) and
        ifr.response_type is GroupCoordinatorResponse[0] and
        ifr.correlation_id != 0 and
        recv_correlation_id == 0):
        # fixed typo in log message: 'coorelation' -> 'correlation'
        log.warning('Kafka 0.8.2 quirk -- GroupCoordinatorResponse'
                    ' correlation id does not match request. This'
                    ' should go away once at least one topic has been'
                    ' initialized on the broker')
    elif ifr.correlation_id != recv_correlation_id:
        error = Errors.CorrelationIdError(
            '%s: Correlation ids do not match: sent %d, recv %d'
            % (str(self), ifr.correlation_id, recv_correlation_id))
        ifr.future.failure(error)
        self.close()
        self._processing = False
        return None

    # decode response
    response = ifr.response_type.decode(read_buffer)
    log.debug('%s Response %d: %s', self, ifr.correlation_id, response)
    ifr.future.success(response)
    self._processing = False
    return response
示例6: receive_bytes
def receive_bytes(self, data):
    """Process bytes received from the network.

    Incrementally assembles size-prefixed frames: first 4 bytes are an
    Int32 payload length, followed by the payload itself. State is kept
    across calls in self._header / self._rbuffer / self._receiving so a
    frame may arrive split over many chunks.

    Arguments:
        data (bytes): any length bytes received from a network connection
            to a kafka broker.
    Returns:
        responses (list of (correlation_id, response)): any/all completed
            responses, decoded from bytes to python objects.
    Raises:
        KafkaProtocolError: if the bytes received could not be decoded.
        CorrelationIdError: if the response does not match the request
            correlation id.
    """
    i = 0
    n = len(data)
    responses = []
    while i < n:
        # Not receiving is the state of reading the payload header
        if not self._receiving:
            # accumulate up to the 4 bytes of the Int32 size prefix
            bytes_to_read = min(4 - self._header.tell(), n - i)
            self._header.write(data[i:i+bytes_to_read])
            i += bytes_to_read
            if self._header.tell() == 4:
                self._header.seek(0)
                nbytes = Int32.decode(self._header)
                # reset buffer and switch state to receiving payload bytes
                self._rbuffer = KafkaBytes(nbytes)
                self._receiving = True
            elif self._header.tell() > 4:
                raise Errors.KafkaError('this should not happen - are you threading?')
        if self._receiving:
            total_bytes = len(self._rbuffer)
            staged_bytes = self._rbuffer.tell()
            bytes_to_read = min(total_bytes - staged_bytes, n - i)
            self._rbuffer.write(data[i:i+bytes_to_read])
            i += bytes_to_read
            staged_bytes = self._rbuffer.tell()
            if staged_bytes > total_bytes:
                raise Errors.KafkaError('Receive buffer has more bytes than expected?')
            if staged_bytes != total_bytes:
                # frame incomplete -- wait for more network data
                break
            # full frame staged: decode it and reset for the next frame
            self._receiving = False
            self._rbuffer.seek(0)
            resp = self._process_response(self._rbuffer)
            responses.append(resp)
            self._reset_buffer()
    return responses
示例7: drain_ready
def drain_ready(self):
    """Compress batch to be ready for send.

    Rewrites the batch buffer in place through a memoryview: either a
    single compressed wrapper message replaces the batch body, or (when
    compression is off / unprofitable) only the leading 4-byte batch
    size is patched. Also releases the sender waiting on this batch.
    """
    memview = self._buffer.getbuffer()
    # wake the sender coroutine that is waiting to drain this batch
    self._drain_waiter.set_result(None)
    if self._compression_type:
        _, compressor, attrs = self._COMPRESSORS[self._compression_type]
        # compress everything after the 4-byte batch-size prefix
        msg = Message(compressor(memview[4:].tobytes()), attributes=attrs)
        encoded = msg.encode()
        # if compressed message is longer than original
        # we should send it as is (not compressed)
        header_size = 16    # 4(all size) + 8(offset) + 4(compressed size)
        if len(encoded) + header_size < len(memview):
            # write compressed message set (with header) to buffer
            # using memory view (for avoid memory copying)
            memview[:4] = Int32.encode(len(encoded) + 12)
            memview[4:12] = Int64.encode(0)    # offset 0
            memview[12:16] = Int32.encode(len(encoded))
            memview[16:16+len(encoded)] = encoded
            self._buffer.seek(0)
            return
    # update batch size (first 4 bytes of buffer)
    memview[:4] = Int32.encode(self._buffer.tell()-4)
    self._buffer.seek(0)
示例8: send
def send(self, request, expect_response=True):
    """send request, return Future()

    Can block on network if request is larger than send_buffer_bytes
    """
    future = Future()

    # Guard clauses: connection must be fully established and have
    # capacity before we attempt a send
    if self.connecting():
        return future.failure(Errors.NodeNotReadyError(str(self)))
    if not self.connected():
        return future.failure(Errors.ConnectionError(str(self)))
    if not self.can_send_more():
        return future.failure(Errors.TooManyInFlightRequests(str(self)))

    correlation_id = self._next_correlation_id()
    header = RequestHeader(request,
                           correlation_id=correlation_id,
                           client_id=self.config['client_id'])
    payload = header.encode() + request.encode()
    # single wire frame: Int32 size prefix followed by the payload
    frame = Int32.encode(len(payload)) + payload
    try:
        # In the future we might manage an internal write buffer
        # and send bytes asynchronously. For now, just block
        # sending each request payload
        self._sock.setblocking(True)
        sent = 0
        while sent < len(frame):
            sent += self._sock.send(frame[sent:])
        assert sent == len(frame)
        self._sock.setblocking(False)
    except (AssertionError, ConnectionError) as e:
        log.exception("Error sending %s to %s", request, self)
        error = Errors.ConnectionError("%s: %s" % (str(self), e))
        self.close(error=error)
        return future.failure(error)

    log.debug('%s Request %d: %s', self, correlation_id, request)
    if expect_response:
        self.in_flight_requests.append(
            InFlightRequest(request=request,
                            correlation_id=correlation_id,
                            response_type=request.RESPONSE_TYPE,
                            future=future,
                            timestamp=time.time()))
    else:
        future.success(None)
    return future
示例9: _process_response
def _process_response(self, read_buffer):
    """Decode one response from read_buffer and resolve the oldest
    in-flight request's future with it, recording request latency.

    Returns:
        the decoded response object, or None on correlation-id mismatch
        or decode failure (the connection is closed in both cases).
    """
    assert not self._processing, 'Recursion not supported'
    self._processing = True
    ifr = self.in_flight_requests.popleft()
    if self._sensors:
        # record round-trip latency in milliseconds
        self._sensors.request_time.record((time.time() - ifr.timestamp) * 1000)

    # verify send/recv correlation ids match
    recv_correlation_id = Int32.decode(read_buffer)

    # 0.8.2 quirk: broker echoes correlation id 0 for GroupCoordinator
    # responses until at least one topic exists
    if (self.config['api_version'] == (0, 8, 2) and
        ifr.response_type is GroupCoordinatorResponse[0] and
        ifr.correlation_id != 0 and
        recv_correlation_id == 0):
        # fixed typo in log message: 'coorelation' -> 'correlation'
        log.warning('Kafka 0.8.2 quirk -- GroupCoordinatorResponse'
                    ' correlation id does not match request. This'
                    ' should go away once at least one topic has been'
                    ' initialized on the broker')
    elif ifr.correlation_id != recv_correlation_id:
        error = Errors.CorrelationIdError(
            '%s: Correlation ids do not match: sent %d, recv %d'
            % (str(self), ifr.correlation_id, recv_correlation_id))
        ifr.future.failure(error)
        self.close()
        self._processing = False
        return None

    # decode response
    try:
        response = ifr.response_type.decode(read_buffer)
    except ValueError:
        # dump the full undecodable buffer for diagnosis
        read_buffer.seek(0)
        buf = read_buffer.read()
        log.error('%s Response %d [ResponseType: %s Request: %s]:'
                  ' Unable to decode %d-byte buffer: %r', self,
                  ifr.correlation_id, ifr.response_type,
                  ifr.request, len(buf), buf)
        ifr.future.failure(Errors.UnknownError('Unable to decode response'))
        self.close()
        self._processing = False
        return None

    log.debug('%s Response %d: %s', self, ifr.correlation_id, response)
    ifr.future.success(response)
    self._processing = False
    return response
示例10: encode
def encode(cls, items, prepend_size=True):
    """Encode a MessageSet to bytes.

    *items* is either an iterable of (offset, encoded_message) pairs, or
    an already-encoded buffer (io.BytesIO / KafkaBytes) positioned at its
    Int32 size prefix. When prepend_size is True the returned bytes
    include the Int32 length prefix.
    """
    # RecordAccumulator encodes messagesets internally
    if isinstance(items, (io.BytesIO, KafkaBytes)):
        nbytes = Int32.decode(items)
        if not prepend_size:
            return items.read(nbytes)
        # rewind over the size prefix and return all the bytes
        items.seek(items.tell() - 4)
        return items.read(nbytes + 4)

    body = b''.join(
        Int64.encode(offset) + Bytes.encode(message)
        for (offset, message) in items)
    return Bytes.encode(body) if prepend_size else body
示例11: _process_response
def _process_response(self, read_buffer):
    """Decode one response from read_buffer and pair it with the oldest
    in-flight request.

    Returns:
        (correlation_id, response) tuple for the matched request.
    Raises:
        Errors.CorrelationIdError: no in-flight request, or the ids
            do not match (outside the 0.8.2 quirk).
        Errors.KafkaProtocolError: the response bytes could not be
            decoded as the expected response type.
    """
    recv_correlation_id = Int32.decode(read_buffer)
    log.debug('Received correlation id: %d', recv_correlation_id)

    if not self.in_flight_requests:
        raise Errors.CorrelationIdError(
            'No in-flight-request found for server response'
            ' with correlation ID %d'
            % recv_correlation_id)

    (correlation_id, request) = self.in_flight_requests.popleft()

    # 0.8.2 quirk: broker echoes correlation id 0 for GroupCoordinator
    # responses until at least one topic exists
    if (self._api_version == (0, 8, 2) and
        request.RESPONSE_TYPE is GroupCoordinatorResponse[0] and
        correlation_id != 0 and
        recv_correlation_id == 0):
        log.warning('Kafka 0.8.2 quirk -- GroupCoordinatorResponse'
                    ' Correlation ID does not match request. This'
                    ' should go away once at least one topic has been'
                    ' initialized on the broker.')
    elif correlation_id != recv_correlation_id:
        # return or raise?
        raise Errors.CorrelationIdError(
            'Correlation IDs do not match: sent %d, recv %d'
            % (correlation_id, recv_correlation_id))

    # decode response
    log.debug('Processing response %s', request.RESPONSE_TYPE.__name__)
    try:
        response = request.RESPONSE_TYPE.decode(read_buffer)
    except ValueError:
        # dump the full undecodable buffer for diagnosis
        read_buffer.seek(0)
        buf = read_buffer.read()
        log.error('Response %d [ResponseType: %s Request: %s]:'
                  ' Unable to decode %d-byte buffer: %r',
                  correlation_id, request.RESPONSE_TYPE,
                  request, len(buf), buf)
        raise Errors.KafkaProtocolError('Unable to decode response')

    return (correlation_id, response)
示例12: append
def append(self, key, value):
    """Append message (key and value) to batch

    Returns:
        None if batch is full
        or
        asyncio.Future that will resolved when message is delivered
    """
    if self._is_full(key, value):
        return None

    # wire entry: Int64 relative offset, Int32 message length, message
    payload = Message(value, key=key).encode()
    entry = b''.join([Int64.encode(self._relative_offset),
                      Int32.encode(len(payload)),
                      payload])
    self._buffer.write(entry)
    self._relative_offset += 1

    delivery_future = asyncio.Future(loop=self._loop)
    self._msg_futures.append(delivery_future)
    return delivery_future
示例13: __init__
def __init__(self, tp, batch_size, compression_type, ttl, loop):
    """Message batch accumulator for a single topic-partition.

    Arguments:
        tp: topic-partition this batch belongs to.
        batch_size (int): batch size limit in bytes.
        compression_type: key into cls._COMPRESSORS, or falsy to disable
            compression.
        ttl: batch time-to-live, measured against loop.time().
        loop: asyncio event loop used for futures and timing.
    """
    if compression_type:
        # verify the compression library is importable up front
        checker, _, _ = self._COMPRESSORS[compression_type]
        assert checker(), 'Compression Libraries Not Found'
    self._tp = tp
    self._batch_size = batch_size
    self._compression_type = compression_type
    self._buffer = io.BytesIO()
    self._buffer.write(Int32.encode(0))    # first 4 bytes for batch size
    self._relative_offset = 0
    self._loop = loop
    self._ttl = ttl
    self._ctime = loop.time()
    # Waiters
    # Set when messages are delivered to Kafka based on ACK setting
    self._msg_futures = []
    # Set when sender takes this batch
    self._drain_waiter = asyncio.Future(loop=loop)
示例14: _send
def _send(self, request, expect_response=True):
    """Encode *request* and blocking-send it over the socket.

    Returns:
        Future: resolved with the decoded response later (when
        expect_response), with None immediately (when not), or failed
        with a ConnectionError if the send fails.
    """
    future = Future()
    correlation_id = self._next_correlation_id()
    header = RequestHeader(request,
                           correlation_id=correlation_id,
                           client_id=self.config['client_id'])
    message = b''.join([header.encode(), request.encode()])
    size = Int32.encode(len(message))
    # wire frame: Int32 size prefix + header + request payload
    data = size + message
    try:
        # In the future we might manage an internal write buffer
        # and send bytes asynchronously. For now, just block
        # sending each request payload
        self._sock.setblocking(True)
        total_sent = 0
        while total_sent < len(data):
            sent_bytes = self._sock.send(data[total_sent:])
            total_sent += sent_bytes
        assert total_sent == len(data)
        if self._sensors:
            self._sensors.bytes_sent.record(total_sent)
        self._sock.setblocking(False)
    except (AssertionError, ConnectionError) as e:
        log.exception("Error sending %s to %s", request, self)
        error = Errors.ConnectionError("%s: %s" % (str(self), e))
        self.close(error=error)
        return future.failure(error)
    log.debug('%s Request %d: %s', self, correlation_id, request)
    if expect_response:
        ifr = InFlightRequest(request=request,
                              correlation_id=correlation_id,
                              response_type=request.RESPONSE_TYPE,
                              future=future,
                              timestamp=time.time())
        self.in_flight_requests.append(ifr)
    else:
        future.success(None)
    return future
示例15: _try_authenticate_plain
def _try_authenticate_plain(self, future):
    """Perform SASL/PLAIN authentication (RFC 4616) over the socket.

    Sends the size-prefixed credential message and blocks for the
    broker's 4-byte reply. Resolves *future* with True on success or a
    failure error otherwise, and returns the resolved future.
    """
    if self.config['security_protocol'] == 'SASL_PLAINTEXT':
        log.warning('%s: Sending username and password in the clear', str(self))

    data = b''
    try:
        self._sock.setblocking(True)
        # Send PLAIN credentials per RFC-4616:
        # authzid NUL authcid NUL passwd (authzid == authcid here)
        msg = bytes('\0'.join([self.config['sasl_plain_username'],
                               self.config['sasl_plain_username'],
                               self.config['sasl_plain_password']]).encode('utf-8'))
        size = Int32.encode(len(msg))
        self._sock.sendall(size + msg)

        # The server will send a zero sized message (that is Int32(0)) on success.
        # The connection is closed on failure
        while len(data) < 4:
            fragment = self._sock.recv(4 - len(data))
            if not fragment:
                log.error('%s: Authentication failed for user %s', self, self.config['sasl_plain_username'])
                error = Errors.AuthenticationFailedError(
                    'Authentication failed for user {0}'.format(
                        self.config['sasl_plain_username']))
                future.failure(error)
                raise error
            data += fragment
        self._sock.setblocking(False)
    except (AssertionError, ConnectionError) as e:
        log.exception("%s: Error receiving reply from server", self)
        error = Errors.ConnectionError("%s: %s" % (str(self), e))
        future.failure(error)
        self.close(error=error)
        # bug fix: without this return we fell through and resolved the
        # already-failed future a second time via the checks below
        return future

    if data != b'\x00\x00\x00\x00':
        return future.failure(Errors.AuthenticationFailedError())
    return future.success(True)