This article collects typical usage examples of the Java class org.apache.kafka.clients.ClientResponse. If you are wondering what the ClientResponse class does, how to use it, or where to find examples of it, the selected code samples below may help.
The ClientResponse class belongs to the org.apache.kafka.clients package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
Example 1: fetchMetadata
import org.apache.kafka.clients.ClientResponse; // import the required package/class
/**
* Fetch the metadata for all topics
*/
public MetadataResponse fetchMetadata() {
final ClientRequest clientRequest = kafkaClient.newClientRequest(
getAnyReadyBrokerId(),
MetadataRequest.Builder.allTopics(),
Time.SYSTEM.milliseconds(),
true);
final ClientResponse clientResponse = sendRequest(clientRequest);
if (!clientResponse.hasResponse()) {
throw new StreamsException("Empty response for client request.");
}
if (!(clientResponse.responseBody() instanceof MetadataResponse)) {
throw new StreamsException("Inconsistent response type for internal topic metadata request. " +
"Expected MetadataResponse but received " + clientResponse.responseBody().getClass().getName());
}
final MetadataResponse metadataResponse = (MetadataResponse) clientResponse.responseBody();
return metadataResponse;
}
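For context, here is one way a caller might consume the MetadataResponse returned by fetchMetadata(). This is a minimal sketch, not part of the original example: it assumes the 0.10.x/0.11.x MetadataResponse API (topicMetadata(), TopicMetadata.error() and topic()) and a surrounding class that exposes fetchMetadata().
import java.util.HashSet;
import java.util.Set;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.MetadataResponse;
// Hypothetical helper: collect the names of all topics the brokers reported without error.
public Set<String> existingTopics() {
    final MetadataResponse metadata = fetchMetadata();
    final Set<String> topics = new HashSet<>();
    for (final MetadataResponse.TopicMetadata topicMetadata : metadata.topicMetadata()) {
        if (topicMetadata.error() == Errors.NONE) {
            topics.add(topicMetadata.topic());
        }
    }
    return topics;
}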
Example 2: onComplete
import org.apache.kafka.clients.ClientResponse; // import the required package/class
@Override
@SuppressWarnings("unchecked")
public void onComplete(ClientResponse response) {
if (response.requestHeader().correlationId() != inFlightRequestCorrelationId) {
fatalError(new RuntimeException("Detected more than one in-flight transactional request."));
} else {
clearInFlightRequestCorrelationId();
if (response.wasDisconnected()) {
log.debug("{}Disconnected from {}. Will retry.", logPrefix, response.destination());
if (this.needsCoordinator())
lookupCoordinator(this.coordinatorType(), this.coordinatorKey());
reenqueue();
} else if (response.versionMismatch() != null) {
fatalError(response.versionMismatch());
} else if (response.hasResponse()) {
log.trace("{}Received transactional response {} for request {}", logPrefix,
response.responseBody(), requestBuilder());
synchronized (TransactionManager.this) {
handleResponse(response.responseBody());
}
} else {
fatalError(new KafkaException("Could not execute transactional request for unknown reasons"));
}
}
}
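The dispatch order above (disconnect, then version mismatch, then response body) is the general pattern for any RequestCompletionHandler. The following stripped-down handler follows the same order using only the ClientResponse accessors seen above; the logging and error policy are illustrative assumptions, not TransactionManager's actual behaviour.
import org.apache.kafka.clients.ClientResponse;
import org.apache.kafka.clients.RequestCompletionHandler;
public class LoggingCompletionHandler implements RequestCompletionHandler {
    @Override
    public void onComplete(final ClientResponse response) {
        if (response.wasDisconnected()) {
            // the connection dropped before a response arrived; the request should be retried
            System.err.println("Disconnected from " + response.destination() + ", will retry");
        } else if (response.versionMismatch() != null) {
            // the broker does not support the request version that was sent
            throw response.versionMismatch();
        } else if (response.hasResponse()) {
            System.out.println("Received " + response.responseBody());
        }
    }
}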
Example 3: sendListOffsetRequest
import org.apache.kafka.clients.ClientResponse; // import the required package/class
/**
* Send the ListOffsetRequest to a specific broker for the partitions and target timestamps.
*
* @param node The node to send the ListOffsetRequest to.
* @param timestampsToSearch The mapping from partitions to the target timestamps.
* @param requireTimestamp True if we require a timestamp in the response.
* @return A response which can be polled to obtain the corresponding timestamps and offsets.
*/
private RequestFuture<Map<TopicPartition, OffsetData>> sendListOffsetRequest(final Node node,
final Map<TopicPartition, Long> timestampsToSearch,
boolean requireTimestamp) {
ListOffsetRequest.Builder builder = ListOffsetRequest.Builder
.forConsumer(requireTimestamp, isolationLevel)
.setTargetTimes(timestampsToSearch);
log.trace("Sending ListOffsetRequest {} to broker {}", builder, node);
return client.send(node, builder)
.compose(new RequestFutureAdapter<ClientResponse, Map<TopicPartition, OffsetData>>() {
@Override
public void onSuccess(ClientResponse response, RequestFuture<Map<TopicPartition, OffsetData>> future) {
ListOffsetResponse lor = (ListOffsetResponse) response.responseBody();
log.trace("Received ListOffsetResponse {} from broker {}", lor, node);
// when the response arrives, invoke the listener callback and hand the result to the pending future
handleListOffsetResponse(timestampsToSearch, lor, future);
}
});
}
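For completeness, the future returned above is typically driven to completion with ConsumerNetworkClient.poll(future), mirroring the pattern the test in Example 4 uses. The following is a hedged sketch; variable names outside this example are illustrative.
RequestFuture<Map<TopicPartition, OffsetData>> future =
        sendListOffsetRequest(node, timestampsToSearch, requireTimestamp);
client.poll(future);  // block until the future completes
if (future.succeeded()) {
    Map<TopicPartition, OffsetData> offsetsByPartition = future.value();
    // use the resolved offsets and timestamps ...
} else if (!future.isRetriable()) {
    throw future.exception();  // non-retriable failures are surfaced to the caller
}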
Example 4: send
import org.apache.kafka.clients.ClientResponse; // import the required package/class
@Test
public void send() {
client.prepareResponse(heartbeatResponse(Errors.NONE));
RequestFuture<ClientResponse> future = consumerClient.send(node, heartbeat());
assertEquals(1, consumerClient.pendingRequestCount());
assertEquals(1, consumerClient.pendingRequestCount(node));
assertFalse(future.isDone());
consumerClient.poll(future);
assertTrue(future.isDone());
assertTrue(future.succeeded());
ClientResponse clientResponse = future.value();
HeartbeatResponse response = (HeartbeatResponse) clientResponse.responseBody();
assertEquals(Errors.NONE, response.error());
}
Example 5: doNotBlockIfPollConditionIsSatisfied
import org.apache.kafka.clients.ClientResponse; // import the required package/class
@Test
public void doNotBlockIfPollConditionIsSatisfied() {
NetworkClient mockNetworkClient = EasyMock.mock(NetworkClient.class);
ConsumerNetworkClient consumerClient = new ConsumerNetworkClient(mockNetworkClient, metadata, time, 100, 1000);
// expect poll, but with no timeout
EasyMock.expect(mockNetworkClient.poll(EasyMock.eq(0L), EasyMock.anyLong())).andReturn(Collections.<ClientResponse>emptyList());
EasyMock.replay(mockNetworkClient);
consumerClient.poll(Long.MAX_VALUE, time.milliseconds(), new ConsumerNetworkClient.PollCondition() {
@Override
public boolean shouldBlock() {
return false;
}
});
EasyMock.verify(mockNetworkClient);
}
Example 6: blockWhenPollConditionNotSatisfied
import org.apache.kafka.clients.ClientResponse; // import the required package/class
@Test
public void blockWhenPollConditionNotSatisfied() {
long timeout = 4000L;
NetworkClient mockNetworkClient = EasyMock.mock(NetworkClient.class);
ConsumerNetworkClient consumerClient = new ConsumerNetworkClient(mockNetworkClient, metadata, time, 100, 1000);
EasyMock.expect(mockNetworkClient.inFlightRequestCount()).andReturn(1);
EasyMock.expect(mockNetworkClient.poll(EasyMock.eq(timeout), EasyMock.anyLong())).andReturn(Collections.<ClientResponse>emptyList());
EasyMock.replay(mockNetworkClient);
consumerClient.poll(timeout, time.milliseconds(), new ConsumerNetworkClient.PollCondition() {
@Override
public boolean shouldBlock() {
return true;
}
});
EasyMock.verify(mockNetworkClient);
}
Example 7: blockOnlyForRetryBackoffIfNoInflightRequests
import org.apache.kafka.clients.ClientResponse; // import the required package/class
@Test
public void blockOnlyForRetryBackoffIfNoInflightRequests() {
long retryBackoffMs = 100L;
NetworkClient mockNetworkClient = EasyMock.mock(NetworkClient.class);
ConsumerNetworkClient consumerClient = new ConsumerNetworkClient(mockNetworkClient, metadata, time, retryBackoffMs, 1000L);
EasyMock.expect(mockNetworkClient.inFlightRequestCount()).andReturn(0);
EasyMock.expect(mockNetworkClient.poll(EasyMock.eq(retryBackoffMs), EasyMock.anyLong())).andReturn(Collections.<ClientResponse>emptyList());
EasyMock.replay(mockNetworkClient);
consumerClient.poll(Long.MAX_VALUE, time.milliseconds(), new ConsumerNetworkClient.PollCondition() {
@Override
public boolean shouldBlock() {
return true;
}
});
EasyMock.verify(mockNetworkClient);
}
Example 8: produceRequest
import org.apache.kafka.clients.ClientResponse; // import the required package/class
/**
* Create a produce request from the given record batches
*/
private ClientRequest produceRequest(long now, int destination, short acks, int timeout, List<RecordBatch> batches) {
Map<TopicPartition, ByteBuffer> produceRecordsByPartition = new HashMap<TopicPartition, ByteBuffer>(batches.size());
final Map<TopicPartition, RecordBatch> recordsByPartition = new HashMap<TopicPartition, RecordBatch>(batches.size());
for (RecordBatch batch : batches) {
TopicPartition tp = batch.topicPartition;
produceRecordsByPartition.put(tp, batch.records.buffer());
recordsByPartition.put(tp, batch);
}
ProduceRequest request = new ProduceRequest(acks, timeout, produceRecordsByPartition);
RequestSend send = new RequestSend(Integer.toString(destination),
this.client.nextRequestHeader(ApiKeys.PRODUCE),
request.toStruct());
RequestCompletionHandler callback = new RequestCompletionHandler() {
public void onComplete(ClientResponse response) {
handleProduceResponse(response, recordsByPartition, time.milliseconds());
}
};
return new ClientRequest(now, acks != 0, send, callback);
}
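The request built above still needs to be handed to the network layer. In the pre-0.10.1 client this example is written against, that is done with KafkaClient.send(ClientRequest, long) followed by poll(), which invokes the onComplete callback once the response (or a disconnect) is processed. A hypothetical call sequence:
// 'client', 'node', 'acks', 'requestTimeoutMs', 'pollTimeout' and 'batches' are assumed to be in scope.
ClientRequest request = produceRequest(now, node.id(), acks, requestTimeoutMs, batches);
client.send(request, now);                       // queue the request on the connection
client.poll(pollTimeout, time.milliseconds());   // do network I/O; completed responses fire onComplete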
Example 9: sendGroupCoordinatorRequest
import org.apache.kafka.clients.ClientResponse; // import the required package/class
/**
* Discover the current coordinator for the group. Sends a GroupMetadata request to
* one of the brokers. The returned future should be polled to get the result of the request.
* @return A request future which indicates the completion of the metadata request
*/
private RequestFuture<Void> sendGroupCoordinatorRequest() {
// initiate the group metadata request
// find a node to ask about the coordinator
Node node = this.client.leastLoadedNode();
if (node == null) {
// TODO: If there are no brokers left, perhaps we should use the bootstrap set
// from configuration?
return RequestFuture.noBrokersAvailable();
} else {
// create a group metadata request
log.debug("Sending coordinator request for group {} to broker {}", groupId, node);
GroupCoordinatorRequest metadataRequest = new GroupCoordinatorRequest(this.groupId);
return client.send(node, ApiKeys.GROUP_COORDINATOR, metadataRequest)
.compose(new RequestFutureAdapter<ClientResponse, Void>() {
@Override
public void onSuccess(ClientResponse response, RequestFuture<Void> future) {
handleGroupMetadataResponse(response, future);
}
});
}
}
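Callers usually wrap this request in a retry loop, in the spirit of AbstractCoordinator.ensureCoordinatorReady(). The sketch below is a simplified assumption of that loop, not the exact Kafka implementation:
while (coordinatorUnknown()) {
    RequestFuture<Void> future = sendGroupCoordinatorRequest();
    client.poll(future);                 // block until the coordinator lookup completes
    if (future.failed()) {
        if (future.isRetriable())
            time.sleep(retryBackoffMs);  // assumed backoff; the real code may also await a metadata update
        else
            throw future.exception();
    }
}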
Example 10: checkDisconnects
import org.apache.kafka.clients.ClientResponse; // import the required package/class
private void checkDisconnects(long now) {
// any disconnects affecting requests that have already been transmitted will be handled
// by NetworkClient, so we just need to check whether connections for any of the unsent
// requests have been disconnected; if they have, then we complete the corresponding future
// and set the disconnect flag in the ClientResponse
Iterator<Map.Entry<Node, List<ClientRequest>>> iterator = unsent.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<Node, List<ClientRequest>> requestEntry = iterator.next();
Node node = requestEntry.getKey();
if (client.connectionFailed(node)) {
// Remove entry before invoking request callback to avoid callbacks handling
// coordinator failures traversing the unsent list again.
iterator.remove();
for (ClientRequest request : requestEntry.getValue()) {
RequestFutureCompletionHandler handler =
(RequestFutureCompletionHandler) request.callback();
handler.onComplete(new ClientResponse(request, now, true, null));
}
}
}
}
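For reference, the RequestFutureCompletionHandler that receives this synthetic "disconnected" response typically fails the pending future with a retriable exception instead of delivering a body. A hedged sketch of that half of the handler (the exact exception type used by a given client version may differ):
public void onComplete(ClientResponse response) {
    if (response.wasDisconnected()) {
        // no response body; fail the future with a retriable error so the caller can resend
        future.raise(new DisconnectException("Connection to " + response.request().request().destination() + " was closed"));
    } else {
        future.complete(response);
    }
}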
Example 11: sendFetches
import org.apache.kafka.clients.ClientResponse; // import the required package/class
/**
* Set up a fetch request for any node to which we have assigned partitions and that does not already have an in-flight fetch request.
*
*/
public void sendFetches() {
for (Map.Entry<Node, FetchRequest> fetchEntry: createFetchRequests().entrySet()) {
final FetchRequest fetch = fetchEntry.getValue();
client.send(fetchEntry.getKey(), ApiKeys.FETCH, fetch)
.addListener(new RequestFutureListener<ClientResponse>() {
@Override
public void onSuccess(ClientResponse response) {
handleFetchResponse(response, fetch);
}
@Override
public void onFailure(RuntimeException e) {
log.debug("Fetch failed", e);
}
});
}
}
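In the consumer, this method is driven as part of a poll cycle: fetch requests are queued, the network client performs the I/O, and the completed data is then drained. A hypothetical calling sequence (names outside this example are assumptions):
fetcher.sendFetches();                                  // queue one fetch request per eligible node
client.poll(pollTimeout, time.milliseconds());          // perform network I/O; listeners fire on completion
Map<TopicPartition, List<ConsumerRecord<K, V>>> records = fetcher.fetchedRecords();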
Example 12: sendListOffsetRequest
import org.apache.kafka.clients.ClientResponse; // import the required package/class
/**
* Fetch a single offset before the given timestamp for the partition.
*
* @param topicPartition The partition whose offset should be fetched.
* @param timestamp The target timestamp for the offset lookup.
* @return A response which can be polled to obtain the corresponding offset.
*/
private RequestFuture<Long> sendListOffsetRequest(final TopicPartition topicPartition, long timestamp) {
Map<TopicPartition, ListOffsetRequest.PartitionData> partitions = new HashMap<>(1);
partitions.put(topicPartition, new ListOffsetRequest.PartitionData(timestamp, 1));
PartitionInfo info = metadata.fetch().partition(topicPartition);
if (info == null) {
metadata.add(topicPartition.topic());
log.debug("Partition {} is unknown for fetching offset, wait for metadata refresh", topicPartition);
return RequestFuture.staleMetadata();
} else if (info.leader() == null) {
log.debug("Leader for partition {} unavailable for fetching offset, wait for metadata refresh", topicPartition);
return RequestFuture.leaderNotAvailable();
} else {
Node node = info.leader();
ListOffsetRequest request = new ListOffsetRequest(-1, partitions);
return client.send(node, ApiKeys.LIST_OFFSETS, request)
.compose(new RequestFutureAdapter<ClientResponse, Long>() {
@Override
public void onSuccess(ClientResponse response, RequestFuture<Long> future) {
handleListOffsetResponse(topicPartition, response, future);
}
});
}
}
Example 13: handleListOffsetResponse
import org.apache.kafka.clients.ClientResponse; // import the required package/class
/**
* Callback for the response of the list offset call above.
* @param topicPartition The partition that was fetched
* @param clientResponse The response from the server.
*/
private void handleListOffsetResponse(TopicPartition topicPartition,
ClientResponse clientResponse,
RequestFuture<Long> future) {
ListOffsetResponse lor = new ListOffsetResponse(clientResponse.responseBody());
short errorCode = lor.responseData().get(topicPartition).errorCode;
if (errorCode == Errors.NONE.code()) {
List<Long> offsets = lor.responseData().get(topicPartition).offsets;
if (offsets.size() != 1)
throw new IllegalStateException("This should not happen.");
long offset = offsets.get(0);
log.debug("Fetched offset {} for partition {}", offset, topicPartition);
future.complete(offset);
} else if (errorCode == Errors.NOT_LEADER_FOR_PARTITION.code()
|| errorCode == Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) {
log.debug("Attempt to fetch offsets for partition {} failed due to obsolete leadership information, retrying.",
topicPartition);
future.raise(Errors.forCode(errorCode));
} else {
log.warn("Attempt to fetch offsets for partition {} failed due to: {}",
topicPartition, Errors.forCode(errorCode).message());
future.raise(new StaleMetadataException());
}
}
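The callback above only completes or fails the future; the retry policy lives in the caller. A hedged sketch of that surrounding loop, in the spirit of Fetcher.listOffset() (names outside this example are assumptions):
while (true) {
    RequestFuture<Long> future = sendListOffsetRequest(topicPartition, timestamp);
    client.poll(future);
    if (future.succeeded())
        return future.value();
    if (!future.isRetriable())
        throw future.exception();
    if (future.exception() instanceof InvalidMetadataException)
        client.awaitMetadataUpdate();   // leadership info was stale; wait for fresh metadata
    else
        time.sleep(retryBackoffMs);     // plain retriable error; back off before retrying
}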
Example 14: send
import org.apache.kafka.clients.ClientResponse; // import the required package/class
@Test
public void send() {
client.prepareResponse(heartbeatResponse(Errors.NONE.code()));
RequestFuture<ClientResponse> future = consumerClient.send(node, ApiKeys.METADATA, heartbeatRequest());
assertEquals(1, consumerClient.pendingRequestCount());
assertEquals(1, consumerClient.pendingRequestCount(node));
assertFalse(future.isDone());
consumerClient.poll(future);
assertTrue(future.isDone());
assertTrue(future.succeeded());
ClientResponse clientResponse = future.value();
HeartbeatResponse response = new HeartbeatResponse(clientResponse.responseBody());
assertEquals(Errors.NONE.code(), response.errorCode());
}
Example 15: checkBrokerCompatibility
import org.apache.kafka.clients.ClientResponse; // import the required package/class
/**
* Check if the used brokers have version 0.10.1.x or higher.
* <p>
* Note, for <em>pre</em> 0.10.x brokers the broker version cannot be checked and the client will hang and retry
* until it {@link StreamsConfig#REQUEST_TIMEOUT_MS_CONFIG times out}.
*
* @throws StreamsException if brokers have version 0.10.0.x
*/
public void checkBrokerCompatibility(final boolean eosEnabled) throws StreamsException {
final ClientRequest clientRequest = kafkaClient.newClientRequest(
getAnyReadyBrokerId(),
new ApiVersionsRequest.Builder(),
Time.SYSTEM.milliseconds(),
true);
final ClientResponse clientResponse = sendRequest(clientRequest);
if (!clientResponse.hasResponse()) {
throw new StreamsException("Empty response for client request.");
}
if (!(clientResponse.responseBody() instanceof ApiVersionsResponse)) {
throw new StreamsException("Inconsistent response type for API versions request. " +
"Expected ApiVersionsResponse but received " + clientResponse.responseBody().getClass().getName());
}
final ApiVersionsResponse apiVersionsResponse = (ApiVersionsResponse) clientResponse.responseBody();
if (apiVersionsResponse.apiVersion(ApiKeys.CREATE_TOPICS.id) == null) {
throw new StreamsException("Kafka Streams requires broker version 0.10.1.x or higher.");
}
if (eosEnabled && !brokerSupportsTransactions(apiVersionsResponse)) {
throw new StreamsException("Setting " + PROCESSING_GUARANTEE_CONFIG + "=" + EXACTLY_ONCE + " requires broker version 0.11.0.x or higher.");
}
}
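The brokerSupportsTransactions helper referenced above is not shown in this example. A plausible implementation, based on the fact that the transactional APIs were introduced with broker 0.11.0 (the exact set of API keys checked here is an assumption):
private boolean brokerSupportsTransactions(final ApiVersionsResponse apiVersionsResponse) {
    // brokers older than 0.11.0 do not advertise the transactional APIs at all
    return apiVersionsResponse.apiVersion(ApiKeys.INIT_PRODUCER_ID.id) != null
        && apiVersionsResponse.apiVersion(ApiKeys.ADD_PARTITIONS_TO_TXN.id) != null;
}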