This article collects typical usage examples of the Java method org.apache.kafka.common.requests.AbstractRequest.Builder. If you are unsure what AbstractRequest.Builder does or how to use it, the curated code examples below may help; you can also read further about the enclosing class org.apache.kafka.common.requests.AbstractRequest.
The following 11 code examples of AbstractRequest.Builder are shown, ordered by popularity.
Example 1: ClientRequest
import org.apache.kafka.common.requests.AbstractRequest; // import the package/class the method depends on
/**
* @param destination The brokerId to send the request to
* @param requestBuilder The builder for the request to make
* @param correlationId The correlation id for this client request
* @param clientId The client ID to use for the header
* @param createdTimeMs The unix timestamp in milliseconds for the time at which this request was created.
* @param expectResponse Should we expect a response message or is this request complete once it is sent?
* @param callback A callback to execute when the response has been received (or null if no callback is necessary)
*/
public ClientRequest(String destination,
                     AbstractRequest.Builder<?> requestBuilder,
                     int correlationId,
                     String clientId,
                     long createdTimeMs,
                     boolean expectResponse,
                     RequestCompletionHandler callback) {
    this.destination = destination;
    this.requestBuilder = requestBuilder;
    this.correlationId = correlationId;
    this.clientId = clientId;
    this.createdTimeMs = createdTimeMs;
    this.expectResponse = expectResponse;
    this.callback = callback;
}
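For orientation, here is a minimal sketch of constructing a ClientRequest directly with this constructor; the MetadataRequest.Builder and the lambda callback are illustrative assumptions rather than part of the snippet above, and in practice KafkaClient.newClientRequest(...) builds the request for you:

AbstractRequest.Builder<?> builder = MetadataRequest.Builder.allTopics(); // assumed example builder
ClientRequest request = new ClientRequest(
        "1",                          // destination broker id
        builder,                      // request builder
        42,                           // correlation id
        "my-client",                  // client id used in the request header
        System.currentTimeMillis(),   // creation time in ms
        true,                         // expect a response
        response -> System.out.println("completed: " + response)); // RequestCompletionHandler callback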
Example 2: send
import org.apache.kafka.common.requests.AbstractRequest; // import the package/class the method depends on
@Override
public void send(ClientRequest request, long now) {
    Iterator<FutureResponse> iterator = futureResponses.iterator();
    while (iterator.hasNext()) {
        FutureResponse futureResp = iterator.next();
        if (futureResp.node != null && !request.destination().equals(futureResp.node.idString()))
            continue;

        AbstractRequest.Builder<?> builder = request.requestBuilder();
        short version = nodeApiVersions.usableVersion(request.apiKey(), builder.desiredVersion());
        AbstractRequest abstractRequest = request.requestBuilder().build(version);
        if (!futureResp.requestMatcher.matches(abstractRequest))
            throw new IllegalStateException("Request matcher did not match next-in-line request " + abstractRequest);

        ClientResponse resp = new ClientResponse(request.makeHeader(version), request.callback(), request.destination(),
                request.createdTimeMs(), time.milliseconds(), futureResp.disconnected, null, futureResp.responseBody);
        responses.add(resp);
        iterator.remove();
        return;
    }
    this.requests.add(request);
}
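In a test, this send(...) is usually paired with a previously queued FutureResponse; a hedged sketch, assuming MockClient's prepareResponse(...) helper from Kafka's test utilities, a metadataResponse instance prepared elsewhere, and the MetadataRequest.Builder shown only for illustration:

MockTime time = new MockTime();
MockClient client = new MockClient(time);
client.prepareResponse(metadataResponse);   // queues a FutureResponse that matches any request
ClientRequest request = client.newClientRequest(
        "0", MetadataRequest.Builder.allTopics(), time.milliseconds(), true);
client.send(request, time.milliseconds());  // matched above and turned into a ClientResponse
List<ClientResponse> responses = client.poll(0, time.milliseconds());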
Example 3: sendEligibleCalls
import org.apache.kafka.common.requests.AbstractRequest; // import the package/class the method depends on
/**
* Send the calls which are ready.
*
* @param now The current time in milliseconds.
* @param callsToSend The calls to send, by node.
* @param correlationIdToCalls A map of correlation IDs to calls.
* @param callsInFlight A map of nodes to the calls they have in flight.
*
* @return The minimum timeout we need for poll().
*/
private long sendEligibleCalls(long now, Map<Node, List<Call>> callsToSend,
                               Map<Integer, Call> correlationIdToCalls, Map<String, List<Call>> callsInFlight) {
    long pollTimeout = Long.MAX_VALUE;
    for (Iterator<Map.Entry<Node, List<Call>>> iter = callsToSend.entrySet().iterator();
             iter.hasNext(); ) {
        Map.Entry<Node, List<Call>> entry = iter.next();
        List<Call> calls = entry.getValue();
        if (calls.isEmpty()) {
            iter.remove();
            continue;
        }
        Node node = entry.getKey();
        if (!client.ready(node, now)) {
            long nodeTimeout = client.connectionDelay(node, now);
            pollTimeout = Math.min(pollTimeout, nodeTimeout);
            log.trace("{}: client is not ready to send to {}. Must delay {} ms", clientId, node, nodeTimeout);
            continue;
        }
        Call call = calls.remove(0);
        int timeoutMs = calcTimeoutMsRemainingAsInt(now, call.deadlineMs);
        AbstractRequest.Builder<?> requestBuilder = null;
        try {
            requestBuilder = call.createRequest(timeoutMs);
        } catch (Throwable throwable) {
            call.fail(now, new KafkaException(String.format(
                "Internal error sending %s to %s.", call.callName, node)));
            continue;
        }
        ClientRequest clientRequest = client.newClientRequest(node.idString(), requestBuilder, now, true);
        log.trace("{}: sending {} to {}. correlationId={}", clientId, requestBuilder, node,
            clientRequest.correlationId());
        client.send(clientRequest, now);
        getOrCreateListValue(callsInFlight, node.idString()).add(call);
        correlationIdToCalls.put(clientRequest.correlationId(), call);
    }
    return pollTimeout;
}
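The returned pollTimeout feeds back into the network poll; a hedged sketch of how the caller's request loop might use it (the surrounding variables and the poll call shown here are assumptions, not code from the snippet above):

long now = time.milliseconds();
long pollTimeout = sendEligibleCalls(now, callsToSend, correlationIdToCalls, callsInFlight);
// Block in the network poll no longer than the smallest connection backoff computed above.
List<ClientResponse> responses = client.poll(pollTimeout, now);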
Example 4: send
import org.apache.kafka.common.requests.AbstractRequest; // import the package/class the method depends on
/**
* Send a new request. Note that the request is not actually transmitted on the
* network until one of the {@link #poll(long)} variants is invoked. At this
* point the request will either be transmitted successfully or will fail.
* Use the returned future to obtain the result of the send. Note that there is no
* need to check for disconnects explicitly on the {@link ClientResponse} object;
* instead, the future will be failed with a {@link DisconnectException}.
*
* @param node The destination of the request
* @param requestBuilder A builder for the request payload
* @return A future which indicates the result of the send.
*/
// Wrap the pending request in a ClientRequest and park it in the unsent collection until it can be sent
public RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder) {
    long now = time.milliseconds();
    RequestFutureCompletionHandler completionHandler = new RequestFutureCompletionHandler();
    ClientRequest clientRequest = client.newClientRequest(node.idString(), requestBuilder, now, true,
            completionHandler);
    unsent.put(node, clientRequest);

    // wakeup the client in case it is blocking in poll so that we can send the queued request
    client.wakeup();
    return completionHandler.future;
}
Example 5: maybeSendTransactionalRequest
import org.apache.kafka.common.requests.AbstractRequest; // import the package/class the method depends on
private boolean maybeSendTransactionalRequest(long now) {
    if (transactionManager.isCompleting() && accumulator.hasIncomplete()) {
        if (transactionManager.isAborting())
            accumulator.abortUndrainedBatches(new KafkaException("Failing batch since transaction was aborted"));

        // There may still be requests left which are being retried. Since we do not know whether they had
        // been successfully appended to the broker log, we must resend them until their final status is clear.
        // If they had been appended and we did not receive the error, then our sequence number would no longer
        // be correct which would lead to an OutOfSequenceException.
        if (!accumulator.flushInProgress())
            accumulator.beginFlush();
    }

    TransactionManager.TxnRequestHandler nextRequestHandler = transactionManager.nextRequestHandler(accumulator.hasIncomplete());
    if (nextRequestHandler == null)
        return false;

    AbstractRequest.Builder<?> requestBuilder = nextRequestHandler.requestBuilder();
    while (true) {
        Node targetNode = null;
        try {
            if (nextRequestHandler.needsCoordinator()) {
                targetNode = transactionManager.coordinator(nextRequestHandler.coordinatorType());
                if (targetNode == null) {
                    transactionManager.lookupCoordinator(nextRequestHandler);
                    break;
                }

                if (!NetworkClientUtils.awaitReady(client, targetNode, time, requestTimeout)) {
                    transactionManager.lookupCoordinator(nextRequestHandler);
                    break;
                }
            } else {
                targetNode = awaitLeastLoadedNodeReady(requestTimeout);
            }

            if (targetNode != null) {
                if (nextRequestHandler.isRetry())
                    time.sleep(nextRequestHandler.retryBackoffMs());

                ClientRequest clientRequest = client.newClientRequest(targetNode.idString(),
                        requestBuilder, now, true, nextRequestHandler);
                transactionManager.setInFlightRequestCorrelationId(clientRequest.correlationId());
                log.debug("{}Sending transactional request {} to node {}",
                        transactionManager.logPrefix, requestBuilder, targetNode);

                client.send(clientRequest, now);
                return true;
            }
        } catch (IOException e) {
            log.debug("{}Disconnect from {} while trying to send request {}. Going " +
                    "to back off and retry", transactionManager.logPrefix, targetNode, requestBuilder);
            if (nextRequestHandler.needsCoordinator()) {
                // We break here so that we pick up the FindCoordinator request immediately.
                transactionManager.lookupCoordinator(nextRequestHandler);
                break;
            }
        }

        time.sleep(retryBackoffMs);
        metadata.requestUpdate();
    }

    transactionManager.retry(nextRequestHandler);
    return true;
}
Example 6: doSend
import org.apache.kafka.common.requests.AbstractRequest; // import the package/class the method depends on
private void doSend(ClientRequest clientRequest, boolean isInternalRequest, long now) {
    String nodeId = clientRequest.destination();
    // Calls arriving via the public send() are external (isInternalRequest is false by default) and must be validated
    if (!isInternalRequest) {
        // If this request came from outside the NetworkClient, validate
        // that we can send data. If the request is internal, we trust
        // that the internal code has done this validation. Validation
        // will be slightly different for some internal requests (for
        // example, ApiVersionsRequests can be sent prior to being in
        // READY state).
        // canSendRequest(nodeId) == connectionStates.isReady(node) && selector.isChannelReady(node) && inFlightRequests.canSendMore(node)
        if (!canSendRequest(nodeId))
            throw new IllegalStateException("Attempt to send a request to node " + nodeId + " which is not ready.");
    }
    AbstractRequest.Builder<?> builder = clientRequest.requestBuilder();
    try {
        NodeApiVersions versionInfo = apiVersions.get(nodeId);
        short version;
        // Note: if versionInfo is null, we have no server version information. This would be
        // the case when sending the initial ApiVersionRequest which fetches the version
        // information itself. It is also the case when discoverBrokerVersions is set to false.
        // Pick the request version to use
        if (versionInfo == null) {
            version = builder.desiredOrLatestVersion();
            if (discoverBrokerVersions && log.isTraceEnabled())
                log.trace("No version information found when sending message of type {} to node {}. " +
                        "Assuming version {}.", clientRequest.apiKey(), nodeId, version);
        } else {
            version = versionInfo.usableVersion(clientRequest.apiKey(), builder.desiredVersion());
        }
        // The call to build may also throw UnsupportedVersionException, if there are essential
        // fields that cannot be represented in the chosen version.
        // Perform the actual send
        doSend(clientRequest, isInternalRequest, now, builder.build(version));
    } catch (UnsupportedVersionException e) {
        // If the version is not supported, skip sending the request over the wire.
        // Instead, simply add it to the local queue of aborted requests.
        log.debug("Version mismatch when attempting to send {} to {}",
                clientRequest.toString(), clientRequest.destination(), e);
        ClientResponse clientResponse = new ClientResponse(clientRequest.makeHeader(builder.desiredOrLatestVersion()),
                clientRequest.callback(), clientRequest.destination(), now, now,
                false, e, null);
        abortedSends.add(clientResponse);
    }
}
Example 7: newClientRequest
import org.apache.kafka.common.requests.AbstractRequest; // import the package/class the method depends on
@Override
public ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder, long createdTimeMs,
                                      boolean expectResponse) {
    return newClientRequest(nodeId, requestBuilder, createdTimeMs, expectResponse, null);
}
Example 8: requestBuilder
import org.apache.kafka.common.requests.AbstractRequest; // import the package/class the method depends on
public AbstractRequest.Builder<?> requestBuilder() {
    return requestBuilder;
}
Example 9: newClientRequest
import org.apache.kafka.common.requests.AbstractRequest; // import the package/class the method depends on
/**
* Create a new ClientRequest.
*
* @param nodeId the node to send to
* @param requestBuilder the request builder to use
* @param createdTimeMs the time in milliseconds to use as the creation time of the request
* @param expectResponse true iff we expect a response
*/
ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder,
                               long createdTimeMs, boolean expectResponse);
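The typical call pattern against this interface is: build the ClientRequest, hand it to send(...), then poll for the response. A minimal sketch, where the MetadataRequest.Builder and the 100 ms poll timeout are illustrative assumptions:

long now = time.milliseconds();
ClientRequest request = client.newClientRequest(
        node.idString(), MetadataRequest.Builder.allTopics(), now, true);
client.send(request, now);
List<ClientResponse> responses = client.poll(100, time.milliseconds());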
Example 10: createRequest
import org.apache.kafka.common.requests.AbstractRequest; // import the package/class the method depends on
/**
* Create an AbstractRequest.Builder for this Call.
*
* @param timeoutMs The timeout in milliseconds.
*
* @return The AbstractRequest builder.
*/
abstract AbstractRequest.Builder createRequest(int timeoutMs);
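A hedged sketch of a Call implementation overriding createRequest(int), loosely modeled on how the admin client of the same era builds its requests; the Call constructor arguments, ControllerNodeProvider, and the DeleteTopicsRequest.Builder signature are assumptions, not part of the snippet above:

Call call = new Call("deleteTopics", deadlineMs, new ControllerNodeProvider()) {
    @Override
    AbstractRequest.Builder createRequest(int timeoutMs) {
        // timeoutMs carries the remaining time budget for this call into the request
        return new DeleteTopicsRequest.Builder(new HashSet<>(topicNames), timeoutMs);
    }

    @Override
    void handleResponse(AbstractResponse abstractResponse) {
        // complete the user-facing future from the response here
    }

    @Override
    void handleFailure(Throwable throwable) {
        // fail the user-facing future here
    }
};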
Example 11: requestBuilder
import org.apache.kafka.common.requests.AbstractRequest; // import the package/class the method depends on
abstract AbstractRequest.Builder<?> requestBuilder();