当前位置: 首页>>代码示例>>Java>>正文


Java ClientResponse.responseBody方法代码示例

本文整理汇总了Java中org.apache.kafka.clients.ClientResponse.responseBody方法的典型用法代码示例。如果您正苦于以下问题:Java ClientResponse.responseBody方法的具体用法?Java ClientResponse.responseBody怎么用?Java ClientResponse.responseBody使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.kafka.clients.ClientResponse的用法示例。


在下文中一共展示了ClientResponse.responseBody方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: fetchMetadata

import org.apache.kafka.clients.ClientResponse; //导入方法依赖的package包/类
/**
 * Fetch the metadata for all topics
 */
/**
 * Fetch cluster metadata for all topics from any broker that is ready.
 *
 * @return the metadata response covering all topics
 * @throws StreamsException if the broker returned no body, or a body of an unexpected type
 */
public MetadataResponse fetchMetadata() {

    final ClientRequest request = kafkaClient.newClientRequest(
        getAnyReadyBrokerId(),
        MetadataRequest.Builder.allTopics(),
        Time.SYSTEM.milliseconds(),
        true);
    final ClientResponse response = sendRequest(request);

    // Guard against a disconnect/timeout that produced no response body.
    if (!response.hasResponse()) {
        throw new StreamsException("Empty response for client request.");
    }
    final Object body = response.responseBody();
    if (!(body instanceof MetadataResponse)) {
        throw new StreamsException("Inconsistent response type for internal topic metadata request. " +
            "Expected MetadataResponse but received " + body.getClass().getName());
    }
    return (MetadataResponse) body;
}
 
开发者ID:YMCoding,项目名称:kafka-0.11.0.0-src-with-comment,代码行数:23,代码来源:StreamsKafkaClient.java

示例2: send

import org.apache.kafka.clients.ClientResponse; //导入方法依赖的package包/类
@Test
public void send() {
    // Queue a canned heartbeat response, then issue a heartbeat request.
    client.prepareResponse(heartbeatResponse(Errors.NONE));
    final RequestFuture<ClientResponse> sendFuture = consumerClient.send(node, heartbeat());

    // The request is pending (globally and per-node) until poll drives the I/O.
    assertEquals(1, consumerClient.pendingRequestCount());
    assertEquals(1, consumerClient.pendingRequestCount(node));
    assertFalse(sendFuture.isDone());

    consumerClient.poll(sendFuture);

    // After polling, the future completed successfully with the prepared response.
    assertTrue(sendFuture.isDone());
    assertTrue(sendFuture.succeeded());

    final HeartbeatResponse heartbeat = (HeartbeatResponse) sendFuture.value().responseBody();
    assertEquals(Errors.NONE, heartbeat.error());
}
 
开发者ID:YMCoding,项目名称:kafka-0.11.0.0-src-with-comment,代码行数:17,代码来源:ConsumerNetworkClientTest.java

示例3: handleListOffsetResponse

import org.apache.kafka.clients.ClientResponse; //导入方法依赖的package包/类
/**
 * Callback for the response of the list offset call above.
 * @param topicPartition The partition that was fetched
 * @param clientResponse The response from the server.
 */
/**
 * Handle the broker's reply to a list-offset request for a single partition.
 *
 * @param topicPartition the partition whose offset was requested
 * @param clientResponse the raw response received from the broker
 * @param future         completed with the fetched offset on success, or raised with an error
 */
private void handleListOffsetResponse(TopicPartition topicPartition,
                                      ClientResponse clientResponse,
                                      RequestFuture<Long> future) {
    final ListOffsetResponse listOffsetResponse = new ListOffsetResponse(clientResponse.responseBody());
    final short errorCode = listOffsetResponse.responseData().get(topicPartition).errorCode;

    if (errorCode == Errors.NONE.code()) {
        // Exactly one offset is expected back for a single-timestamp query.
        final List<Long> offsets = listOffsetResponse.responseData().get(topicPartition).offsets;
        if (offsets.size() != 1) {
            throw new IllegalStateException("This should not happen.");
        }
        final long fetchedOffset = offsets.get(0);
        log.debug("Fetched offset {} for partition {}", fetchedOffset, topicPartition);

        future.complete(fetchedOffset);
    } else if (errorCode == Errors.NOT_LEADER_FOR_PARTITION.code()
            || errorCode == Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) {
        // Stale leadership metadata: raise the broker error so the caller retries.
        log.debug("Attempt to fetch offsets for partition {} failed due to obsolete leadership information, retrying.",
                topicPartition);
        future.raise(Errors.forCode(errorCode));
    } else {
        // Any other error is surfaced as stale metadata, forcing a metadata refresh.
        log.warn("Attempt to fetch offsets for partition {} failed due to: {}",
                topicPartition, Errors.forCode(errorCode).message());
        future.raise(new StaleMetadataException());
    }
}
 
开发者ID:txazo,项目名称:kafka,代码行数:30,代码来源:Fetcher.java

示例4: send

import org.apache.kafka.clients.ClientResponse; //导入方法依赖的package包/类
@Test
public void send() {
    client.prepareResponse(heartbeatResponse(Errors.NONE.code()));
    RequestFuture<ClientResponse> future = consumerClient.send(node, ApiKeys.METADATA, heartbeatRequest());
    assertEquals(1, consumerClient.pendingRequestCount());
    assertEquals(1, consumerClient.pendingRequestCount(node));
    assertFalse(future.isDone());

    consumerClient.poll(future);
    assertTrue(future.isDone());
    assertTrue(future.succeeded());

    ClientResponse clientResponse = future.value();
    HeartbeatResponse response = new HeartbeatResponse(clientResponse.responseBody());
    assertEquals(Errors.NONE.code(), response.errorCode());
}
 
开发者ID:txazo,项目名称:kafka,代码行数:17,代码来源:ConsumerNetworkClientTest.java

示例5: checkBrokerCompatibility

import org.apache.kafka.clients.ClientResponse; //导入方法依赖的package包/类
/**
 * Check if the used brokers have version 0.10.1.x or higher.
 * <p>
 * Note, for <em>pre</em> 0.10.x brokers the broker version cannot be checked and the client will hang and retry
 * until it {@link StreamsConfig#REQUEST_TIMEOUT_MS_CONFIG times out}.
 *
 * @throws StreamsException if brokers have version 0.10.0.x
 */
/**
 * Check if the used brokers have version 0.10.1.x or higher.
 * <p>
 * Note, for <em>pre</em> 0.10.x brokers the broker version cannot be checked and the client will hang and retry
 * until it {@link StreamsConfig#REQUEST_TIMEOUT_MS_CONFIG times out}.
 *
 * @param eosEnabled whether exactly-once semantics is enabled, requiring transactional broker support
 * @throws StreamsException if brokers have version 0.10.0.x, or (with EOS) lack transaction support
 */
public void checkBrokerCompatibility(final boolean eosEnabled) throws StreamsException {
    final ClientRequest request = kafkaClient.newClientRequest(
        getAnyReadyBrokerId(),
        new ApiVersionsRequest.Builder(),
        Time.SYSTEM.milliseconds(),
        true);

    final ClientResponse response = sendRequest(request);
    if (!response.hasResponse()) {
        throw new StreamsException("Empty response for client request.");
    }
    final Object body = response.responseBody();
    if (!(body instanceof ApiVersionsResponse)) {
        throw new StreamsException("Inconsistent response type for API versions request. " +
            "Expected ApiVersionsResponse but received " + body.getClass().getName());
    }

    final ApiVersionsResponse apiVersionsResponse = (ApiVersionsResponse) body;

    // CREATE_TOPICS was introduced in 0.10.1.x; its absence indicates an older broker.
    if (apiVersionsResponse.apiVersion(ApiKeys.CREATE_TOPICS.id) == null) {
        throw new StreamsException("Kafka Streams requires broker version 0.10.1.x or higher.");
    }

    if (eosEnabled && !brokerSupportsTransactions(apiVersionsResponse)) {
        throw new StreamsException("Setting " + PROCESSING_GUARANTEE_CONFIG + "=" + EXACTLY_ONCE + " requires broker version 0.11.0.x or higher.");
    }
}
 
开发者ID:YMCoding,项目名称:kafka-0.11.0.0-src-with-comment,代码行数:35,代码来源:StreamsKafkaClient.java

示例6: onSuccess

import org.apache.kafka.clients.ClientResponse; //导入方法依赖的package包/类
@Override
public void onSuccess(ClientResponse resp, RequestFuture<Void> future) {
    log.debug("Received GroupCoordinator response {} for group {}", resp, groupId);

    FindCoordinatorResponse findCoordinatorResponse = (FindCoordinatorResponse) resp.responseBody();
    // use MAX_VALUE - node.id as the coordinator id to mimic separate connections
    // for the coordinator in the underlying network client layer
    // TODO: this needs to be better handled in KAFKA-1935
    Errors error = findCoordinatorResponse.error();
    clearFindCoordinatorFuture();
    if (error == Errors.NONE) {
        synchronized (AbstractCoordinator.this) {
            // Build the Node object representing the discovered group coordinator
            AbstractCoordinator.this.coordinator = new Node(
                    Integer.MAX_VALUE - findCoordinatorResponse.node().id(),
                    findCoordinatorResponse.node().host(),
                    findCoordinatorResponse.node().port());
            log.info("Discovered coordinator {} for group {}.", coordinator, groupId);
            // Initiate the connection to the coordinator and reset heartbeat deadlines
            client.tryConnect(coordinator);
            heartbeat.resetTimeouts(time.milliseconds());
        }
        future.complete(null);
    } else if (error == Errors.GROUP_AUTHORIZATION_FAILED) {
        future.raise(new GroupAuthorizationException(groupId));
    } else {
        log.debug("Group coordinator lookup for group {} failed: {}", groupId, error.message());
        future.raise(error);
    }
}
 
开发者ID:YMCoding,项目名称:kafka-0.11.0.0-src-with-comment,代码行数:31,代码来源:AbstractCoordinator.java

示例7: handleGroupMetadataResponse

import org.apache.kafka.clients.ClientResponse; //导入方法依赖的package包/类
/**
 * Handle the response to a group-coordinator lookup: on success, record the
 * coordinator node, connect to it, and (re)start heartbeats; on failure,
 * raise the broker error on the given future.
 */
private void handleGroupMetadataResponse(ClientResponse resp, RequestFuture<Void> future) {
    log.debug("Received group coordinator response {}", resp);

    if (!coordinatorUnknown()) {
        // We already found the coordinator, so ignore the request
        future.complete(null);
    } else {
        GroupCoordinatorResponse groupCoordinatorResponse = new GroupCoordinatorResponse(resp.responseBody());
        // use MAX_VALUE - node.id as the coordinator id to mimic separate connections
        // for the coordinator in the underlying network client layer
        // TODO: this needs to be better handled in KAFKA-1935
        Errors error = Errors.forCode(groupCoordinatorResponse.errorCode());
        if (error == Errors.NONE) {
            this.coordinator = new Node(Integer.MAX_VALUE - groupCoordinatorResponse.node().id(),
                    groupCoordinatorResponse.node().host(),
                    groupCoordinatorResponse.node().port());

            log.info("Discovered coordinator {} for group {}.", coordinator, groupId);

            client.tryConnect(coordinator);

            // start sending heartbeats only if we have a valid generation
            if (generation > 0)
                heartbeatTask.reset();
            future.complete(null);
        } else if (error == Errors.GROUP_AUTHORIZATION_FAILED) {
            future.raise(new GroupAuthorizationException(groupId));
        } else {
            // Any other error (e.g. coordinator not available) is propagated to the caller
            future.raise(error);
        }
    }
}
 
开发者ID:txazo,项目名称:kafka,代码行数:33,代码来源:AbstractCoordinator.java

示例8: sendExpiry

import org.apache.kafka.clients.ClientResponse; //导入方法依赖的package包/类
/**
 * Verify that unsent requests expire after {@code unsentExpiryMs}, that un-expired
 * requests still complete once the node becomes ready, and that a disconnection
 * fails any remaining pending sends.
 */
@Test
public void sendExpiry() throws InterruptedException {
    long unsentExpiryMs = 10;
    final AtomicBoolean isReady = new AtomicBoolean();
    final AtomicBoolean disconnected = new AtomicBoolean();
    // Mock client whose readiness/disconnection is toggled by the flags above,
    // so the test controls exactly when sends can proceed.
    client = new MockClient(time) {
        @Override
        public boolean ready(Node node, long now) {
            if (isReady.get())
                return super.ready(node, now);
            else
                return false;
        }
        @Override
        public boolean connectionFailed(Node node) {
            return disconnected.get();
        }
    };
    // Queue first send, sleep long enough for this to expire and then queue second send
    consumerClient = new ConsumerNetworkClient(client, metadata, time, 100, unsentExpiryMs);
    RequestFuture<ClientResponse> future1 = consumerClient.send(node, heartbeat());
    assertEquals(1, consumerClient.pendingRequestCount());
    assertEquals(1, consumerClient.pendingRequestCount(node));
    assertFalse(future1.isDone());

    // Advance past the expiry deadline so future1's request becomes stale
    time.sleep(unsentExpiryMs + 1);
    RequestFuture<ClientResponse> future2 = consumerClient.send(node, heartbeat());
    assertEquals(2, consumerClient.pendingRequestCount());
    assertEquals(2, consumerClient.pendingRequestCount(node));
    assertFalse(future2.isDone());

    // First send should have expired and second send still pending
    consumerClient.poll(0);
    assertTrue(future1.isDone());
    assertFalse(future1.succeeded());
    assertEquals(1, consumerClient.pendingRequestCount());
    assertEquals(1, consumerClient.pendingRequestCount(node));
    assertFalse(future2.isDone());

    // Enable send, the un-expired send should succeed on poll
    isReady.set(true);
    client.prepareResponse(heartbeatResponse(Errors.NONE));
    consumerClient.poll(future2);
    ClientResponse clientResponse = future2.value();
    HeartbeatResponse response = (HeartbeatResponse) clientResponse.responseBody();
    assertEquals(Errors.NONE, response.error());

    // Disable ready flag to delay send and queue another send. Disconnection should remove pending send
    isReady.set(false);
    RequestFuture<ClientResponse> future3 = consumerClient.send(node, heartbeat());
    assertEquals(1, consumerClient.pendingRequestCount());
    assertEquals(1, consumerClient.pendingRequestCount(node));
    disconnected.set(true);
    consumerClient.poll(0);
    assertTrue(future3.isDone());
    assertFalse(future3.succeeded());
    assertEquals(0, consumerClient.pendingRequestCount());
    assertEquals(0, consumerClient.pendingRequestCount(node));
}
 
开发者ID:YMCoding,项目名称:kafka-0.11.0.0-src-with-comment,代码行数:60,代码来源:ConsumerNetworkClientTest.java

示例9: parse

import org.apache.kafka.clients.ClientResponse; //导入方法依赖的package包/类
@Override
public JoinGroupResponse parse(ClientResponse response) {
    // Deserialize the raw response body into a typed JoinGroupResponse.
    final JoinGroupResponse joinGroupResponse = new JoinGroupResponse(response.responseBody());
    return joinGroupResponse;
}
 
开发者ID:txazo,项目名称:kafka,代码行数:5,代码来源:AbstractCoordinator.java

示例10: parse

import org.apache.kafka.clients.ClientResponse; //导入方法依赖的package包/类
@Override
public OffsetCommitResponse parse(ClientResponse response) {
    // Deserialize the raw response body into a typed OffsetCommitResponse.
    final OffsetCommitResponse offsetCommitResponse = new OffsetCommitResponse(response.responseBody());
    return offsetCommitResponse;
}
 
开发者ID:txazo,项目名称:kafka,代码行数:5,代码来源:ConsumerCoordinator.java

示例11: sendExpiry

import org.apache.kafka.clients.ClientResponse; //导入方法依赖的package包/类
/**
 * Verify that unsent requests expire after {@code unsentExpiryMs}, that un-expired
 * requests still complete once the node becomes ready, and that a disconnection
 * fails any remaining pending sends.
 */
@Test
public void sendExpiry() throws InterruptedException {
    long unsentExpiryMs = 10;
    final AtomicBoolean isReady = new AtomicBoolean();
    final AtomicBoolean disconnected = new AtomicBoolean();
    // Mock client whose readiness/disconnection is toggled by the flags above,
    // so the test controls exactly when sends can proceed.
    client = new MockClient(time) {
        @Override
        public boolean ready(Node node, long now) {
            if (isReady.get())
                return super.ready(node, now);
            else
                return false;
        }
        @Override
        public boolean connectionFailed(Node node) {
            return disconnected.get();
        }
    };
    // Queue first send, sleep long enough for this to expire and then queue second send
    consumerClient = new ConsumerNetworkClient(client, metadata, time, 100, unsentExpiryMs);
    RequestFuture<ClientResponse> future1 = consumerClient.send(node, ApiKeys.METADATA, heartbeatRequest());
    assertEquals(1, consumerClient.pendingRequestCount());
    assertEquals(1, consumerClient.pendingRequestCount(node));
    assertFalse(future1.isDone());

    // Advance past the expiry deadline so future1's request becomes stale
    time.sleep(unsentExpiryMs + 1);
    RequestFuture<ClientResponse> future2 = consumerClient.send(node, ApiKeys.METADATA, heartbeatRequest());
    assertEquals(2, consumerClient.pendingRequestCount());
    assertEquals(2, consumerClient.pendingRequestCount(node));
    assertFalse(future2.isDone());

    // First send should have expired and second send still pending
    consumerClient.poll(0);
    assertTrue(future1.isDone());
    assertFalse(future1.succeeded());
    assertEquals(1, consumerClient.pendingRequestCount());
    assertEquals(1, consumerClient.pendingRequestCount(node));
    assertFalse(future2.isDone());

    // Enable send, the un-expired send should succeed on poll
    isReady.set(true);
    client.prepareResponse(heartbeatResponse(Errors.NONE.code()));
    consumerClient.poll(future2);
    ClientResponse clientResponse = future2.value();
    HeartbeatResponse response = new HeartbeatResponse(clientResponse.responseBody());
    assertEquals(Errors.NONE.code(), response.errorCode());

    // Disable ready flag to delay send and queue another send. Disconnection should remove pending send
    isReady.set(false);
    RequestFuture<ClientResponse> future3 = consumerClient.send(node, ApiKeys.METADATA, heartbeatRequest());
    assertEquals(1, consumerClient.pendingRequestCount());
    assertEquals(1, consumerClient.pendingRequestCount(node));
    disconnected.set(true);
    consumerClient.poll(0);
    assertTrue(future3.isDone());
    assertFalse(future3.succeeded());
    assertEquals(0, consumerClient.pendingRequestCount());
    assertEquals(0, consumerClient.pendingRequestCount(node));
}
 
开发者ID:txazo,项目名称:kafka,代码行数:60,代码来源:ConsumerNetworkClientTest.java


注:本文中的org.apache.kafka.clients.ClientResponse.responseBody方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。