本文整理汇总了Java中org.apache.kafka.common.errors.UnsupportedVersionException类的典型用法代码示例。如果您正苦于以下问题:Java UnsupportedVersionException类的具体用法?Java UnsupportedVersionException怎么用?Java UnsupportedVersionException使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
UnsupportedVersionException类属于org.apache.kafka.common.errors包,在下文中一共展示了UnsupportedVersionException类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: build
import org.apache.kafka.common.errors.UnsupportedVersionException; //导入依赖的package包/类
@Override
public OffsetFetchRequest build(short version) {
    // Fetching offsets for *all* topic partitions is only expressible in protocol v2+.
    if (version < 2 && isAllTopicPartitions()) {
        throw new UnsupportedVersionException("The broker only supports OffsetFetchRequest " +
                "v" + version + ", but we need v2 or newer to request all topic partitions.");
    }
    return new OffsetFetchRequest(groupId, partitions, version);
}
示例2: returnNullWithApiVersionMismatch
import org.apache.kafka.common.errors.UnsupportedVersionException; //导入依赖的package包/类
/**
 * 0.10.x clients can't talk with 0.9.x brokers, and 0.10.0.0 introduced the new protocol with API versions.
 * That means we can simulate an API version mismatch.
 */
@Test
public void returnNullWithApiVersionMismatch() {
    final NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build();
    Cluster cluster = createCluster(1);
    try (MockKafkaAdminClientEnv env = new MockKafkaAdminClientEnv(cluster)) {
        env.kafkaClient().setNode(cluster.controller());
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet());
        // Queue a canned response signalling an unsupported API version.
        env.kafkaClient().prepareResponse(createTopicResponseWithUnsupportedVersion(newTopic));
        TopicAdmin admin = new TopicAdmin(null, env.adminClient());
        // createTopic should surface the broker's version mismatch as an exception.
        admin.createTopic(newTopic);
        fail();
    } catch (UnsupportedVersionException e) {
        // expected
    }
}
示例3: ClientResponse
import org.apache.kafka.common.errors.UnsupportedVersionException; //导入依赖的package包/类
/**
 * Builds a response object pairing a broker reply (or failure condition) with the
 * request it answers. Note that {@code createdTimeMs} is not retained as a field;
 * it is only used to compute {@code latencyMs = receivedTimeMs - createdTimeMs}.
 *
 * @param requestHeader The header of the corresponding request
 * @param callback The callback to be invoked
 * @param destination The node the corresponding request was sent to
 * @param createdTimeMs The unix timestamp when the corresponding request was created
 * @param receivedTimeMs The unix timestamp when this response was received
 * @param disconnected Whether the client disconnected before fully reading a response
 * @param versionMismatch Whether there was a version mismatch that prevented sending the request.
 * @param responseBody The response contents (or null) if we disconnected, no response was expected,
 *                     or if there was a version mismatch.
 */
public ClientResponse(RequestHeader requestHeader,
                      RequestCompletionHandler callback,
                      String destination,
                      long createdTimeMs,
                      long receivedTimeMs,
                      boolean disconnected,
                      UnsupportedVersionException versionMismatch,
                      AbstractResponse responseBody) {
    this.requestHeader = requestHeader;
    this.callback = callback;
    this.destination = destination;
    this.receivedTimeMs = receivedTimeMs;
    // Latency is derived once here; the creation timestamp itself is not stored.
    this.latencyMs = receivedTimeMs - createdTimeMs;
    this.disconnected = disconnected;
    this.versionMismatch = versionMismatch;
    this.responseBody = responseBody;
}
示例4: tryFeature
import org.apache.kafka.common.errors.UnsupportedVersionException; //导入依赖的package包/类
/**
 * Invokes a broker feature and checks that its availability matches expectations.
 * Throws if a feature expected to be supported raises UnsupportedVersionException,
 * or if a feature expected to be unsupported succeeds.
 */
private void tryFeature(String featureName, boolean supported, Invoker invoker, ResultTester resultTester)
throws Throwable {
    // Track whether the invocation went through without a version mismatch.
    boolean worked;
    try {
        invoker.invoke();
        log.info("Successfully used feature {}", featureName);
        worked = true;
    } catch (UnsupportedVersionException e) {
        log.info("Got UnsupportedVersionException when attempting to use feature {}", featureName);
        if (supported) {
            throw new RuntimeException("Expected " + featureName + " to be supported, but it wasn't.", e);
        }
        worked = false;
    }
    if (worked) {
        if (!supported) {
            throw new RuntimeException("Did not expect " + featureName + " to be supported, but it was.");
        }
        // Only validate results when the feature actually executed.
        resultTester.test();
    }
}
示例5: build
import org.apache.kafka.common.errors.UnsupportedVersionException; //导入依赖的package包/类
@Override
public ListOffsetRequest build(short version) {
    // The requested features may only exist in newer protocol versions.
    if (version < minVersion) {
        throw new UnsupportedVersionException("Cannot create a v" + version + " ListOffsetRequest because " +
                "we require features supported only in " + minVersion + " or later.");
    }
    if (version == 0) {
        // v0 carries PartitionData per partition; if the caller supplied the newer
        // partitionTimestamps map instead, convert it lazily here.
        if (offsetData == null) {
            if (partitionTimestamps == null) {
                throw new RuntimeException("Must set partitionTimestamps or offsetData when creating a v0 " +
                        "ListOffsetRequest");
            } else {
                offsetData = new HashMap<>();
                for (Map.Entry<TopicPartition, Long> entry: partitionTimestamps.entrySet()) {
                    // second arg 1 — presumably maxNumOffsets; confirm against PartitionData.
                    offsetData.put(entry.getKey(),
                            new PartitionData(entry.getValue(), 1));
                }
                // Only one of the two maps may remain set after conversion.
                this.partitionTimestamps = null;
            }
        }
    } else {
        // v1+ takes plain timestamps; v0-style PartitionData cannot be converted upward.
        if (offsetData != null) {
            throw new RuntimeException("Cannot create a v" + version + " ListOffsetRequest with v0 " +
                    "PartitionData.");
        } else if (partitionTimestamps == null) {
            throw new RuntimeException("Must set partitionTimestamps when creating a v" +
                    version + " ListOffsetRequest");
        }
    }
    // Exactly one of the two maps is non-null at this point, matching the version.
    Map<TopicPartition, ?> m = (version == 0) ? offsetData : partitionTimestamps;
    return new ListOffsetRequest(replicaId, m, isolationLevel, version);
}
示例6: recordsBuilder
import org.apache.kafka.common.errors.UnsupportedVersionException; //导入依赖的package包/类
/**
 * Creates a records builder for the given buffer, rejecting idempotent/transactional
 * production when the broker cannot accept the v2 message format.
 */
private MemoryRecordsBuilder recordsBuilder(ByteBuffer buffer, byte maxUsableMagic) {
    // A non-null transaction manager implies idempotence, which needs magic v2.
    boolean needsV2Magic = transactionManager != null;
    if (needsV2Magic && maxUsableMagic < RecordBatch.MAGIC_VALUE_V2) {
        throw new UnsupportedVersionException("Attempting to use idempotence with a broker which does not " +
                "support the required message format (v2). The broker must be version 0.11 or later.");
    }
    return MemoryRecords.builder(buffer, maxUsableMagic, compression, TimestampType.CREATE_TIME, 0L);
}
示例7: ensureUsable
import org.apache.kafka.common.errors.UnsupportedVersionException; //导入依赖的package包/类
/**
 * Verifies that a usable API version was negotiated for this key,
 * throwing if the broker's supported range does not overlap ours.
 */
private void ensureUsable() {
    if (value == NODE_TOO_OLD) {
        throw new UnsupportedVersionException("The broker is too old to support " + apiKey +
                " version " + apiKey.oldestVersion());
    }
    if (value == NODE_TOO_NEW) {
        throw new UnsupportedVersionException("The broker is too new to support " + apiKey +
                " version " + apiKey.latestVersion());
    }
}
示例8: build
import org.apache.kafka.common.errors.UnsupportedVersionException; //导入依赖的package包/类
@Override
public FindCoordinatorRequest build(short version) {
    // The requested coordinator lookup features may only exist from minVersion onward.
    if (version < minVersion) {
        throw new UnsupportedVersionException("Cannot create a v" + version + " FindCoordinator request " +
                "because we require features supported only in " + minVersion + " or later.");
    }
    return new FindCoordinatorRequest(coordinatorType, coordinatorKey, version);
}
示例9: build
import org.apache.kafka.common.errors.UnsupportedVersionException; //导入依赖的package包/类
@Override
public MetadataRequest build(short version) {
    // v0 is rejected outright.
    if (version < 1) {
        throw new UnsupportedVersionException("MetadataRequest versions older than 1 are not supported.");
    }
    // Disabling auto topic creation is only expressible from v4 onward.
    if (!allowAutoTopicCreation && version < 4) {
        throw new UnsupportedVersionException("MetadataRequest versions older than 4 don't support the " +
                "allowAutoTopicCreation field");
    }
    return new MetadataRequest(this.topics, allowAutoTopicCreation, version);
}
示例10: build
import org.apache.kafka.common.errors.UnsupportedVersionException; //导入依赖的package包/类
@Override
public CreateTopicsRequest build(short version) {
    // The validate-only (dry run) flag was introduced after v0.
    if (version == 0 && validateOnly) {
        throw new UnsupportedVersionException("validateOnly is not supported in version 0 of " +
                "CreateTopicsRequest");
    }
    return new CreateTopicsRequest(topics, timeout, validateOnly, version);
}
示例11: build
import org.apache.kafka.common.errors.UnsupportedVersionException; //导入依赖的package包/类
@Override
public OffsetCommitRequest build(short version) {
    // v0 has no group generation, member id, or retention time fields.
    if (version == 0) {
        return new OffsetCommitRequest(groupId, DEFAULT_GENERATION_ID, DEFAULT_MEMBER_ID,
                DEFAULT_RETENTION_TIME, offsetData, version);
    }
    if (version >= 1 && version <= 3) {
        // v1 lacks a retention-time field; v2+ carry the configured value.
        long effectiveRetention = (version == 1) ? DEFAULT_RETENTION_TIME : this.retentionTime;
        return new OffsetCommitRequest(groupId, generationId, memberId, effectiveRetention, offsetData, version);
    }
    throw new UnsupportedVersionException("Unsupported version " + version);
}
示例12: build
import org.apache.kafka.common.errors.UnsupportedVersionException; //导入依赖的package包/类
@Override
public ProduceRequest build(short version) {
    // Versions below 2 are no longer constructible from this builder.
    if (version < 2) {
        throw new UnsupportedVersionException("ProduceRequest versions older than 2 are not supported.");
    }
    return new ProduceRequest(version, acks, timeout, partitionRecords, transactionalId);
}
示例13: build
import org.apache.kafka.common.errors.UnsupportedVersionException; //导入依赖的package包/类
@Override
public UpdateMetadataRequest build(short version) {
    if (version == 0) {
        // v0 predates multiple listeners: every live broker must expose
        // exactly one endpoint, and it must be PLAINTEXT.
        for (Broker broker : liveBrokers) {
            boolean singlePlaintext = broker.endPoints.size() == 1
                    && broker.endPoints.get(0).securityProtocol == SecurityProtocol.PLAINTEXT;
            if (!singlePlaintext) {
                throw new UnsupportedVersionException("UpdateMetadataRequest v0 only handles PLAINTEXT endpoints");
            }
        }
    }
    return new UpdateMetadataRequest(version, controllerId, controllerEpoch, partitionStates, liveBrokers);
}
示例14: testUsableVersionCalculation
import org.apache.kafka.common.errors.UnsupportedVersionException; //导入依赖的package包/类
@Test
public void testUsableVersionCalculation() {
    // Advertise only CONTROLLED_SHUTDOWN v0-v0 and FETCH v1-v2 from the node.
    List<ApiVersion> advertised = new ArrayList<>();
    advertised.add(new ApiVersion(ApiKeys.CONTROLLED_SHUTDOWN_KEY.id, (short) 0, (short) 0));
    advertised.add(new ApiVersion(ApiKeys.FETCH.id, (short) 1, (short) 2));
    NodeApiVersions versions = new NodeApiVersions(advertised);
    try {
        versions.usableVersion(ApiKeys.CONTROLLED_SHUTDOWN_KEY);
        Assert.fail("expected UnsupportedVersionException");
    } catch (UnsupportedVersionException e) {
        // pass — the node's CONTROLLED_SHUTDOWN range does not overlap what we support
    }
    // The highest mutually supported FETCH version should be chosen.
    assertEquals(2, versions.usableVersion(ApiKeys.FETCH));
}
示例15: testIdempotenceWithOldMagic
import org.apache.kafka.common.errors.UnsupportedVersionException; //导入依赖的package包/类
@Test(expected = UnsupportedVersionException.class)
public void testIdempotenceWithOldMagic() throws InterruptedException {
    // Simulate talking to an older broker, ie. one which supports a lower magic.
    ApiVersions apiVersions = new ApiVersions();
    int batchSize = 1025;
    // Advertise PRODUCE only up to v2 — presumably below the version that
    // introduced the v2 message format (magic 2); confirm against ProduceRequest.
    apiVersions.update("foobar", NodeApiVersions.create(Arrays.asList(new ApiVersionsResponse.ApiVersion(ApiKeys.PRODUCE.id,
            (short) 0, (short) 2))));
    // Passing a TransactionManager enables idempotence, which needs the newer magic,
    // so the append below is expected to throw UnsupportedVersionException.
    RecordAccumulator accum = new RecordAccumulator(batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * batchSize,
            CompressionType.NONE, 10, 100L, metrics, time, apiVersions, new TransactionManager());
    accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, 0);
}