This article collects typical usage examples of the Java property org.apache.kafka.common.utils.Time.SYSTEM. If you are wondering what Time.SYSTEM is for, how to use it, or want to see it in real code, the curated samples below may help. You can also explore further usage examples of its enclosing class, org.apache.kafka.common.utils.Time.
The following 15 code examples of Time.SYSTEM are shown, sorted by popularity by default.
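Before the examples, here is a minimal sketch of the pattern most of them rely on: production code accepts a Time instance and callers pass Time.SYSTEM (the wall clock), so tests can inject a mock clock instead. Only Time, Time.SYSTEM and Time.milliseconds() below are real Kafka API; the UploadTimer class is a hypothetical illustration, not taken from the examples.

import org.apache.kafka.common.utils.Time;

// Hypothetical helper: times a piece of work using an injected clock.
public class UploadTimer {
    private final Time time;

    public UploadTimer(Time time) {
        // fall back to the wall clock when no Time is supplied
        this.time = time != null ? time : Time.SYSTEM;
    }

    public long timeMillis(Runnable work) {
        final long start = time.milliseconds(); // current time in ms from the injected clock
        work.run();
        return time.milliseconds() - start;     // elapsed milliseconds
    }
}

In production this would be constructed as new UploadTimer(Time.SYSTEM); a unit test can pass a mock Time and advance it manually, which is why the Kafka code in the examples below threads a Time parameter through constructors instead of calling System.currentTimeMillis() directly.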
Example 1: shouldAddUserDefinedEndPointToSubscription
@Test
public void shouldAddUserDefinedEndPointToSubscription() throws Exception {
    final Properties properties = configProps();
    properties.put(StreamsConfig.APPLICATION_SERVER_CONFIG, "localhost:8080");
    final StreamsConfig config = new StreamsConfig(properties);
    final String applicationId = "application-id";
    builder.setApplicationId(applicationId);
    builder.addSource("source", "input");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source");
    builder.addSink("sink", "output", "processor");
    final UUID uuid1 = UUID.randomUUID();
    final String client1 = "client1";
    final StreamThread streamThread = new StreamThread(builder, config, mockClientSupplier, applicationId, client1, uuid1, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST),
            0);
    partitionAssignor.configure(config.getConsumerConfigs(streamThread, applicationId, client1));
    final PartitionAssignor.Subscription subscription = partitionAssignor.subscription(Utils.mkSet("input"));
    final SubscriptionInfo subscriptionInfo = SubscriptionInfo.decode(subscription.userData());
    assertEquals("localhost:8080", subscriptionInfo.userEndPoint);
}
Example 2: shouldThrowExceptionIfApplicationServerConfigPortIsNotAnInteger
@Test
public void shouldThrowExceptionIfApplicationServerConfigPortIsNotAnInteger() throws Exception {
    final Properties properties = configProps();
    final String myEndPoint = "localhost:j87yhk";
    properties.put(StreamsConfig.APPLICATION_SERVER_CONFIG, myEndPoint);
    final StreamsConfig config = new StreamsConfig(properties);
    final UUID uuid1 = UUID.randomUUID();
    final String client1 = "client1";
    final String applicationId = "application-id";
    builder.setApplicationId(applicationId);
    final StreamThread streamThread = new StreamThread(builder, config, mockClientSupplier, applicationId, client1, uuid1,
            new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST),
            0);
    try {
        partitionAssignor.configure(config.getConsumerConfigs(streamThread, applicationId, client1));
        Assert.fail("expected to an exception due to invalid config");
    } catch (ConfigException e) {
        // pass
    }
}
Example 3: TestStreamTask
TestStreamTask(final TaskId id,
               final String applicationId,
               final Collection<TopicPartition> partitions,
               final ProcessorTopology topology,
               final Consumer<byte[], byte[]> consumer,
               final Producer<byte[], byte[]> producer,
               final Consumer<byte[], byte[]> restoreConsumer,
               final StreamsConfig config,
               final StreamsMetrics metrics,
               final StateDirectory stateDirectory) {
    super(id,
          applicationId,
          partitions,
          topology,
          consumer,
          new StoreChangelogReader(restoreConsumer, Time.SYSTEM, 5000),
          config,
          metrics,
          stateDirectory,
          null,
          new MockTime(),
          producer);
}
Example 4: shouldThrowExceptionIfApplicationServerConfigIsNotHostPortPair
@Test
public void shouldThrowExceptionIfApplicationServerConfigIsNotHostPortPair() throws Exception {
    final Properties properties = configProps();
    final String myEndPoint = "localhost";
    properties.put(StreamsConfig.APPLICATION_SERVER_CONFIG, myEndPoint);
    final StreamsConfig config = new StreamsConfig(properties);
    final UUID uuid1 = UUID.randomUUID();
    final String client1 = "client1";
    final String applicationId = "application-id";
    builder.setApplicationId(applicationId);
    final StreamThread streamThread = new StreamThread(builder, config, mockClientSupplier, applicationId, client1, uuid1,
            new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST),
            0);
    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(streamThread.config, mockClientSupplier.restoreConsumer));
    try {
        partitionAssignor.configure(config.getConsumerConfigs(streamThread, applicationId, client1));
        Assert.fail("expected to an exception due to invalid config");
    } catch (ConfigException e) {
        // pass
    }
}
Example 5: createKafkaBasedLog
private KafkaBasedLog<byte[], byte[]> createKafkaBasedLog(String topic, Map<String, Object> producerProps,
        Map<String, Object> consumerProps,
        Callback<ConsumerRecord<byte[], byte[]>> consumedCallback,
        final NewTopic topicDescription, final Map<String, Object> adminProps) {
    Runnable createTopics = new Runnable() {
        @Override
        public void run() {
            try (TopicAdmin admin = new TopicAdmin(adminProps)) {
                admin.createTopics(topicDescription);
            }
        }
    };
    return new KafkaBasedLog<>(topic, producerProps, consumerProps, consumedCallback, Time.SYSTEM, createTopics);
}
Example 6: createKafkaBasedLog
private KafkaBasedLog<String, byte[]> createKafkaBasedLog(String topic, Map<String, Object> producerProps,
        Map<String, Object> consumerProps,
        Callback<ConsumerRecord<String, byte[]>> consumedCallback,
        final NewTopic topicDescription, final Map<String, Object> adminProps) {
    Runnable createTopics = new Runnable() {
        @Override
        public void run() {
            try (TopicAdmin admin = new TopicAdmin(adminProps)) {
                admin.createTopics(topicDescription);
            }
        }
    };
    return new KafkaBasedLog<>(topic, producerProps, consumerProps, consumedCallback, Time.SYSTEM, createTopics);
}
Example 7: MockKafkaAdminClientEnv
public MockKafkaAdminClientEnv(Cluster cluster, Map<String, Object> config) {
    this.adminClientConfig = new AdminClientConfig(config);
    this.cluster = cluster;
    this.metadata = new Metadata(adminClientConfig.getLong(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG),
            adminClientConfig.getLong(AdminClientConfig.METADATA_MAX_AGE_CONFIG), false);
    this.mockClient = new MockClient(Time.SYSTEM, this.metadata);
    this.client = KafkaAdminClient.createInternal(adminClientConfig, mockClient, metadata);
}
Example 8: MeteredKeyValueStore
public MeteredKeyValueStore(final KeyValueStore<K, V> inner,
                            final String metricScope,
                            final Time time) {
    super(inner);
    this.inner = inner;
    this.metricScope = metricScope;
    // use the supplied clock if one was given, otherwise fall back to the system wall clock
    this.time = time != null ? time : Time.SYSTEM;
}
Example 9: createTask
private AbstractTask createTask(final Consumer consumer) {
    final MockTime time = new MockTime();
    final Properties properties = new Properties();
    properties.put(StreamsConfig.APPLICATION_ID_CONFIG, "app-id");
    properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummyhost:9092");
    final StreamsConfig config = new StreamsConfig(properties);
    return new AbstractTask(new TaskId(0, 0),
            "app",
            Collections.singletonList(new TopicPartition("t", 0)),
            new ProcessorTopology(Collections.<ProcessorNode>emptyList(),
                    Collections.<String, SourceNode>emptyMap(),
                    Collections.<String, SinkNode>emptyMap(),
                    Collections.<StateStore>emptyList(),
                    Collections.<String, String>emptyMap(),
                    Collections.<StateStore>emptyList()),
            consumer,
            new StoreChangelogReader(consumer, Time.SYSTEM, 5000),
            false,
            new StateDirectory("app", TestUtils.tempDirectory().getPath(), time),
            new ThreadCache("testCache", 0, new MockStreamsMetrics(new Metrics())),
            config) {
        @Override
        public void resume() {}
        @Override
        public void commit() {}
        @Override
        public void suspend() {}
        @Override
        public void close(final boolean clean) {}
    };
}
Example 10: testSubscription
@SuppressWarnings("unchecked")
@Test
public void testSubscription() throws Exception {
    builder.addSource("source1", "topic1");
    builder.addSource("source2", "topic2");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source1", "source2");
    final Set<TaskId> prevTasks = Utils.mkSet(
            new TaskId(0, 1), new TaskId(1, 1), new TaskId(2, 1));
    final Set<TaskId> cachedTasks = Utils.mkSet(
            new TaskId(0, 1), new TaskId(1, 1), new TaskId(2, 1),
            new TaskId(0, 2), new TaskId(1, 2), new TaskId(2, 2));
    String clientId = "client-id";
    UUID processId = UUID.randomUUID();
    StreamThread thread = new StreamThread(builder, config, new MockClientSupplier(), "test", clientId, processId, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST),
            0) {
        @Override
        public Set<TaskId> prevActiveTasks() {
            return prevTasks;
        }
        @Override
        public Set<TaskId> cachedTasks() {
            return cachedTasks;
        }
    };
    partitionAssignor.configure(config.getConsumerConfigs(thread, "test", clientId));
    PartitionAssignor.Subscription subscription = partitionAssignor.subscription(Utils.mkSet("topic1", "topic2"));
    Collections.sort(subscription.topics());
    assertEquals(Utils.mkList("topic1", "topic2"), subscription.topics());
    Set<TaskId> standbyTasks = new HashSet<>(cachedTasks);
    standbyTasks.removeAll(prevTasks);
    SubscriptionInfo info = new SubscriptionInfo(processId, prevTasks, standbyTasks, null);
    assertEquals(info.encode(), subscription.userData());
}
Example 11: testNetworkThreadTimeRecorded
/**
 * Tests that time spent on the network thread is accumulated on each channel
 */
@Test
public void testNetworkThreadTimeRecorded() throws Exception {
    selector.close();
    this.selector = new Selector(NetworkReceive.UNLIMITED, 5000, new Metrics(), Time.SYSTEM,
            "MetricGroup", new HashMap<String, String>(), false, true, channelBuilder);
    String node = "0";
    server = createEchoServer(SecurityProtocol.SSL);
    InetSocketAddress addr = new InetSocketAddress("localhost", server.port());
    selector.connect(node, addr, BUFFER_SIZE, BUFFER_SIZE);
    String message = TestUtils.randomString(10 * 1024);
    NetworkTestUtils.waitForChannelReady(selector, node);
    KafkaChannel channel = selector.channel(node);
    assertTrue("SSL handshake time not recorded", channel.getAndResetNetworkThreadTimeNanos() > 0);
    assertEquals("Time not reset", 0, channel.getAndResetNetworkThreadTimeNanos());
    selector.mute(node);
    selector.send(new NetworkSend(node, ByteBuffer.wrap(message.getBytes())));
    while (selector.completedSends().isEmpty()) {
        selector.poll(100L);
    }
    assertTrue("Send time not recorded", channel.getAndResetNetworkThreadTimeNanos() > 0);
    assertEquals("Time not reset", 0, channel.getAndResetNetworkThreadTimeNanos());
    selector.unmute(node);
    while (selector.completedReceives().isEmpty()) {
        selector.poll(100L);
    }
    assertTrue("Receive time not recorded", channel.getAndResetNetworkThreadTimeNanos() > 0);
}
Example 12: testAssignWithInternalTopicThatsSourceIsAnotherInternalTopic
@Test
public void testAssignWithInternalTopicThatsSourceIsAnotherInternalTopic() throws Exception {
    String applicationId = "test";
    builder.setApplicationId(applicationId);
    builder.addInternalTopic("topicX");
    builder.addSource("source1", "topic1");
    builder.addProcessor("processor1", new MockProcessorSupplier(), "source1");
    builder.addSink("sink1", "topicX", "processor1");
    builder.addSource("source2", "topicX");
    builder.addInternalTopic("topicZ");
    builder.addProcessor("processor2", new MockProcessorSupplier(), "source2");
    builder.addSink("sink2", "topicZ", "processor2");
    builder.addSource("source3", "topicZ");
    List<String> topics = Utils.mkList("topic1", "test-topicX", "test-topicZ");
    Set<TaskId> allTasks = Utils.mkSet(task0, task1, task2);
    UUID uuid1 = UUID.randomUUID();
    String client1 = "client1";
    StreamThread thread10 = new StreamThread(builder, config, mockClientSupplier, applicationId, client1, uuid1, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST),
            0);
    partitionAssignor.configure(config.getConsumerConfigs(thread10, applicationId, client1));
    MockInternalTopicManager internalTopicManager = new MockInternalTopicManager(thread10.config, mockClientSupplier.restoreConsumer);
    partitionAssignor.setInternalTopicManager(internalTopicManager);
    Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    Set<TaskId> emptyTasks = Collections.emptySet();
    subscriptions.put("consumer10",
            new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, emptyTasks, emptyTasks, userEndPoint).encode()));
    partitionAssignor.assign(metadata, subscriptions);
    // check prepared internal topics
    assertEquals(2, internalTopicManager.readyTopics.size());
    assertEquals(allTasks.size(), (long) internalTopicManager.readyTopics.get("test-topicZ"));
}
Example 13: main
public static void main(String[] args) throws Exception {
    if (args.length < 1) {
        log.info("Usage: ConnectDistributed worker.properties");
        Exit.exit(1);
    }
    String workerPropsFile = args[0];
    Map<String, String> workerProps = !workerPropsFile.isEmpty() ?
            Utils.propsToStringMap(Utils.loadProps(workerPropsFile)) : Collections.<String, String>emptyMap();
    Time time = Time.SYSTEM; // the real wall clock, shared by the worker, status store and herder below
    Plugins plugins = new Plugins(workerProps);
    plugins.compareAndSwapWithDelegatingLoader();
    DistributedConfig config = new DistributedConfig(workerProps);
    RestServer rest = new RestServer(config);
    URI advertisedUrl = rest.advertisedUrl();
    String workerId = advertisedUrl.getHost() + ":" + advertisedUrl.getPort();
    KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore();
    offsetBackingStore.configure(config);
    Worker worker = new Worker(workerId, time, plugins, config, offsetBackingStore);
    StatusBackingStore statusBackingStore = new KafkaStatusBackingStore(time, worker.getInternalValueConverter());
    statusBackingStore.configure(config);
    ConfigBackingStore configBackingStore = new KafkaConfigBackingStore(worker.getInternalValueConverter(), config);
    DistributedHerder herder = new DistributedHerder(config, time, worker, statusBackingStore, configBackingStore,
            advertisedUrl.toString());
    final Connect connect = new Connect(herder, rest);
    try {
        connect.start();
    } catch (Exception e) {
        log.error("Failed to start Connect", e);
        connect.stop();
    }
    // Shutdown will be triggered by Ctrl-C or via HTTP shutdown request
    connect.awaitStop();
}
Example 14: RocksDBSessionStoreSupplier
public RocksDBSessionStoreSupplier(String name, long retentionPeriod, Serde<K> keySerde, Serde<V> valueSerde, boolean logged, Map<String, String> logConfig, boolean cached) {
    super(name, keySerde, valueSerde, Time.SYSTEM, logged, logConfig);
    this.retentionPeriod = retentionPeriod;
    this.cached = cached;
}
Example 15: testRegexMatchesTopicsAWhenCreated
@Test
public void testRegexMatchesTopicsAWhenCreated() throws Exception {
    final Serde<String> stringSerde = Serdes.String();
    final List<String> expectedFirstAssignment = Arrays.asList("TEST-TOPIC-1");
    final List<String> expectedSecondAssignment = Arrays.asList("TEST-TOPIC-1", "TEST-TOPIC-2");
    final StreamsConfig streamsConfig = new StreamsConfig(streamsConfiguration);
    CLUSTER.createTopic("TEST-TOPIC-1");
    final KStreamBuilder builder = new KStreamBuilder();
    final KStream<String, String> pattern1Stream = builder.stream(Pattern.compile("TEST-TOPIC-\\d"));
    pattern1Stream.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);
    final KafkaStreams streams = new KafkaStreams(builder, streamsConfiguration);
    final Field streamThreadsField = streams.getClass().getDeclaredField("threads");
    streamThreadsField.setAccessible(true);
    final StreamThread[] streamThreads = (StreamThread[]) streamThreadsField.get(streams);
    final StreamThread originalThread = streamThreads[0];
    final TestStreamThread testStreamThread = new TestStreamThread(builder, streamsConfig,
            new DefaultKafkaClientSupplier(),
            originalThread.applicationId, originalThread.clientId, originalThread.processId, new Metrics(), Time.SYSTEM);
    final TestCondition oneTopicAdded = new TestCondition() {
        @Override
        public boolean conditionMet() {
            return testStreamThread.assignedTopicPartitions.equals(expectedFirstAssignment);
        }
    };
    streamThreads[0] = testStreamThread;
    streams.start();
    TestUtils.waitForCondition(oneTopicAdded, STREAM_TASKS_NOT_UPDATED);
    CLUSTER.createTopic("TEST-TOPIC-2");
    final TestCondition secondTopicAdded = new TestCondition() {
        @Override
        public boolean conditionMet() {
            return testStreamThread.assignedTopicPartitions.equals(expectedSecondAssignment);
        }
    };
    TestUtils.waitForCondition(secondTopicAdded, STREAM_TASKS_NOT_UPDATED);
    streams.close();
}