This article collects typical usage examples of the Java class org.apache.kafka.common.utils.Time. If you are wondering what the Time class is for and how to use it, the curated code examples below may help.
The Time class belongs to the org.apache.kafka.common.utils package. Fifteen code examples of the Time class are shown below, sorted by popularity by default.
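Before the examples, here is a minimal sketch of the Time abstraction itself: production code passes Time.SYSTEM (the wall clock), while tests substitute a mock clock that advances deterministically. This sketch assumes Kafka's test-utility class org.apache.kafka.common.utils.MockTime; it is illustrative only and not taken from the examples below.
import org.apache.kafka.common.utils.MockTime;
import org.apache.kafka.common.utils.Time;

public class TimeBasics {
    public static void main(String[] args) {
        // Production: the singleton wall-clock implementation.
        Time wallClock = Time.SYSTEM;
        System.out.println("now = " + wallClock.milliseconds());

        // Tests: a mock clock that only moves when told to.
        MockTime mockTime = new MockTime();
        long before = mockTime.milliseconds();
        mockTime.sleep(100L); // advances the mock clock by 100 ms without blocking the thread
        System.out.println("advanced by " + (mockTime.milliseconds() - before) + " ms");
    }
}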
Example 1: BulkProcessor
import org.apache.kafka.common.utils.Time; // import the required package/class
public BulkProcessor(
        Time time,
        BulkClient<R, B> bulkClient,
        int maxBufferedRecords,
        int maxInFlightRequests,
        int batchSize,
        long lingerMs,
        int maxRetries,
        long retryBackoffMs
) {
    this.time = time;
    this.bulkClient = bulkClient;
    this.maxBufferedRecords = maxBufferedRecords;
    this.batchSize = batchSize;
    this.lingerMs = lingerMs;
    this.maxRetries = maxRetries;
    this.retryBackoffMs = retryBackoffMs;
    unsentRecords = new ArrayDeque<>(maxBufferedRecords);
    final ThreadFactory threadFactory = makeThreadFactory();
    // The farmer thread turns buffered records into batches; the executor runs up to
    // maxInFlightRequests bulk requests concurrently.
    farmer = threadFactory.newThread(farmerTask());
    executor = Executors.newFixedThreadPool(maxInFlightRequests, threadFactory);
}
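The injected Time is what makes the retry/backoff behaviour of such a processor testable. A minimal sketch of how a retry loop could use it; the executeBatch call is a hypothetical stand-in, not BulkClient's actual API:
// Hypothetical retry loop: it sleeps on the injected clock, so MockTime can fast-forward it in tests.
for (int attempt = 1; attempt <= maxRetries; attempt++) {
    try {
        executeBatch(); // hypothetical helper standing in for the real bulk call
        break;
    } catch (RuntimeException e) {
        if (attempt == maxRetries) {
            throw e;
        }
        time.sleep(retryBackoffMs); // Time.sleep, not Thread.sleep: controllable in tests
    }
}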
Example 2: TrainingTask
import org.apache.kafka.common.utils.Time; // import the required package/class
TrainingTask(Time time,
             LoadMonitorTaskRunner loadMonitorTaskRunner,
             MetricFetcherManager metricFetcherManager,
             SampleStore sampleStore,
             long configuredSnapshotWindowMs,
             long samplingIntervalMs,
             long trainingStartMs,
             long trainingEndMs) {
    _time = time;
    _loadMonitorTaskRunner = loadMonitorTaskRunner;
    _metricFetcherManager = metricFetcherManager;
    _sampleStore = sampleStore;
    _configuredSnapshotWindowMs = configuredSnapshotWindowMs;
    _samplingIntervalMs = samplingIntervalMs;
    _trainingStartMs = trainingStartMs;
    _trainingEndMs = trainingEndMs;
    _nextSamplingStartingMs = trainingStartMs;
    _nextSamplingEndMs = trainingStartMs + _samplingIntervalMs;
}
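The last two assignments seed the first sampling window: it starts at trainingStartMs and ends one sampling interval later. A sketch of how such a task could slide the window after each round (a hypothetical helper, not shown in the original class):
// Hypothetical: advance the sampling window by one interval, never past the training end.
private void advanceSamplingWindow() {
    _nextSamplingStartingMs = _nextSamplingEndMs;
    _nextSamplingEndMs = Math.min(_nextSamplingEndMs + _samplingIntervalMs, _trainingEndMs);
}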
Example 3: LoadMonitorTaskRunner
import org.apache.kafka.common.utils.Time; // import the required package/class
/**
 * Package-private constructor shared with unit tests to avoid duplicated setup code.
 *
 * @param config The load monitor configurations.
 * @param metricFetcherManager The metric fetcher manager.
 * @param metricSampleAggregator The queue that holds the metric samples.
 * @param metadataClient The metadata of the cluster.
 * @param time The time object.
 */
LoadMonitorTaskRunner(KafkaCruiseControlConfig config,
                      MetricFetcherManager metricFetcherManager,
                      MetricSampleAggregator metricSampleAggregator,
                      MetadataClient metadataClient,
                      Time time) {
    _time = time;
    _metricFetcherManager = metricFetcherManager;
    _metricSampleAggregator = metricSampleAggregator;
    _metadataClient = metadataClient;
    _sampleStore = config.getConfiguredInstance(KafkaCruiseControlConfig.SAMPLE_STORE_CLASS_CONFIG, SampleStore.class);
    long samplingIntervalMs = config.getLong(KafkaCruiseControlConfig.METRIC_SAMPLING_INTERVAL_MS_CONFIG);
    _samplingScheduler =
        Executors.newScheduledThreadPool(2, new KafkaCruiseControlThreadFactory("SamplingScheduler", true, LOG));
    _samplingIntervalMs = samplingIntervalMs;
    _configuredNumSnapshots = config.getInt(KafkaCruiseControlConfig.NUM_LOAD_SNAPSHOTS_CONFIG);
    _configuredSnapshotWindowMs = config.getLong(KafkaCruiseControlConfig.LOAD_SNAPSHOT_WINDOW_MS_CONFIG);
    _state = new AtomicReference<>(NOT_STARTED);
    _bootstrapProgress = -1.0;
}
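A sketch of how the runner might use the scheduler built above to kick off periodic sampling; the task method is a hypothetical placeholder, not part of the original class:
// Hypothetical: run one sampling pass per configured interval on the two-thread scheduler.
_samplingScheduler.scheduleAtFixedRate(
    this::runSamplingOnce,   // hypothetical method performing a single fetch cycle
    _samplingIntervalMs,     // initial delay
    _samplingIntervalMs,     // period
    TimeUnit.MILLISECONDS);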
Example 4: sendAndReceive
import org.apache.kafka.common.utils.Time; // import the required package/class
/**
* Invokes `client.send` followed by 1 or more `client.poll` invocations until a response is received or a
* disconnection happens (which can happen for a number of reasons including a request timeout).
*
* In case of a disconnection, an `IOException` is thrown.
*
* This method is useful for implementing blocking behaviour on top of the non-blocking `NetworkClient`; use it with
* care.
*/
public static ClientResponse sendAndReceive(KafkaClient client, ClientRequest request, Time time) throws IOException {
    // send the request
    client.send(request, time.milliseconds());
    while (true) {
        List<ClientResponse> responses = client.poll(Long.MAX_VALUE, time.milliseconds());
        for (ClientResponse response : responses) {
            if (response.requestHeader().correlationId() == request.correlationId()) {
                if (response.wasDisconnected()) {
                    throw new IOException("Connection to " + response.destination() + " was disconnected before the response was read");
                }
                if (response.versionMismatch() != null) {
                    throw response.versionMismatch();
                }
                return response;
            }
        }
    }
}
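A sketch of a blocking metadata round-trip built on this helper; it assumes the method lives in a utility class such as Kafka's NetworkClientUtils, and nodeId is a placeholder for a ready broker's id:
// Hypothetical blocking request/response exchange.
ClientRequest request = client.newClientRequest(
    nodeId,                               // placeholder broker id
    MetadataRequest.Builder.allTopics(),
    time.milliseconds(),
    true);                                // a response is expected
ClientResponse response = NetworkClientUtils.sendAndReceive(client, request, time);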
Example 5: testLoadFailedBrokersFromZK
import org.apache.kafka.common.utils.Time; // import the required package/class
@Test
public void testLoadFailedBrokersFromZK() throws Exception {
    Time mockTime = getMockTime();
    Queue<Anomaly> anomalies = new ConcurrentLinkedQueue<>();
    BrokerFailureDetector detector = createBrokerFailureDetector(anomalies, mockTime);
    try {
        detector.startDetection();
        int brokerId = 0;
        killBroker(brokerId);
        long start = System.currentTimeMillis();
        while (anomalies.isEmpty() && System.currentTimeMillis() < start + 30000) {
            // Busy-wait until an anomaly is reported, for at most 30 seconds.
        }
        assertEquals(Collections.singletonMap(brokerId, 100L), detector.failedBrokers());
        // Shut down, advance the clock and create a new detector.
        detector.shutdown();
        mockTime.sleep(100L);
        detector = createBrokerFailureDetector(anomalies, mockTime);
        // Start the newly created detector; the recorded broker failure time should remain the original value.
        detector.startDetection();
        assertEquals(Collections.singletonMap(brokerId, 100L), detector.failedBrokers());
    } finally {
        detector.shutdown();
    }
}
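The empty busy-wait loop above is a common test idiom; a reusable helper with the same behaviour could look like this sketch (not part of the original test):
// Hypothetical helper: spin until the condition holds or timeoutMs elapses.
static boolean waitForCondition(java.util.function.BooleanSupplier condition, long timeoutMs)
        throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!condition.getAsBoolean() && System.currentTimeMillis() < deadline) {
        Thread.sleep(10L); // yield instead of burning a core
    }
    return condition.getAsBoolean();
}
The test could then call waitForCondition(() -> !anomalies.isEmpty(), 30000L) instead of spinning with an empty loop body.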
Example 6: testSelfHealingDisabled
import org.apache.kafka.common.utils.Time; // import the required package/class
@Test
public void testSelfHealingDisabled() {
    final long failureTime1 = 200L;
    final long failureTime2 = 400L;
    final long startTime = 500L;
    Time mockTime = new MockTime(startTime);
    TestingBrokerFailureAutoFixNotifier anomalyNotifier = new TestingBrokerFailureAutoFixNotifier(mockTime);
    anomalyNotifier.configure(Collections.singletonMap(SelfHealingNotifier.SELF_HEALING_ENABLED_CONFIG, "false"));
    Map<Integer, Long> failedBrokers = new HashMap<>();
    failedBrokers.put(1, failureTime1);
    failedBrokers.put(2, failureTime2);
    mockTime.sleep(SelfHealingNotifier.DEFAULT_AUTO_FIX_THRESHOLD_MS + failureTime1);
    anomalyNotifier.resetAlert();
    AnomalyNotificationResult result = anomalyNotifier.onBrokerFailure(new BrokerFailures(failedBrokers));
    assertEquals(AnomalyNotificationResult.Action.IGNORE, result.action());
    assertTrue(anomalyNotifier.alertCalled);
    assertFalse(anomalyNotifier.autoFixTriggered);
}
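The notifier's decision boils down to comparing the (mock) clock against each recorded failure time plus the auto-fix threshold. A condensed sketch of that gate, with hypothetical names:
// Hypothetical condensation of the auto-fix gate: fix only when self-healing is enabled
// and the failure has been outstanding longer than the threshold.
boolean shouldAutoFix(Time time, long failureTimeMs, long thresholdMs, boolean selfHealingEnabled) {
    return selfHealingEnabled && time.milliseconds() >= failureTimeMs + thresholdMs;
}
With self-healing disabled, as in this test, the notifier still alerts but returns IGNORE even though the threshold has long passed.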
Example 7: NetworkClient
import org.apache.kafka.common.utils.Time; // import the required package/class
public NetworkClient(Selectable selector,
                     Metadata metadata,
                     String clientId,
                     int maxInFlightRequestsPerConnection,
                     long reconnectBackoffMs,
                     long reconnectBackoffMax,
                     int socketSendBuffer,
                     int socketReceiveBuffer,
                     int requestTimeoutMs,
                     Time time,
                     boolean discoverBrokerVersions,
                     ApiVersions apiVersions) {
    this(null, metadata, selector, clientId, maxInFlightRequestsPerConnection,
        reconnectBackoffMs, reconnectBackoffMax,
        socketSendBuffer, socketReceiveBuffer, requestTimeoutMs, time,
        discoverBrokerVersions, apiVersions, null);
}
Example 8: cleanUp
import org.apache.kafka.common.utils.Time; // import the required package/class
/**
 * Do a clean up of the local {@link StateStore} directory ({@link StreamsConfig#STATE_DIR_CONFIG}) by deleting all
 * data with regard to the {@link StreamsConfig#APPLICATION_ID_CONFIG application ID}.
 * <p>
 * May only be called either before this {@code KafkaStreams} instance is {@link #start() started} or after the
 * instance is {@link #close() closed}.
 * <p>
 * Calling this method triggers a restore of local {@link StateStore}s on the next {@link #start() application start}.
 *
 * @throws IllegalStateException if the instance is currently running
 */
public void cleanUp() {
    if (state.isRunning()) {
        throw new IllegalStateException("Cannot clean up while running.");
    }
    final String appId = config.getString(StreamsConfig.APPLICATION_ID_CONFIG);
    final String stateDir = config.getString(StreamsConfig.STATE_DIR_CONFIG);
    final String localApplicationDir = stateDir + File.separator + appId;
    log.debug("{} Removing local Kafka Streams application data in {} for application {}.",
        logPrefix,
        localApplicationDir,
        appId);
    final StateDirectory stateDirectory = new StateDirectory(appId, "cleanup", stateDir, Time.SYSTEM);
    stateDirectory.cleanRemovedTasks(0);
}
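A sketch of the call sequence the javadoc allows; the builder and props variables are assumed to exist:
// cleanUp() is legal only while the instance is stopped.
KafkaStreams streams = new KafkaStreams(builder, props); // builder/props assumed
streams.cleanUp();   // OK: before start()
streams.start();
// calling streams.cleanUp() here would throw IllegalStateException
streams.close();
streams.cleanUp();   // OK: after close()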
Example 9: awaitReady
import org.apache.kafka.common.utils.Time; // import the required package/class
/**
 * Invokes `client.poll` to discard pending disconnects, followed by `client.ready` and 0 or more `client.poll`
 * invocations until the connection to `node` is ready, the timeout expires or the connection fails.
 *
 * It returns `true` if the call completes normally or `false` if the timeout expires. If the connection fails,
 * an `IOException` is thrown instead. Note that if the `NetworkClient` has been configured with a positive
 * connection timeout, it is possible for this method to raise an `IOException` for a previous connection which
 * has recently disconnected.
 *
 * This method is useful for implementing blocking behaviour on top of the non-blocking `NetworkClient`; use it with
 * care.
 */
public static boolean awaitReady(KafkaClient client, Node node, Time time, long timeoutMs) throws IOException {
    if (timeoutMs < 0) {
        throw new IllegalArgumentException("Timeout must not be negative");
    }
    long startTime = time.milliseconds();
    long expiryTime = startTime + timeoutMs;
    if (isReady(client, node, startTime) || client.ready(node, startTime))
        return true;
    long attemptStartTime = time.milliseconds();
    while (!client.isReady(node, attemptStartTime) && attemptStartTime < expiryTime) {
        if (client.connectionFailed(node)) {
            throw new IOException("Connection to " + node + " failed.");
        }
        long pollTimeout = expiryTime - attemptStartTime;
        client.poll(pollTimeout, attemptStartTime);
        attemptStartTime = time.milliseconds();
    }
    return client.isReady(node, attemptStartTime);
}
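A sketch of failing fast before a send, assuming the helper lives in a class like NetworkClientUtils and requestTimeoutMs is a placeholder:
// Hypothetical: block until the connection is usable, then send.
if (!NetworkClientUtils.awaitReady(client, node, time, requestTimeoutMs)) {
    throw new IOException("Node " + node + " was not ready within " + requestTimeoutMs + " ms");
}
client.send(request, time.milliseconds());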
Example 10: GlobalStreamThread
import org.apache.kafka.common.utils.Time; // import the required package/class
public GlobalStreamThread(final ProcessorTopology topology,
                          final StreamsConfig config,
                          final Consumer<byte[], byte[]> globalConsumer,
                          final StateDirectory stateDirectory,
                          final Metrics metrics,
                          final Time time,
                          final String threadClientId) {
    super(threadClientId);
    this.time = time;
    this.config = config;
    this.topology = topology;
    this.consumer = globalConsumer;
    this.stateDirectory = stateDirectory;
    long cacheSizeBytes = Math.max(0, config.getLong(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG) /
        (config.getInt(StreamsConfig.NUM_STREAM_THREADS_CONFIG) + 1));
    this.streamsMetrics = new StreamsMetricsImpl(metrics, threadClientId, Collections.singletonMap("client-id", threadClientId));
    this.cache = new ThreadCache(threadClientId, cacheSizeBytes, streamsMetrics);
}
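Note the cache sizing above: the configured total cache (CACHE_MAX_BYTES_BUFFERING_CONFIG) is split across the stream threads plus this one global thread, hence the division by NUM_STREAM_THREADS_CONFIG + 1. For example, a 10 MB cache with 2 stream threads gives each of the 3 threads roughly 3.3 MB.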
Example 11: fetchMetadata
import org.apache.kafka.common.utils.Time; // import the required package/class
/**
 * Fetch the metadata for all topics.
 */
public MetadataResponse fetchMetadata() {
    final ClientRequest clientRequest = kafkaClient.newClientRequest(
        getAnyReadyBrokerId(),
        MetadataRequest.Builder.allTopics(),
        Time.SYSTEM.milliseconds(),
        true);
    final ClientResponse clientResponse = sendRequest(clientRequest);
    if (!clientResponse.hasResponse()) {
        throw new StreamsException("Empty response for client request.");
    }
    if (!(clientResponse.responseBody() instanceof MetadataResponse)) {
        throw new StreamsException("Inconsistent response type for internal topic metadata request. " +
            "Expected MetadataResponse but received " + clientResponse.responseBody().getClass().getName());
    }
    return (MetadataResponse) clientResponse.responseBody();
}
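A sketch of consuming the result, for example listing all topic names; the loop uses MetadataResponse.topicMetadata(), the standard accessor in this Kafka version:
// Print every topic name returned by the all-topics metadata request.
MetadataResponse metadata = fetchMetadata();
for (MetadataResponse.TopicMetadata topicMetadata : metadata.topicMetadata()) {
    System.out.println(topicMetadata.topic());
}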
Example 12: testBlockTimeout
import org.apache.kafka.common.utils.Time; // import the required package/class
/**
 * Test that a TimeoutException is thrown when there is not enough memory to allocate and the elapsed time
 * exceeds the specified maximum block time, and verify that the allocation finishes soon after maxBlockTimeMs.
 */
@Test
public void testBlockTimeout() throws Exception {
    BufferPool pool = new BufferPool(10, 1, metrics, Time.SYSTEM, metricGroup);
    ByteBuffer buffer1 = pool.allocate(1, maxBlockTimeMs);
    ByteBuffer buffer2 = pool.allocate(1, maxBlockTimeMs);
    ByteBuffer buffer3 = pool.allocate(1, maxBlockTimeMs);
    // First two buffers will be de-allocated within maxBlockTimeMs since the most recent de-allocation
    delayedDeallocate(pool, buffer1, maxBlockTimeMs / 2);
    delayedDeallocate(pool, buffer2, maxBlockTimeMs);
    // The third buffer will be de-allocated after maxBlockTimeMs since the most recent de-allocation
    delayedDeallocate(pool, buffer3, maxBlockTimeMs / 2 * 5);
    long beginTimeMs = Time.SYSTEM.milliseconds();
    try {
        pool.allocate(10, maxBlockTimeMs);
        fail("The allocation should have timed out: the pool cannot free 10 bytes within maxBlockTimeMs");
    } catch (TimeoutException e) {
        // expected
    }
    assertTrue("available memory " + pool.availableMemory(), pool.availableMemory() >= 9 && pool.availableMemory() <= 10);
    long endTimeMs = Time.SYSTEM.milliseconds();
    assertTrue("Allocation should finish not much later than maxBlockTimeMs", endTimeMs - beginTimeMs < maxBlockTimeMs + 1000);
}
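The delayedDeallocate helper is not shown on this page; a plausible reconstruction (hypothetical, inferred from how the test uses it) returns the buffer to the pool from a daemon thread after a delay:
// Hypothetical reconstruction: free `buffer` back to `pool` after delayMs on a background thread.
private void delayedDeallocate(final BufferPool pool, final ByteBuffer buffer, final long delayMs) {
    Thread thread = new Thread(() -> {
        try {
            Thread.sleep(delayMs);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        pool.deallocate(buffer);
    });
    thread.setDaemon(true);
    thread.start();
}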
Example 13: createStreamsTask
import org.apache.kafka.common.utils.Time; // import the required package/class
private StreamTask createStreamsTask(final String applicationId,
                                     final StreamsConfig streamsConfig,
                                     final MockClientSupplier clientSupplier,
                                     final ProcessorTopology topology,
                                     final TaskId taskId) {
    return new StreamTask(
        taskId,
        applicationId,
        Collections.singletonList(new TopicPartition(topicName, taskId.partition)),
        topology,
        clientSupplier.consumer,
        new StoreChangelogReader(clientSupplier.restoreConsumer, Time.SYSTEM, 5000),
        streamsConfig,
        new MockStreamsMetrics(new Metrics()),
        stateDirectory,
        null,
        new MockTime(),
        clientSupplier.getProducer(new HashMap<String, Object>())) {
        @Override
        protected void updateOffsetLimits() {}
    };
}
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 24, Source file: StreamThreadStateStoreProviderTest.java
Example 14: TestStreamTask
import org.apache.kafka.common.utils.Time; // import the required package/class
TestStreamTask(final TaskId id,
               final String applicationId,
               final Collection<TopicPartition> partitions,
               final ProcessorTopology topology,
               final Consumer<byte[], byte[]> consumer,
               final Producer<byte[], byte[]> producer,
               final Consumer<byte[], byte[]> restoreConsumer,
               final StreamsConfig config,
               final StreamsMetrics metrics,
               final StateDirectory stateDirectory) {
    super(id,
        applicationId,
        partitions,
        topology,
        consumer,
        new StoreChangelogReader(restoreConsumer, Time.SYSTEM, 5000),
        config,
        metrics,
        stateDirectory,
        null,
        new MockTime(),
        producer);
}
Example 15: shouldAddUserDefinedEndPointToSubscription
import org.apache.kafka.common.utils.Time; // import the required package/class
@Test
public void shouldAddUserDefinedEndPointToSubscription() throws Exception {
    final Properties properties = configProps();
    properties.put(StreamsConfig.APPLICATION_SERVER_CONFIG, "localhost:8080");
    final StreamsConfig config = new StreamsConfig(properties);
    final String applicationId = "application-id";
    builder.setApplicationId(applicationId);
    builder.addSource("source", "input");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source");
    builder.addSink("sink", "output", "processor");
    final UUID uuid1 = UUID.randomUUID();
    final String client1 = "client1";
    final StreamThread streamThread = new StreamThread(builder, config, mockClientSupplier, applicationId,
        client1, uuid1, new Metrics(), Time.SYSTEM,
        new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    partitionAssignor.configure(config.getConsumerConfigs(streamThread, applicationId, client1));
    final PartitionAssignor.Subscription subscription = partitionAssignor.subscription(Utils.mkSet("input"));
    final SubscriptionInfo subscriptionInfo = SubscriptionInfo.decode(subscription.userData());
    assertEquals("localhost:8080", subscriptionInfo.userEndPoint);
}
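The key configuration in this test is APPLICATION_SERVER_CONFIG, the host:port an instance advertises to its peers for interactive queries. A minimal sketch of setting it in application code; the property values are placeholders:
// Hypothetical application setup advertising this instance's RPC endpoint.
Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "application-id");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(StreamsConfig.APPLICATION_SERVER_CONFIG, "localhost:8080"); // host:port reachable by other instances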