This article collects typical usage examples of the Java method org.apache.kafka.common.utils.Time.milliseconds. If you have been wondering what Time.milliseconds does, how to use it, or where to find examples, the curated code samples below may help. You can also explore further usage of its enclosing class, org.apache.kafka.common.utils.Time.
The following shows 6 code examples of Time.milliseconds, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
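Before the examples, a quick orientation may help: Time is Kafka's clock abstraction, and milliseconds() returns the current wall-clock time as epoch milliseconds. A minimal sketch, assuming a kafka-clients version that provides the Time.SYSTEM singleton:

import org.apache.kafka.common.utils.Time;

public class TimeDemo {
    public static void main(String[] args) {
        Time time = Time.SYSTEM;          // production clock backed by System.currentTimeMillis()
        long nowMs = time.milliseconds(); // current time in epoch milliseconds
        System.out.println("now = " + nowMs);
    }
}

Passing Time in as a dependency, as every example below does, lets tests substitute a controllable clock for the system one.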
Example 1: awaitReady
import org.apache.kafka.common.utils.Time; // import the package/class this method depends on
/**
 * Invokes `client.poll` to discard pending disconnects, followed by `client.ready` and zero or more `client.poll`
 * invocations until the connection to `node` is ready, the timeout expires or the connection fails.
 *
 * It returns `true` if the call completes normally or `false` if the timeout expires. If the connection fails,
 * an `IOException` is thrown instead. Note that if the `NetworkClient` has been configured with a positive
 * connection timeout, it is possible for this method to raise an `IOException` for a previous connection which
 * has recently disconnected.
 *
 * This method is useful for implementing blocking behaviour on top of the non-blocking `NetworkClient`; use it
 * with care.
 */
public static boolean awaitReady(KafkaClient client, Node node, Time time, long timeoutMs) throws IOException {
    if (timeoutMs < 0) {
        throw new IllegalArgumentException("Timeout needs to be greater than 0");
    }
    long startTime = time.milliseconds();
    long expiryTime = startTime + timeoutMs;

    if (isReady(client, node, startTime) || client.ready(node, startTime))
        return true;

    long attemptStartTime = time.milliseconds();
    while (!client.isReady(node, attemptStartTime) && attemptStartTime < expiryTime) {
        if (client.connectionFailed(node)) {
            throw new IOException("Connection to " + node + " failed.");
        }
        long pollTimeout = expiryTime - attemptStartTime;
        client.poll(pollTimeout, attemptStartTime);
        attemptStartTime = time.milliseconds();
    }
    return client.isReady(node, attemptStartTime);
}
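A minimal usage sketch for the helper above (in Kafka it lives in NetworkClientUtils). The KafkaClient instance and the broker address here are assumptions, not a complete configuration:

// `client` is an already-configured KafkaClient (assumed to exist in scope)
Node node = new Node(0, "broker-1", 9092);   // illustrative broker address
try {
    boolean ready = awaitReady(client, node, Time.SYSTEM, 5_000L);
    if (!ready) {
        // timed out: the node did not become ready within 5 seconds
    }
} catch (IOException e) {
    // the connection to `node` failed while waiting
}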
Example 2: WorkerSinkTask
import org.apache.kafka.common.utils.Time; // import the package/class this method depends on
public WorkerSinkTask(ConnectorTaskId id,
                      SinkTask task,
                      TaskStatus.Listener statusListener,
                      TargetState initialState,
                      WorkerConfig workerConfig,
                      Converter keyConverter,
                      Converter valueConverter,
                      TransformationChain<SinkRecord> transformationChain,
                      ClassLoader loader,
                      Time time) {
    super(id, statusListener, initialState, loader);
    this.workerConfig = workerConfig;
    this.task = task;
    this.keyConverter = keyConverter;
    this.valueConverter = valueConverter;
    this.transformationChain = transformationChain;
    this.time = time;
    this.messageBatch = new ArrayList<>();
    this.currentOffsets = new HashMap<>();
    this.pausedForRedelivery = false;
    this.rebalanceException = null;
    // schedule the first offset commit one commit interval from "now"
    this.nextCommit = time.milliseconds() +
            workerConfig.getLong(WorkerConfig.OFFSET_COMMIT_INTERVAL_MS_CONFIG);
    this.committing = false;
    this.commitSeqno = 0;
    this.commitStarted = -1;
    this.commitFailures = 0;
}
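The nextCommit assignment above is the usual deadline idiom built on time.milliseconds(): store an absolute deadline once, compare against the current time on each loop pass, and re-arm after firing. A sketch of that idiom, with the loop condition and commit routine as assumptions:

long intervalMs = workerConfig.getLong(WorkerConfig.OFFSET_COMMIT_INTERVAL_MS_CONFIG);
long nextCommit = time.milliseconds() + intervalMs;  // absolute deadline, not a countdown

while (!isStopping()) {                              // hypothetical loop condition
    long now = time.milliseconds();
    if (now >= nextCommit) {
        commitOffsets(now);                          // hypothetical commit routine
        nextCommit = now + intervalMs;               // re-arm for the next interval
    }
    // ... poll and deliver records ...
}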
Example 3: Sensor
import org.apache.kafka.common.utils.Time; // import the package/class this method depends on
Sensor(Metrics registry, String name, Sensor[] parents, MetricConfig config, Time time,
       long inactiveSensorExpirationTimeSeconds, RecordingLevel recordingLevel) {
    super();
    this.registry = registry;
    this.name = Utils.notNull(name);
    this.parents = parents == null ? new Sensor[0] : parents;
    this.metrics = new ArrayList<>();
    this.stats = new ArrayList<>();
    this.config = config;
    this.time = time;
    this.inactiveSensorExpirationTimeMs = TimeUnit.MILLISECONDS.convert(inactiveSensorExpirationTimeSeconds, TimeUnit.SECONDS);
    // treat construction time as the first "record", so the inactivity window starts now
    this.lastRecordTime = time.milliseconds();
    this.recordingLevel = recordingLevel;
    checkForest(new HashSet<Sensor>());
}
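Seeding lastRecordTime with time.milliseconds() means the inactivity window starts at construction, so a sensor that is never recorded to still becomes eligible for eviction. The expiry check mirrors Kafka's Sensor.hasExpired(); treat this body as a sketch:

public boolean hasExpired() {
    // expired when nothing has been recorded within the inactivity window;
    // record(...) refreshes lastRecordTime with time.milliseconds()
    return (time.milliseconds() - this.lastRecordTime) > this.inactiveSensorExpirationTimeMs;
}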
Example 4: build
import org.apache.kafka.common.utils.Time; // import the package/class this method depends on
public MsSqlChange build(TableMetadataProvider.TableMetadata tableMetadata, ResultSet resultSet, Time time) throws SQLException {
    MsSqlChange change = new MsSqlChange();
    change.timestamp = time.milliseconds();
    change.databaseName = tableMetadata.databaseName();
    change.schemaName = tableMetadata.schemaName();
    change.tableName = tableMetadata.tableName();

    final long sysChangeVersion = resultSet.getLong("__metadata_sys_change_version");
    final long sysChangeCreationVersion = resultSet.getLong("__metadata_sys_change_creation_version");
    final String changeOperation = resultSet.getString("__metadata_sys_change_operation");
    change.metadata = ImmutableMap.of(
        "sys_change_operation", changeOperation,
        "sys_change_creation_version", String.valueOf(sysChangeCreationVersion),
        "sys_change_version", String.valueOf(sysChangeVersion)
    );

    switch (changeOperation) {
        case "I":
            change.changeType = ChangeType.INSERT;
            break;
        case "U":
            change.changeType = ChangeType.UPDATE;
            break;
        case "D":
            change.changeType = ChangeType.DELETE;
            break;
        default:
            throw new UnsupportedOperationException(
                String.format("Unsupported sys_change_operation of '%s'", changeOperation)
            );
    }
    log.trace("build() - changeType = {}", change.changeType);

    change.keyColumns = new ArrayList<>(tableMetadata.keyColumns().size());
    change.valueColumns = new ArrayList<>(tableMetadata.columnSchemas().size());

    for (Map.Entry<String, Schema> kvp : tableMetadata.columnSchemas().entrySet()) {
        String columnName = kvp.getKey();
        Schema schema = kvp.getValue();
        Object value;
        if (Schema.Type.INT8 == schema.type()) {
            // SQL Server stores TINYINT as a single unsigned byte (0-255), so read it as a byte.
            value = resultSet.getByte(columnName);
        } else if (Schema.Type.INT32 == schema.type() &&
                Date.LOGICAL_NAME.equals(schema.name())) {
            value = new java.util.Date(
                resultSet.getDate(columnName, calendar).getTime()
            );
        } else if (Schema.Type.INT32 == schema.type() &&
                org.apache.kafka.connect.data.Time.LOGICAL_NAME.equals(schema.name())) {
            value = new java.util.Date(
                resultSet.getTime(columnName, calendar).getTime()
            );
        } else {
            value = resultSet.getObject(columnName);
        }
        log.trace("build() - columnName = '{}' value = '{}'", columnName, value);
        MsSqlColumnValue columnValue = new MsSqlColumnValue(columnName, schema, value);
        change.valueColumns.add(columnValue);
        if (tableMetadata.keyColumns().contains(columnName)) {
            change.keyColumns.add(columnValue);
        }
    }
    return change;
}
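Because build() takes Time as a parameter instead of calling System.currentTimeMillis() directly, a test can pin change.timestamp to a known value. A sketch using MockTime from Kafka's test utilities; the builder and the tableMetadata/resultSet fixtures are assumed from the surrounding test setup:

import org.apache.kafka.common.utils.MockTime;

MockTime time = new MockTime();      // clock frozen at construction, advances only on demand
long expected = time.milliseconds();
MsSqlChange change = builder.build(tableMetadata, resultSet, time);
// change.timestamp == expected, because MockTime did not advance during build()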
Example 5: StreamThread
import org.apache.kafka.common.utils.Time; // import the package/class this method depends on
public StreamThread(final TopologyBuilder builder,
                    final StreamsConfig config,
                    final KafkaClientSupplier clientSupplier,
                    final String applicationId,
                    final String clientId,
                    final UUID processId,
                    final Metrics metrics,
                    final Time time,
                    final StreamsMetadataState streamsMetadataState,
                    final long cacheSizeBytes) {
    super(clientId + "-StreamThread-" + STREAM_THREAD_ID_SEQUENCE.getAndIncrement());
    this.applicationId = applicationId;
    this.config = config;
    this.builder = builder;
    this.clientSupplier = clientSupplier;
    sourceTopicPattern = builder.sourceTopicPattern();
    this.clientId = clientId;
    this.processId = processId;
    partitionGrouper = config.getConfiguredInstance(StreamsConfig.PARTITION_GROUPER_CLASS_CONFIG, PartitionGrouper.class);
    this.streamsMetadataState = streamsMetadataState;
    threadClientId = getName();
    logPrefix = String.format("stream-thread [%s]", threadClientId);

    streamsMetrics = new StreamsMetricsThreadImpl(metrics, "stream-metrics", "thread." + threadClientId,
        Collections.singletonMap("client-id", threadClientId));
    if (config.getLong(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG) < 0) {
        log.warn("{} Negative cache size passed in thread. Reverting to cache size of 0 bytes.", logPrefix);
    }
    cache = new ThreadCache(threadClientId, cacheSizeBytes, streamsMetrics);

    eosEnabled = StreamsConfig.EXACTLY_ONCE.equals(config.getString(StreamsConfig.PROCESSING_GUARANTEE_CONFIG));

    // set the consumer clients
    log.info("{} Creating consumer client", logPrefix);
    final Map<String, Object> consumerConfigs = config.getConsumerConfigs(this, applicationId, threadClientId);

    if (!builder.latestResetTopicsPattern().pattern().equals("") || !builder.earliestResetTopicsPattern().pattern().equals("")) {
        originalReset = (String) consumerConfigs.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG);
        log.info("{} Custom offset resets specified updating configs original auto offset reset {}", logPrefix, originalReset);
        consumerConfigs.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");
    }

    consumer = clientSupplier.getConsumer(consumerConfigs);
    log.info("{} Creating restore consumer client", logPrefix);
    restoreConsumer = clientSupplier.getRestoreConsumer(config.getRestoreConsumerConfigs(threadClientId));

    // initialize the task list
    // activeTasks needs to be concurrent as it can be accessed
    // by QueryableState
    activeTasks = new ConcurrentHashMap<>();
    standbyTasks = new HashMap<>();
    activeTasksByPartition = new HashMap<>();
    standbyTasksByPartition = new HashMap<>();
    prevActiveTasks = new HashSet<>();
    suspendedTasks = new HashMap<>();
    suspendedStandbyTasks = new HashMap<>();

    // standby ktables
    standbyRecords = new HashMap<>();

    stateDirectory = new StateDirectory(applicationId, threadClientId, config.getString(StreamsConfig.STATE_DIR_CONFIG), time);
    final Object maxPollInterval = consumerConfigs.get(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG);
    rebalanceTimeoutMs = (Integer) ConfigDef.parseType(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollInterval, Type.INT);
    pollTimeMs = config.getLong(StreamsConfig.POLL_MS_CONFIG);
    commitTimeMs = config.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG);
    cleanTimeMs = config.getLong(StreamsConfig.STATE_CLEANUP_DELAY_MS_CONFIG);

    this.time = time;
    timerStartedMs = time.milliseconds();
    lastCleanMs = Long.MAX_VALUE; // the cleaning cycle won't start until partition assignment
    lastCommitMs = timerStartedMs;
    rebalanceListener = new RebalanceListener(time, config.getInt(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG));
    setState(State.RUNNING);
}
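timerStartedMs and lastCommitMs seed the run loop's timers: each pass reads time.milliseconds() again, and the differences drive latency metrics and the commit schedule. A simplified sketch of the commit-interval check, with field names taken from the constructor above and the helper as an assumption:

void maybeCommit() {
    long now = time.milliseconds();
    if (commitTimeMs >= 0 && now - lastCommitMs > commitTimeMs) {
        commitAll();          // hypothetical: commit all active and standby tasks
        lastCommitMs = now;   // restart the commit interval from "now"
    }
}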
Example 6: ConsumerCoordinator
import org.apache.kafka.common.utils.Time; // import the package/class this method depends on
/**
 * Initialize the coordination manager.
 */
public ConsumerCoordinator(ConsumerNetworkClient client,
                           String groupId,
                           int rebalanceTimeoutMs,
                           int sessionTimeoutMs,
                           int heartbeatIntervalMs,
                           List<PartitionAssignor> assignors,
                           Metadata metadata,
                           SubscriptionState subscriptions,
                           Metrics metrics,
                           String metricGrpPrefix,
                           Time time,
                           long retryBackoffMs,
                           boolean autoCommitEnabled,
                           int autoCommitIntervalMs,
                           ConsumerInterceptors<?, ?> interceptors,
                           boolean excludeInternalTopics,
                           final boolean leaveGroupOnClose) {
    super(client,
          groupId,
          rebalanceTimeoutMs,
          sessionTimeoutMs,
          heartbeatIntervalMs,
          metrics,
          metricGrpPrefix,
          time,
          retryBackoffMs,
          leaveGroupOnClose);
    this.metadata = metadata;
    this.metadataSnapshot = new MetadataSnapshot(subscriptions, metadata.fetch());
    this.subscriptions = subscriptions;
    this.defaultOffsetCommitCallback = new DefaultOffsetCommitCallback();
    this.autoCommitEnabled = autoCommitEnabled;
    this.autoCommitIntervalMs = autoCommitIntervalMs;
    this.assignors = assignors;
    this.completedOffsetCommits = new ConcurrentLinkedQueue<>();
    this.sensors = new ConsumerCoordinatorMetrics(metrics, metricGrpPrefix);
    this.interceptors = interceptors;
    this.excludeInternalTopics = excludeInternalTopics;
    this.pendingAsyncCommits = new AtomicInteger();

    if (autoCommitEnabled)
        this.nextAutoCommitDeadline = time.milliseconds() + autoCommitIntervalMs;

    this.metadata.requestUpdate();
    // listen for metadata updates to keep metadataSnapshot current
    addMetadataListener();
}