This article collects representative usage examples of the Java method io.confluent.connect.hdfs.storage.Storage.url. If you are wondering what Storage.url does, how to call it, or what real call sites look like, the curated examples below should help. You can also explore the enclosing class, io.confluent.connect.hdfs.storage.Storage, for further context.
Three code examples of the Storage.url method are shown below, ordered by popularity by default.
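For orientation, the three examples exercise only a small slice of the Storage interface. The sketch below is an approximation inferred from those call sites, not the connector's full interface definition, which varies across versions and includes additional filesystem methods:

import org.apache.hadoop.conf.Configuration;
import org.apache.kafka.common.TopicPartition;
import io.confluent.connect.hdfs.wal.WAL;

// Simplified view covering only the methods the examples below call.
public interface Storage {
  String url();                                // base URL of the backing filesystem, e.g. "hdfs://namenode:8020"
  Configuration conf();                        // Hadoop Configuration associated with this storage
  WAL wal(String logsDir, TopicPartition tp);  // write-ahead log handle for one topic partition
}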
Example 1: FSWAL
import io.confluent.connect.hdfs.storage.Storage; // import the class the method depends on
public FSWAL(String logsDir, TopicPartition topicPart, Storage storage)
    throws ConnectException {
  this.storage = storage;
  this.conf = storage.conf();
  String url = storage.url();
  logFile = FileUtils.logFileName(url, logsDir, topicPart);
}
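In the connector itself, FSWAL is normally obtained through Storage.wal(...) rather than constructed directly; the HdfsStorage implementation returns an FSWAL from that method. A hypothetical call site (the directory name and topic/partition values are made up for illustration, and storage is assumed to be an already-initialized Storage instance):

// Hypothetical usage; assumes an initialized Storage named storage.
TopicPartition tp = new TopicPartition("page-views", 0);
WAL wal = storage.wal("logs", tp);  // HdfsStorage returns a new FSWAL here

The constructor then derives logFile from storage.url() plus logsDir and the topic partition via FileUtils.logFileName, so each partition gets its own WAL file under the storage root.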
Example 2: TopicPartitionWriter
import io.confluent.connect.hdfs.storage.Storage; // import the class the method depends on
public TopicPartitionWriter(
    TopicPartition tp,
    Storage storage,
    RecordWriterProvider writerProvider,
    Partitioner partitioner,
    HdfsSinkConnectorConfig connectorConfig,
    SinkTaskContext context,
    AvroData avroData,
    HiveMetaStore hiveMetaStore,
    HiveUtil hive,
    SchemaFileReader schemaFileReader,
    ExecutorService executorService,
    Queue<Future<Void>> hiveUpdateFutures) {
  this.tp = tp;
  this.connectorConfig = connectorConfig;
  this.context = context;
  this.avroData = avroData;
  this.storage = storage;
  this.writerProvider = writerProvider;
  this.partitioner = partitioner;
  this.url = storage.url();
  this.conf = storage.conf();
  this.schemaFileReader = schemaFileReader;
  topicsDir = connectorConfig.getString(HdfsSinkConnectorConfig.TOPICS_DIR_CONFIG);
  flushSize = connectorConfig.getInt(HdfsSinkConnectorConfig.FLUSH_SIZE_CONFIG);
  rotateIntervalMs = connectorConfig.getLong(HdfsSinkConnectorConfig.ROTATE_INTERVAL_MS_CONFIG);
  rotateScheduleIntervalMs =
      connectorConfig.getLong(HdfsSinkConnectorConfig.ROTATE_SCHEDULE_INTERVAL_MS_CONFIG);
  timeoutMs = connectorConfig.getLong(HdfsSinkConnectorConfig.RETRY_BACKOFF_CONFIG);
  compatibility = SchemaUtils.getCompatibility(
      connectorConfig.getString(HdfsSinkConnectorConfig.SCHEMA_COMPATIBILITY_CONFIG));
  String logsDir = connectorConfig.getString(HdfsSinkConnectorConfig.LOGS_DIR_CONFIG);
  wal = storage.wal(logsDir, tp);
  buffer = new LinkedList<>();
  writers = new HashMap<>();
  tempFiles = new HashMap<>();
  appended = new HashSet<>();
  startOffsets = new HashMap<>();
  offsets = new HashMap<>();
  state = State.RECOVERY_STARTED;
  failureTime = -1L;
  offset = -1L;
  sawInvalidOffset = false;
  extension = writerProvider.getExtension();
  zeroPadOffsetFormat = "%0"
      + connectorConfig.getInt(HdfsSinkConnectorConfig.FILENAME_OFFSET_ZERO_PAD_WIDTH_CONFIG)
      + "d";
  hiveIntegration = connectorConfig.getBoolean(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG);
  if (hiveIntegration) {
    hiveDatabase = connectorConfig.getString(HdfsSinkConnectorConfig.HIVE_DATABASE_CONFIG);
    this.hiveMetaStore = hiveMetaStore;
    this.hive = hive;
    this.executorService = executorService;
    this.hiveUpdateFutures = hiveUpdateFutures;
    hivePartitions = new HashSet<>();
  }
  if (rotateScheduleIntervalMs > 0) {
    timeZone = DateTimeZone.forID(connectorConfig.getString(HdfsSinkConnectorConfig.TIMEZONE_CONFIG));
  }
  // Initialize rotation timers
  updateRotationTimers();
}
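A detail that is easy to miss: zeroPadOffsetFormat is assembled into a standard java.util.Formatter pattern such as "%010d", which the writer later uses to zero-pad offsets in committed file names so they sort lexicographically. A minimal, self-contained illustration (the width of 10 is an assumed value for FILENAME_OFFSET_ZERO_PAD_WIDTH_CONFIG):

public class ZeroPadDemo {
  public static void main(String[] args) {
    int width = 10;  // assumed zero-pad width
    String zeroPadOffsetFormat = "%0" + width + "d";  // becomes "%010d"
    System.out.println(String.format(zeroPadOffsetFormat, 42L));  // prints 0000000042
  }
}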
Example 3: MemoryWAL
import io.confluent.connect.hdfs.storage.Storage; // import the class the method depends on
public MemoryWAL(String topicsDir, TopicPartition topicPart, Storage storage)
    throws ConnectException {
  this.storage = storage;
  String url = storage.url();
  logFile = FileUtils.logFileName(url, topicsDir, topicPart);
}