This article collects typical usage examples of the Java class org.apache.kafka.connect.source.SourceTaskContext. If you are unsure what SourceTaskContext is for or how to use it, the examples selected below should help.
SourceTaskContext lives in the org.apache.kafka.connect.source package. 15 code examples of the class are shown below, ordered by popularity.
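Before diving into the examples, here is a minimal sketch (not taken from any of the projects below) of the typical lifecycle interaction with SourceTaskContext: Connect calls initialize(context) before start(), and the task uses context.offsetStorageReader() to restore the last committed offset before producing records. The partition key "filename" and offset key "position" are illustrative assumptions for this sketch, not part of the Connect API.

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.connect.source.SourceTask;
import org.apache.kafka.connect.source.SourceTaskContext;

public abstract class ExampleSourceTask extends SourceTask {

    private long startPosition = 0L;

    @Override
    public void initialize(SourceTaskContext context) {
        // SourceTask's base implementation stores the context in the protected "context" field.
        super.initialize(context);
    }

    @Override
    public void start(Map<String, String> props) {
        // Look up the last committed offset for this source partition, if any.
        // "filename" and "position" are hypothetical keys chosen for this sketch.
        Map<String, Object> offset = context.offsetStorageReader()
                .offset(Collections.singletonMap("filename", props.get("file")));
        if (offset != null && offset.get("position") instanceof Long) {
            startPosition = (Long) offset.get("position");
        }
    }
}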
Example 1: before
import org.apache.kafka.connect.source.SourceTaskContext; // import the required package/class
@BeforeEach
public void before() {
    this.sourceTaskContext = mock(SourceTaskContext.class);
    this.offsetStorageReader = mock(OffsetStorageReader.class);
    when(this.sourceTaskContext.offsetStorageReader()).thenReturn(this.offsetStorageReader);
    this.task = new KinesisSourceTask();
    this.task.initialize(this.sourceTaskContext);
    this.kinesisClient = mock(AmazonKinesis.class);
    this.task.time = mock(Time.class);
    this.task.kinesisClientFactory = mock(KinesisClientFactory.class);
    when(this.task.kinesisClientFactory.create(any())).thenReturn(this.kinesisClient);
    this.settings = TestData.settings();
    this.config = new KinesisSourceConnectorConfig(this.settings);
}
Example 2: setup
import org.apache.kafka.connect.source.SourceTaskContext; // import the required package/class
@Before
public void setup() throws IOException, SQLException {
    String mysqlHost = "10.100.172.86";
    connection = DriverManager.getConnection("jdbc:mysql://" + mysqlHost + ":3306/mysql", "root", "passwd");
    config = new HashMap<>();
    config.put(MySqlSourceConnector.USER_CONFIG, "maxwell");
    config.put(MySqlSourceConnector.PASSWORD_CONFIG, "XXXXXX");
    config.put(MySqlSourceConnector.PORT_CONFIG, "3306");
    config.put(MySqlSourceConnector.HOST_CONFIG, mysqlHost);
    task = new MySqlSourceTask();
    offsetStorageReader = PowerMock.createMock(OffsetStorageReader.class);
    context = PowerMock.createMock(SourceTaskContext.class);
    task.initialize(context);
    runSql("drop table if exists test.users");
    runSql("drop database if exists test");
}
Example 3: setup
import org.apache.kafka.connect.source.SourceTaskContext; // import the required package/class
@Before
public void setup() throws IOException {
    tempFile = File.createTempFile("file-stream-source-task-test", null);
    config = new HashMap<>();
    config.put(FileStreamSourceConnector.FILE_CONFIG, tempFile.getAbsolutePath());
    config.put(FileStreamSourceConnector.TOPIC_CONFIG, TOPIC);
    task = new FileStreamSourceTask();
    offsetStorageReader = PowerMock.createMock(OffsetStorageReader.class);
    context = PowerMock.createMock(SourceTaskContext.class);
    task.initialize(context);
}
Example 4: testPollsInBackground
import org.apache.kafka.connect.source.SourceTaskContext; // import the required package/class
@Test
public void testPollsInBackground() throws Exception {
    createWorkerTask();
    sourceTask.initialize(EasyMock.anyObject(SourceTaskContext.class));
    EasyMock.expectLastCall();
    sourceTask.start(TASK_PROPS);
    EasyMock.expectLastCall();
    statusListener.onStartup(taskId);
    EasyMock.expectLastCall();
    final CountDownLatch pollLatch = expectPolls(10);
    // In this test, we don't flush, so nothing goes any further than the offset writer
    sourceTask.stop();
    EasyMock.expectLastCall();
    expectOffsetFlush(true);
    statusListener.onShutdown(taskId);
    EasyMock.expectLastCall();
    producer.close(EasyMock.anyLong(), EasyMock.anyObject(TimeUnit.class));
    EasyMock.expectLastCall();
    transformationChain.close();
    EasyMock.expectLastCall();
    PowerMock.replayAll();
    workerTask.initialize(TASK_CONFIG);
    Future<?> taskFuture = executor.submit(workerTask);
    assertTrue(awaitLatch(pollLatch));
    workerTask.stop();
    assertTrue(workerTask.awaitStop(1000));
    taskFuture.get();
    PowerMock.verifyAll();
}
Example 5: initTask
import org.apache.kafka.connect.source.SourceTaskContext; // import the required package/class
@Before
public void initTask() {
    task = new FsSourceTask();
    taskConfig = new HashMap<String, String>() {{
        String[] uris = directories.stream().map(dir -> dir.toString())
                .toArray(String[]::new);
        put(FsSourceTaskConfig.FS_URIS, String.join(",", uris));
        put(FsSourceTaskConfig.TOPIC, "topic_test");
        put(FsSourceTaskConfig.POLICY_CLASS, SimplePolicy.class.getName());
        put(FsSourceTaskConfig.FILE_READER_CLASS, TextFileReader.class.getName());
        put(FsSourceTaskConfig.POLICY_REGEXP, "^[0-9]*\\.txt$");
    }};
    // Mock initialization
    taskContext = PowerMock.createMock(SourceTaskContext.class);
    offsetStorageReader = PowerMock.createMock(OffsetStorageReader.class);
    EasyMock.expect(taskContext.offsetStorageReader())
            .andReturn(offsetStorageReader);
    EasyMock.expect(taskContext.offsetStorageReader())
            .andReturn(offsetStorageReader);
    EasyMock.expect(offsetStorageReader.offset(EasyMock.anyObject()))
            .andReturn(new HashMap<String, Object>() {{
                put("offset", 5L);
            }});
    EasyMock.expect(offsetStorageReader.offset(EasyMock.anyObject()))
            .andReturn(new HashMap<String, Object>() {{
                put("offset", 5L);
            }});
    EasyMock.checkOrder(taskContext, false);
    EasyMock.replay(taskContext);
    EasyMock.checkOrder(offsetStorageReader, false);
    EasyMock.replay(offsetStorageReader);
    task.initialize(taskContext);
}
Example 6: testPause
import org.apache.kafka.connect.source.SourceTaskContext; // import the required package/class
@Test
public void testPause() throws Exception {
    createWorkerTask();
    sourceTask.initialize(EasyMock.anyObject(SourceTaskContext.class));
    EasyMock.expectLastCall();
    sourceTask.start(TASK_PROPS);
    EasyMock.expectLastCall();
    statusListener.onStartup(taskId);
    EasyMock.expectLastCall();
    AtomicInteger count = new AtomicInteger(0);
    CountDownLatch pollLatch = expectPolls(10, count);
    // In this test, we don't flush, so nothing goes any further than the offset writer
    statusListener.onPause(taskId);
    EasyMock.expectLastCall();
    sourceTask.stop();
    EasyMock.expectLastCall();
    expectOffsetFlush(true);
    statusListener.onShutdown(taskId);
    EasyMock.expectLastCall();
    producer.close(EasyMock.anyLong(), EasyMock.anyObject(TimeUnit.class));
    EasyMock.expectLastCall();
    transformationChain.close();
    EasyMock.expectLastCall();
    PowerMock.replayAll();
    workerTask.initialize(TASK_CONFIG);
    Future<?> taskFuture = executor.submit(workerTask);
    assertTrue(awaitLatch(pollLatch));
    workerTask.transitionTo(TargetState.PAUSED);
    int priorCount = count.get();
    Thread.sleep(100);
    // since the transition is observed asynchronously, the count could be off by one loop iteration
    assertTrue(count.get() - priorCount <= 1);
    workerTask.stop();
    assertTrue(workerTask.awaitStop(1000));
    taskFuture.get();
    PowerMock.verifyAll();
}
Example 7: testFailureInPoll
import org.apache.kafka.connect.source.SourceTaskContext; // import the required package/class
@Test
public void testFailureInPoll() throws Exception {
    createWorkerTask();
    sourceTask.initialize(EasyMock.anyObject(SourceTaskContext.class));
    EasyMock.expectLastCall();
    sourceTask.start(TASK_PROPS);
    EasyMock.expectLastCall();
    statusListener.onStartup(taskId);
    EasyMock.expectLastCall();
    final CountDownLatch pollLatch = new CountDownLatch(1);
    final RuntimeException exception = new RuntimeException();
    EasyMock.expect(sourceTask.poll()).andAnswer(new IAnswer<List<SourceRecord>>() {
        @Override
        public List<SourceRecord> answer() throws Throwable {
            pollLatch.countDown();
            throw exception;
        }
    });
    statusListener.onFailure(taskId, exception);
    EasyMock.expectLastCall();
    sourceTask.stop();
    EasyMock.expectLastCall();
    expectOffsetFlush(true);
    producer.close(EasyMock.anyLong(), EasyMock.anyObject(TimeUnit.class));
    EasyMock.expectLastCall();
    transformationChain.close();
    EasyMock.expectLastCall();
    PowerMock.replayAll();
    workerTask.initialize(TASK_CONFIG);
    Future<?> taskFuture = executor.submit(workerTask);
    assertTrue(awaitLatch(pollLatch));
    workerTask.stop();
    assertTrue(workerTask.awaitStop(1000));
    taskFuture.get();
    PowerMock.verifyAll();
}
Example 8: testCommit
import org.apache.kafka.connect.source.SourceTaskContext; // import the required package/class
@Test
public void testCommit() throws Exception {
    // Test that the task commits properly when prompted
    createWorkerTask();
    sourceTask.initialize(EasyMock.anyObject(SourceTaskContext.class));
    EasyMock.expectLastCall();
    sourceTask.start(TASK_PROPS);
    EasyMock.expectLastCall();
    statusListener.onStartup(taskId);
    EasyMock.expectLastCall();
    // We'll wait for some data, then trigger a flush
    final CountDownLatch pollLatch = expectPolls(1);
    expectOffsetFlush(true);
    sourceTask.stop();
    EasyMock.expectLastCall();
    expectOffsetFlush(true);
    statusListener.onShutdown(taskId);
    EasyMock.expectLastCall();
    producer.close(EasyMock.anyLong(), EasyMock.anyObject(TimeUnit.class));
    EasyMock.expectLastCall();
    transformationChain.close();
    EasyMock.expectLastCall();
    PowerMock.replayAll();
    workerTask.initialize(TASK_CONFIG);
    Future<?> taskFuture = executor.submit(workerTask);
    assertTrue(awaitLatch(pollLatch));
    assertTrue(workerTask.commitOffsets());
    workerTask.stop();
    assertTrue(workerTask.awaitStop(1000));
    taskFuture.get();
    PowerMock.verifyAll();
}
Example 9: testCommitFailure
import org.apache.kafka.connect.source.SourceTaskContext; // import the required package/class
@Test
public void testCommitFailure() throws Exception {
    // Test that a prompted commit succeeds, and that a failed offset flush during shutdown is tolerated
    createWorkerTask();
    sourceTask.initialize(EasyMock.anyObject(SourceTaskContext.class));
    EasyMock.expectLastCall();
    sourceTask.start(TASK_PROPS);
    EasyMock.expectLastCall();
    statusListener.onStartup(taskId);
    EasyMock.expectLastCall();
    // We'll wait for some data, then trigger a flush
    final CountDownLatch pollLatch = expectPolls(1);
    expectOffsetFlush(true);
    sourceTask.stop();
    EasyMock.expectLastCall();
    expectOffsetFlush(false);
    statusListener.onShutdown(taskId);
    EasyMock.expectLastCall();
    producer.close(EasyMock.anyLong(), EasyMock.anyObject(TimeUnit.class));
    EasyMock.expectLastCall();
    transformationChain.close();
    EasyMock.expectLastCall();
    PowerMock.replayAll();
    workerTask.initialize(TASK_CONFIG);
    Future<?> taskFuture = executor.submit(workerTask);
    assertTrue(awaitLatch(pollLatch));
    assertTrue(workerTask.commitOffsets());
    workerTask.stop();
    assertTrue(workerTask.awaitStop(1000));
    taskFuture.get();
    PowerMock.verifyAll();
}
Example 10: testSlowTaskStart
import org.apache.kafka.connect.source.SourceTaskContext; // import the required package/class
@Test
public void testSlowTaskStart() throws Exception {
    final CountDownLatch startupLatch = new CountDownLatch(1);
    final CountDownLatch finishStartupLatch = new CountDownLatch(1);
    createWorkerTask();
    sourceTask.initialize(EasyMock.anyObject(SourceTaskContext.class));
    EasyMock.expectLastCall();
    sourceTask.start(TASK_PROPS);
    EasyMock.expectLastCall().andAnswer(new IAnswer<Object>() {
        @Override
        public Object answer() throws Throwable {
            startupLatch.countDown();
            assertTrue(awaitLatch(finishStartupLatch));
            return null;
        }
    });
    statusListener.onStartup(taskId);
    EasyMock.expectLastCall();
    sourceTask.stop();
    EasyMock.expectLastCall();
    expectOffsetFlush(true);
    statusListener.onShutdown(taskId);
    EasyMock.expectLastCall();
    producer.close(EasyMock.anyLong(), EasyMock.anyObject(TimeUnit.class));
    EasyMock.expectLastCall();
    transformationChain.close();
    EasyMock.expectLastCall();
    PowerMock.replayAll();
    workerTask.initialize(TASK_CONFIG);
    Future<?> workerTaskFuture = executor.submit(workerTask);
    // Stopping immediately while the other thread has work to do should result in no polling, no offset commits,
    // exiting the work thread immediately, and the stop() method will be invoked in the background thread since it
    // cannot be invoked immediately in the thread trying to stop the task.
    assertTrue(awaitLatch(startupLatch));
    workerTask.stop();
    finishStartupLatch.countDown();
    assertTrue(workerTask.awaitStop(1000));
    workerTaskFuture.get();
    PowerMock.verifyAll();
}
Example 11: poll
import org.apache.kafka.connect.source.SourceTaskContext; // import the required package/class
protected void poll(final String packageName, TestCase testCase) throws InterruptedException, IOException {
    String keySchemaConfig = ObjectMapperFactory.INSTANCE.writeValueAsString(testCase.keySchema);
    String valueSchemaConfig = ObjectMapperFactory.INSTANCE.writeValueAsString(testCase.valueSchema);
    Map<String, String> settings = Maps.newLinkedHashMap();
    settings.put(SpoolDirSourceConnectorConfig.INPUT_PATH_CONFIG, this.inputPath.getAbsolutePath());
    settings.put(SpoolDirSourceConnectorConfig.FINISHED_PATH_CONFIG, this.finishedPath.getAbsolutePath());
    settings.put(SpoolDirSourceConnectorConfig.ERROR_PATH_CONFIG, this.errorPath.getAbsolutePath());
    settings.put(SpoolDirSourceConnectorConfig.INPUT_FILE_PATTERN_CONF, String.format("^.*\\.%s", packageName));
    settings.put(SpoolDirSourceConnectorConfig.TOPIC_CONF, "testing");
    settings.put(SpoolDirSourceConnectorConfig.KEY_SCHEMA_CONF, keySchemaConfig);
    settings.put(SpoolDirSourceConnectorConfig.VALUE_SCHEMA_CONF, valueSchemaConfig);
    settings.put(SpoolDirSourceConnectorConfig.EMPTY_POLL_WAIT_MS_CONF, "10");
    settings(settings);
    if (null != testCase.settings && !testCase.settings.isEmpty()) {
        settings.putAll(testCase.settings);
    }
    this.task = createTask();
    SourceTaskContext sourceTaskContext = mock(SourceTaskContext.class);
    OffsetStorageReader offsetStorageReader = mock(OffsetStorageReader.class);
    when(offsetStorageReader.offset(anyMap())).thenReturn(testCase.offset);
    when(sourceTaskContext.offsetStorageReader()).thenReturn(offsetStorageReader);
    this.task.initialize(sourceTaskContext);
    this.task.start(settings);
    String dataFile = new File(packageName, Files.getNameWithoutExtension(testCase.path.toString())) + ".data";
    log.trace("poll(String, TestCase) - dataFile={}", dataFile);
    String inputFileName = String.format("%s.%s",
            Files.getNameWithoutExtension(testCase.path.toString()),
            packageName
    );
    final File inputFile = new File(this.inputPath, inputFileName);
    log.trace("poll(String, TestCase) - inputFile = {}", inputFile);
    final File processingFile = this.task.processingFile(inputFile);
    try (InputStream inputStream = this.getClass().getResourceAsStream(dataFile)) {
        try (OutputStream outputStream = new FileOutputStream(inputFile)) {
            ByteStreams.copy(inputStream, outputStream);
        }
    }
    assertFalse(processingFile.exists(), String.format("processingFile %s should not exist before first poll().", processingFile));
    assertTrue(inputFile.exists(), String.format("inputFile %s should exist.", inputFile));
    List<SourceRecord> records = this.task.poll();
    assertTrue(inputFile.exists(), String.format("inputFile %s should exist after first poll().", inputFile));
    assertTrue(processingFile.exists(), String.format("processingFile %s should exist after first poll().", processingFile));
    assertNotNull(records, "records should not be null.");
    assertFalse(records.isEmpty(), "records should not be empty");
    assertEquals(testCase.expected.size(), records.size(), "records.size() does not match.");
    for (int i = 0; i < testCase.expected.size(); i++) {
        SourceRecord expectedRecord = testCase.expected.get(i);
        SourceRecord actualRecord = records.get(i);
        assertSourceRecord(expectedRecord, actualRecord, String.format("index:%s", i));
    }
    records = this.task.poll();
    assertTrue(records.isEmpty(), "records should be empty after the second poll.");
    records = this.task.poll();
    assertTrue(records.isEmpty(), "records should be empty after the third poll.");
    assertFalse(inputFile.exists(), String.format("inputFile %s should not exist.", inputFile));
    assertFalse(processingFile.exists(), String.format("processingFile %s should not exist.", processingFile));
    assertTrue(records.isEmpty(), "records should be empty.");
    final File finishedFile = new File(this.finishedPath, inputFileName);
    assertTrue(finishedFile.exists(), String.format("finishedFile %s should exist.", finishedFile));
}
Example 12: initialize
import org.apache.kafka.connect.source.SourceTaskContext; // import the required package/class
@Override
public void initialize(SourceTaskContext context) {
    super.initialize(context);
    LOG.info("AMQP source task initialized");
}
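Note that the override delegates to super.initialize(context): SourceTask's base implementation stores the context in its protected context field, so the task can still reach context.offsetStorageReader() later even though this override only adds a log line.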
Example 13: setUp
import org.apache.kafka.connect.source.SourceTaskContext; // import the required package/class
@Override
public void setUp() {
    offsets = new HashMap<>();
    totalWrittenDocuments = 0;
    try {
        super.setUp();
        mongodStarter = MongodStarter.getDefaultInstance();
        mongodConfig = new MongodConfigBuilder()
                .version(Version.Main.V3_2)
                .replication(new Storage(REPLICATION_PATH, "rs0", 1024))
                .net(new Net(12345, Network.localhostIsIPv6()))
                .build();
        mongodExecutable = mongodStarter.prepare(mongodConfig);
        mongod = mongodExecutable.start();
        mongoClient = new MongoClient(new ServerAddress("localhost", 12345));
        MongoDatabase adminDatabase = mongoClient.getDatabase("admin");
        BasicDBObject replicaSetSetting = new BasicDBObject();
        replicaSetSetting.put("_id", "rs0");
        BasicDBList members = new BasicDBList();
        DBObject host = new BasicDBObject();
        host.put("_id", 0);
        host.put("host", "127.0.0.1:12345");
        members.add(host);
        replicaSetSetting.put("members", members);
        adminDatabase.runCommand(new BasicDBObject("isMaster", 1));
        adminDatabase.runCommand(new BasicDBObject("replSetInitiate", replicaSetSetting));
        MongoDatabase db = mongoClient.getDatabase("mydb");
        db.createCollection("test1");
        db.createCollection("test2");
        db.createCollection("test3");
    } catch (Exception e) {
        // Setup failures are silently swallowed here; re-enabling the assertion
        // below would make the test fail on any setup error instead.
        // Assert.assertTrue(false);
    }
    task = new MongodbSourceTask();
    offsetStorageReader = PowerMock.createMock(OffsetStorageReader.class);
    context = PowerMock.createMock(SourceTaskContext.class);
    task.initialize(context);
    sourceProperties = new HashMap<>();
    sourceProperties.put("uri", "mongodb://localhost:12345");
    sourceProperties.put("batch.size", Integer.toString(100));
    sourceProperties.put("schema.name", "schema");
    sourceProperties.put("topic.prefix", "prefix");
    sourceProperties.put("databases", "mydb.test1,mydb.test2,mydb.test3");
}
Example 14: setUp
import org.apache.kafka.connect.source.SourceTaskContext; // import the required package/class
@Override
public void setUp() {
    offsets = new HashMap<>();
    totalWrittenDocuments = 0;
    try {
        super.setUp();
        mongodStarter = MongodStarter.getDefaultInstance();
        mongodConfig = new MongodConfigBuilder()
                .version(Version.Main.V3_2)
                .replication(new Storage(REPLICATION_PATH, "rs0", 1024))
                .net(new Net(12345, Network.localhostIsIPv6()))
                .build();
        mongodExecutable = mongodStarter.prepare(mongodConfig);
        mongod = mongodExecutable.start();
        mongoClient = new MongoClient(new ServerAddress("localhost", 12345));
        MongoDatabase adminDatabase = mongoClient.getDatabase("admin");
        BasicDBObject replicaSetSetting = new BasicDBObject();
        replicaSetSetting.put("_id", "rs0");
        BasicDBList members = new BasicDBList();
        DBObject host = new BasicDBObject();
        host.put("_id", 0);
        host.put("host", "127.0.0.1:12345");
        members.add(host);
        replicaSetSetting.put("members", members);
        adminDatabase.runCommand(new BasicDBObject("isMaster", 1));
        adminDatabase.runCommand(new BasicDBObject("replSetInitiate", replicaSetSetting));
        MongoDatabase db = mongoClient.getDatabase("mydb");
        db.createCollection("test1");
        db.createCollection("test2");
        db.createCollection("test3");
    } catch (Exception e) {
        // As in Example 13, setup failures are silently swallowed here;
        // re-enabling the assertion below would fail the test instead.
        // Assert.assertTrue(false);
    }
    task = new MongodbSourceTask();
    offsetStorageReader = PowerMock.createMock(OffsetStorageReader.class);
    context = PowerMock.createMock(SourceTaskContext.class);
    task.initialize(context);
    sourceProperties = new HashMap<>();
    sourceProperties.put("host", "localhost");
    sourceProperties.put("port", Integer.toString(12345));
    sourceProperties.put("batch.size", Integer.toString(100));
    sourceProperties.put("schema.name", "schema");
    sourceProperties.put("topic.prefix", "prefix");
    sourceProperties.put("databases", "mydb.test1,mydb.test2,mydb.test3");
}
Example 15: initialize
import org.apache.kafka.connect.source.SourceTaskContext; // import the required package/class
@Override
public void initialize(SourceTaskContext context) {
    super.initialize(context);
}
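The Mockito-based setups above (Examples 1 and 11) repeat the same stubbing pattern. As a closing sketch, the helper below is a hypothetical consolidation of that pattern, assuming Mockito 2+ and the Connect API on the classpath; it is not code from any of the projects above.

import static org.mockito.ArgumentMatchers.anyMap;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Map;
import org.apache.kafka.connect.source.SourceTaskContext;
import org.apache.kafka.connect.storage.OffsetStorageReader;

public final class MockSourceTaskContexts {

    private MockSourceTaskContexts() {
    }

    // Returns a mocked SourceTaskContext whose reader answers every offset(...)
    // lookup with the given map (pass null to simulate no stored offset).
    public static SourceTaskContext withOffset(Map<String, Object> offset) {
        OffsetStorageReader reader = mock(OffsetStorageReader.class);
        when(reader.offset(anyMap())).thenReturn(offset);
        SourceTaskContext context = mock(SourceTaskContext.class);
        when(context.offsetStorageReader()).thenReturn(reader);
        return context;
    }
}

A test would then call task.initialize(MockSourceTaskContexts.withOffset(Collections.singletonMap("offset", 5L))) before task.start(...), matching the flow the examples above set up by hand.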