本文整理汇总了Java中org.apache.flume.Sink.process方法的典型用法代码示例。如果您正苦于以下问题:Java Sink.process方法的具体用法?Java Sink.process怎么用?Java Sink.process使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.flume.Sink
的用法示例。
在下文中一共展示了Sink.process方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: process
import org.apache.flume.Sink; //导入方法依赖的package包/类
@Override
public Status process() throws EventDeliveryException {
  // Walk the selector's ordering of sinks; the first one that completes a
  // processing pass wins. Failed sinks are reported back to the selector so
  // it can deprioritize them on subsequent iterations.
  Status result = null;
  for (Iterator<Sink> candidates = selector.createSinkIterator(); candidates.hasNext(); ) {
    Sink candidate = candidates.next();
    try {
      result = candidate.process();
      break;
    } catch (Exception failure) {
      selector.informSinkFailed(candidate);
      LOGGER.warn("Sink failed to consume event. "
          + "Attempting next sink if available.", failure);
    }
  }
  // No sink produced a status: every candidate threw (or none existed).
  if (result == null) {
    throw new EventDeliveryException("All configured sinks have failed");
  }
  return result;
}
示例2: testEmptyChannel
import org.apache.flume.Sink; //导入方法依赖的package包/类
@Test
public void testEmptyChannel() throws UnsupportedEncodingException, EventDeliveryException {
  // Wire a KafkaSink to a memory channel that never receives an event.
  Sink sink = new KafkaSink();
  Context ctx = prepareDefaultContext();
  Configurables.configure(sink, ctx);
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, ctx);
  sink.setChannel(channel);
  sink.start();
  // An empty channel must make the sink back off rather than claim success,
  // and nothing may have been published to the default topic.
  if (sink.process() != Sink.Status.BACKOFF) {
    fail("Error Occurred");
  }
  assertNull(testUtil.getNextMessageFromConsumer(DEFAULT_TOPIC));
}
示例3: prepareAndSend
import org.apache.flume.Sink; //导入方法依赖的package包/类
/**
 * Builds a {@code KafkaSink} backed by a fresh {@code MemoryChannel}, commits a
 * single event carrying {@code msg} to the channel, and returns the status of
 * one sink processing pass.
 *
 * @param context configuration applied to both the sink and the channel
 * @param msg     payload placed in the event body
 * @return the status reported by {@code Sink#process()}
 * @throws EventDeliveryException if the sink fails to deliver the event
 */
private Sink.Status prepareAndSend(Context context, String msg)
    throws EventDeliveryException {
  Sink sink = new KafkaSink();
  Configurables.configure(sink, context);
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);
  sink.setChannel(channel);
  sink.start();
  // Stage the message in the channel inside a committed transaction.
  Transaction txn = channel.getTransaction();
  txn.begin();
  channel.put(EventBuilder.withBody(msg.getBytes()));
  txn.commit();
  txn.close();
  return sink.process();
}
示例4: testIRCSinkMissingSplitLineProperty
import org.apache.flume.Sink; //导入方法依赖的package包/类
/**
 * Verifies the IRC sink still delivers an event when the "splitlines"
 * property is absent (only "splitchars" is set).
 */
@Test
public void testIRCSinkMissingSplitLineProperty() {
  Sink ircSink = new IRCSink();
  ircSink.setName("IRC Sink - " + UUID.randomUUID().toString());
  Context context = new Context();
  context.put("hostname", "localhost");
  context.put("port", String.valueOf(ircServerPort));
  context.put("nick", "flume");
  context.put("password", "flume");
  context.put("user", "flume");
  context.put("name", "flume-dev");
  context.put("chan", "flume");
  context.put("splitchars", "false");
  Configurables.configure(ircSink, context);
  Channel memoryChannel = new MemoryChannel();
  Configurables.configure(memoryChannel, context);
  ircSink.setChannel(memoryChannel);
  ircSink.start();
  // Commit one event to the channel for the sink to drain.
  Transaction txn = memoryChannel.getTransaction();
  txn.begin();
  Event event = EventBuilder.withBody("Dummy Event".getBytes());
  memoryChannel.put(event);
  txn.commit();
  txn.close();
  try {
    Sink.Status status = ircSink.process();
    if (status == Sink.Status.BACKOFF) {
      // Fixed typo in the failure message ("occured" -> "occurred").
      fail("Error occurred");
    }
  } catch (EventDeliveryException eDelExcp) {
    // Deliberate best-effort: delivery hiccups against the local test IRC
    // server are tolerated; only an explicit BACKOFF is a failure here.
  }
}
示例5: testDefaultTopic
import org.apache.flume.Sink; //导入方法依赖的package包/类
/**
 * Verifies that an event with no "topic" header is published to the sink's
 * configured default topic.
 */
@Test
public void testDefaultTopic() {
  Sink kafkaSink = new KafkaSink();
  Context context = prepareDefaultContext();
  Configurables.configure(kafkaSink, context);
  Channel memoryChannel = new MemoryChannel();
  Configurables.configure(memoryChannel, context);
  kafkaSink.setChannel(memoryChannel);
  kafkaSink.start();
  String msg = "default-topic-test";
  // Commit one event to the channel for the sink to drain.
  Transaction tx = memoryChannel.getTransaction();
  tx.begin();
  Event event = EventBuilder.withBody(msg.getBytes());
  memoryChannel.put(event);
  tx.commit();
  tx.close();
  try {
    Sink.Status status = kafkaSink.process();
    if (status == Sink.Status.BACKOFF) {
      fail("Error Occurred");
    }
  } catch (EventDeliveryException ex) {
    // Previously this exception was silently ignored and the test went on to
    // dereference the consumer fetch, masking the real error as an NPE.
    fail("Event delivery failed: " + ex.getMessage());
  }
  String fetchedMsg = new String((byte[]) testUtil.getNextMessageFromConsumer(DEFAULT_TOPIC)
      .message());
  assertEquals(msg, fetchedMsg);
}
示例6: testTopicAndKeyFromHeader
import org.apache.flume.Sink; //导入方法依赖的package包/类
/**
 * Verifies that "topic" and "key" event headers override the sink defaults:
 * the message must land on the custom topic with the custom partition key.
 */
@Test
public void testTopicAndKeyFromHeader() throws UnsupportedEncodingException {
  Sink kafkaSink = new KafkaSink();
  Context context = prepareDefaultContext();
  Configurables.configure(kafkaSink, context);
  Channel memoryChannel = new MemoryChannel();
  Configurables.configure(memoryChannel, context);
  kafkaSink.setChannel(memoryChannel);
  kafkaSink.start();
  String msg = "test-topic-and-key-from-header";
  Map<String, String> headers = new HashMap<String, String>();
  headers.put("topic", TestConstants.CUSTOM_TOPIC);
  headers.put("key", TestConstants.CUSTOM_KEY);
  // Commit one headered event to the channel for the sink to drain.
  Transaction tx = memoryChannel.getTransaction();
  tx.begin();
  Event event = EventBuilder.withBody(msg.getBytes(), headers);
  memoryChannel.put(event);
  tx.commit();
  tx.close();
  try {
    Sink.Status status = kafkaSink.process();
    if (status == Sink.Status.BACKOFF) {
      fail("Error Occurred");
    }
  } catch (EventDeliveryException ex) {
    // Previously this exception was silently ignored and the test went on to
    // dereference the consumer fetch, masking the real error as an NPE.
    fail("Event delivery failed: " + ex.getMessage());
  }
  MessageAndMetadata fetchedMsg =
      testUtil.getNextMessageFromConsumer(TestConstants.CUSTOM_TOPIC);
  assertEquals(msg, new String((byte[]) fetchedMsg.message(), "UTF-8"));
  assertEquals(TestConstants.CUSTOM_KEY,
      new String((byte[]) fetchedMsg.key(), "UTF-8"));
}
示例7: testAvroEvent
import org.apache.flume.Sink; //导入方法依赖的package包/类
/**
 * Verifies that with AVRO_EVENT enabled the sink serializes the whole Flume
 * event (body + headers) as an AvroFlumeEvent, and that the message can be
 * decoded back with body, key, and headers intact.
 */
@SuppressWarnings("rawtypes")
@Test
public void testAvroEvent() throws IOException {
  Sink kafkaSink = new KafkaSink();
  Context context = prepareDefaultContext();
  context.put(AVRO_EVENT, "true");
  Configurables.configure(kafkaSink, context);
  Channel memoryChannel = new MemoryChannel();
  Configurables.configure(memoryChannel, context);
  kafkaSink.setChannel(memoryChannel);
  kafkaSink.start();
  String msg = "test-avro-event";
  Map<String, String> headers = new HashMap<String, String>();
  headers.put("topic", TestConstants.CUSTOM_TOPIC);
  headers.put("key", TestConstants.CUSTOM_KEY);
  headers.put(TestConstants.HEADER_1_KEY, TestConstants.HEADER_1_VALUE);
  // Commit one headered event to the channel for the sink to drain.
  Transaction tx = memoryChannel.getTransaction();
  tx.begin();
  Event event = EventBuilder.withBody(msg.getBytes(), headers);
  memoryChannel.put(event);
  tx.commit();
  tx.close();
  try {
    Sink.Status status = kafkaSink.process();
    if (status == Sink.Status.BACKOFF) {
      fail("Error Occurred");
    }
  } catch (EventDeliveryException ex) {
    // Previously this exception was silently ignored and the test went on to
    // decode the fetched message, masking the real error as an NPE.
    fail("Event delivery failed: " + ex.getMessage());
  }
  // Decode the fetched Kafka message back into an AvroFlumeEvent.
  MessageAndMetadata fetchedMsg = testUtil.getNextMessageFromConsumer(TestConstants.CUSTOM_TOPIC);
  ByteArrayInputStream in = new ByteArrayInputStream((byte[]) fetchedMsg.message());
  BinaryDecoder decoder = DecoderFactory.get().directBinaryDecoder(in, null);
  SpecificDatumReader<AvroFlumeEvent> reader =
      new SpecificDatumReader<AvroFlumeEvent>(AvroFlumeEvent.class);
  AvroFlumeEvent avroevent = reader.read(null, decoder);
  String eventBody = new String(avroevent.getBody().array(), Charsets.UTF_8);
  Map<CharSequence, CharSequence> eventHeaders = avroevent.getHeaders();
  assertEquals(msg, eventBody);
  assertEquals(TestConstants.CUSTOM_KEY, new String((byte[]) fetchedMsg.key(), "UTF-8"));
  assertEquals(TestConstants.HEADER_1_VALUE,
      eventHeaders.get(new Utf8(TestConstants.HEADER_1_KEY)).toString());
  assertEquals(TestConstants.CUSTOM_KEY, eventHeaders.get(new Utf8("key")).toString());
}
示例8: doPartitionErrors
import org.apache.flume.Sink; //导入方法依赖的package包/类
/**
 * Exercises three partition-header error scenarios against a KafkaSink:
 *
 * 1. PartitionOption.VALIDBUTOUTOFRANGE: an integer partition is provided,
 *    but it exceeds the number of partitions available on the topic.
 *    Expected behaviour: ChannelException thrown.
 *
 * 2. PartitionOption.NOTSET: the partition header is not actually set.
 *    Expected behaviour: no exception, because the code avoids an NPE.
 *
 * 3. PartitionOption.NOTANUMBER: the partition header is set but is not an
 *    Integer. Expected behaviour: ChannelException thrown.
 *
 * @param option the error scenario to stage
 * @throws Exception propagated from topic management or sink processing
 */
private void doPartitionErrors(PartitionOption option) throws Exception {
  Sink sink = new KafkaSink();
  Context ctx = prepareDefaultContext();
  ctx.put(KafkaSinkConstants.PARTITION_HEADER_NAME, "partition-header");
  Configurables.configure(sink, ctx);
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, ctx);
  sink.setChannel(channel);
  sink.start();

  // A 5-partition topic makes partition index 9 out of range.
  String topic = findUnusedTopic();
  createTopic(topic, 5);

  Transaction txn = channel.getTransaction();
  txn.begin();
  Map<String, String> headers = new HashMap<String, String>();
  headers.put("topic", topic);
  switch (option) {
    case VALIDBUTOUTOFRANGE:
      headers.put("partition-header", "9");
      break;
    case NOTSET:
      // Set an unrelated header so the partition header stays absent.
      headers.put("wrong-header", "2");
      break;
    case NOTANUMBER:
      headers.put("partition-header", "not-a-number");
      break;
    default:
      break;
  }
  channel.put(EventBuilder.withBody(String.valueOf(9).getBytes(), headers));
  txn.commit();
  txn.close();

  assertEquals(Sink.Status.READY, sink.process());
  deleteTopic(topic);
}