This article collects typical usage examples of the Java class org.apache.flume.source.avro.AvroFlumeEvent. If you are wondering what exactly the AvroFlumeEvent class does, how to use it, or where to find usage examples, the curated code samples here may help.
The AvroFlumeEvent class belongs to the org.apache.flume.source.avro package. A total of 15 code examples of the AvroFlumeEvent class are shown below, sorted by popularity by default.
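As a primer before the examples, here is a minimal, self-contained sketch of building an AvroFlumeEvent by hand using its generated builder. The class name, header entry, and body text are made up for illustration; only the AvroFlumeEvent API itself comes from Flume.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import org.apache.flume.source.avro.AvroFlumeEvent;

public class AvroFlumeEventPrimer {
  public static void main(String[] args) {
    // Headers are a CharSequence map in the Avro-generated class.
    Map<CharSequence, CharSequence> headers = new HashMap<CharSequence, CharSequence>();
    headers.put("host", "example-host"); // sample header, for illustration only

    AvroFlumeEvent event = AvroFlumeEvent.newBuilder()
        .setHeaders(headers)
        .setBody(ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8)))
        .build();

    System.out.println(event.getHeaders() + " / "
        + event.getBody().remaining() + " body bytes");
  }
}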
Example 1: SavePolicy
import org.apache.flume.source.avro.AvroFlumeEvent; // import the required package/class
private SavePolicy(Context context) {
  String uri = context.getString(CONFIG_KITE_ERROR_DATASET_URI);
  Preconditions.checkArgument(uri != null, "Must set "
      + CONFIG_KITE_ERROR_DATASET_URI + " when " + CONFIG_FAILURE_POLICY
      + "=save");
  if (Datasets.exists(uri)) {
    dataset = Datasets.load(uri, AvroFlumeEvent.class);
  } else {
    DatasetDescriptor descriptor = new DatasetDescriptor.Builder()
        .schema(AvroFlumeEvent.class)
        .build();
    dataset = Datasets.create(uri, descriptor, AvroFlumeEvent.class);
  }
  nEventsHandled = 0;
}
Example 2: handle
import org.apache.flume.source.avro.AvroFlumeEvent; // import the required package/class
@Override
public void handle(Event event, Throwable cause) throws EventDeliveryException {
  try {
    if (writer == null) {
      writer = dataset.newWriter();
    }
    final AvroFlumeEvent avroEvent = new AvroFlumeEvent();
    avroEvent.setBody(ByteBuffer.wrap(event.getBody()));
    avroEvent.setHeaders(toCharSeqMap(event.getHeaders()));
    writer.write(avroEvent);
    nEventsHandled++;
  } catch (RuntimeException ex) {
    throw new EventDeliveryException(ex);
  }
}
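Several examples on this page call toCharSeqMap() and toStringMap() (the tests in Examples 14 and 15 use a similar toUtf8Map()). These are private static helpers inside the Flume sources rather than public API, so the sketch below is an assumption about what they roughly do; the class name HeaderMaps is hypothetical.

import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-ins for Flume's private header-conversion helpers.
final class HeaderMaps {

  // Flume Event headers (String -> String) into the CharSequence map
  // that the Avro-generated AvroFlumeEvent expects.
  static Map<CharSequence, CharSequence> toCharSeqMap(Map<String, String> src) {
    return new HashMap<CharSequence, CharSequence>(src);
  }

  // Avro's CharSequence map (often org.apache.avro.util.Utf8 keys/values)
  // back into plain String headers for a Flume Event.
  static Map<String, String> toStringMap(Map<CharSequence, CharSequence> src) {
    Map<String, String> result = new HashMap<String, String>();
    for (Map.Entry<CharSequence, CharSequence> e : src.entrySet()) {
      result.put(e.getKey().toString(), e.getValue().toString());
    }
    return result;
  }
}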
Example 3: serializeEvent
import org.apache.flume.source.avro.AvroFlumeEvent; // import the required package/class
private byte[] serializeEvent(Event event, boolean useAvroEventFormat) throws IOException {
  byte[] bytes;
  if (useAvroEventFormat) {
    if (!tempOutStream.isPresent()) {
      tempOutStream = Optional.of(new ByteArrayOutputStream());
    }
    if (!writer.isPresent()) {
      writer = Optional.of(new SpecificDatumWriter<AvroFlumeEvent>(AvroFlumeEvent.class));
    }
    tempOutStream.get().reset();
    AvroFlumeEvent e = new AvroFlumeEvent(toCharSeqMap(event.getHeaders()),
        ByteBuffer.wrap(event.getBody()));
    encoder = EncoderFactory.get().directBinaryEncoder(tempOutStream.get(), encoder);
    writer.get().write(e, encoder);
    encoder.flush();
    bytes = tempOutStream.get().toByteArray();
  } else {
    bytes = event.getBody();
  }
  return bytes;
}
Example 4: serializeValue
import org.apache.flume.source.avro.AvroFlumeEvent; // import the required package/class
private byte[] serializeValue(Event event, boolean parseAsFlumeEvent) throws IOException {
  byte[] bytes;
  if (parseAsFlumeEvent) {
    if (!tempOutStream.isPresent()) {
      tempOutStream = Optional.of(new ByteArrayOutputStream());
    }
    if (!writer.isPresent()) {
      writer = Optional.of(
          new SpecificDatumWriter<AvroFlumeEvent>(AvroFlumeEvent.class));
    }
    tempOutStream.get().reset();
    AvroFlumeEvent e = new AvroFlumeEvent(
        toCharSeqMap(event.getHeaders()),
        ByteBuffer.wrap(event.getBody()));
    encoder = EncoderFactory.get()
        .directBinaryEncoder(tempOutStream.get(), encoder);
    writer.get().write(e, encoder);
    encoder.flush();
    bytes = tempOutStream.get().toByteArray();
  } else {
    bytes = event.getBody();
  }
  return bytes;
}
Example 5: deserializeValue
import org.apache.flume.source.avro.AvroFlumeEvent; // import the required package/class
private Event deserializeValue(byte[] value, boolean parseAsFlumeEvent) throws IOException {
  Event e;
  if (parseAsFlumeEvent) {
    ByteArrayInputStream in = new ByteArrayInputStream(value);
    decoder = DecoderFactory.get().directBinaryDecoder(in, decoder);
    if (!reader.isPresent()) {
      reader = Optional.of(
          new SpecificDatumReader<AvroFlumeEvent>(AvroFlumeEvent.class));
    }
    AvroFlumeEvent event = reader.get().read(null, decoder);
    e = EventBuilder.withBody(event.getBody().array(),
        toStringMap(event.getHeaders()));
  } else {
    e = EventBuilder.withBody(value, Collections.EMPTY_MAP);
  }
  return e;
}
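Examples 4 and 5 are mirror images of each other. The standalone sketch below wires the same SpecificDatumWriter/SpecificDatumReader pair into a full round trip that can be run directly; the class name and sample data are made up, and the encoder/decoder reuse parameters are passed as null instead of cached fields for brevity.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.EncoderFactory;
import org.apache.avro.specific.SpecificDatumReader;
import org.apache.avro.specific.SpecificDatumWriter;
import org.apache.flume.source.avro.AvroFlumeEvent;

public class AvroFlumeEventRoundTrip {
  public static void main(String[] args) throws Exception {
    Map<CharSequence, CharSequence> headers = new HashMap<CharSequence, CharSequence>();
    headers.put("topic", "demo"); // sample header, for illustration
    AvroFlumeEvent original = new AvroFlumeEvent(headers,
        ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8)));

    // Serialize, as in serializeValue() above.
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    SpecificDatumWriter<AvroFlumeEvent> writer =
        new SpecificDatumWriter<AvroFlumeEvent>(AvroFlumeEvent.class);
    BinaryEncoder encoder = EncoderFactory.get().directBinaryEncoder(out, null);
    writer.write(original, encoder);
    encoder.flush();

    // Deserialize, as in deserializeValue() above.
    ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
    BinaryDecoder decoder = DecoderFactory.get().directBinaryDecoder(in, null);
    SpecificDatumReader<AvroFlumeEvent> reader =
        new SpecificDatumReader<AvroFlumeEvent>(AvroFlumeEvent.class);
    AvroFlumeEvent copy = reader.read(null, decoder);

    System.out.println(new String(copy.getBody().array(), StandardCharsets.UTF_8));
  }
}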
Example 6: append
import org.apache.flume.source.avro.AvroFlumeEvent; // import the required package/class
@Override
public Status append(AvroFlumeEvent avroEvent) {
  logger.debug("Avro source {}: Received avro event: {}", getName(), avroEvent);
  sourceCounter.incrementAppendReceivedCount();
  sourceCounter.incrementEventReceivedCount();
  Event event = EventBuilder.withBody(avroEvent.getBody().array(),
      toStringMap(avroEvent.getHeaders()));
  try {
    getChannelProcessor().processEvent(event);
  } catch (ChannelException ex) {
    logger.warn("Avro source " + getName() + ": Unable to process event. " +
        "Exception follows.", ex);
    return Status.FAILED;
  }
  sourceCounter.incrementAppendAcceptedCount();
  sourceCounter.incrementEventAcceptedCount();
  return Status.OK;
}
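The append() callback above runs when a client delivers a single event over Avro RPC. For context, here is a rough sketch of the sending side using Flume's RpcClientFactory; the hostname and port are placeholders, not values taken from the example above.

import java.nio.charset.StandardCharsets;
import org.apache.flume.Event;
import org.apache.flume.api.RpcClient;
import org.apache.flume.api.RpcClientFactory;
import org.apache.flume.event.EventBuilder;

public class AvroSourceSenderDemo {
  public static void main(String[] args) throws Exception {
    // Placeholder connection details; point these at a running Avro source.
    RpcClient client = RpcClientFactory.getDefaultInstance("localhost", 41414);
    try {
      Event event = EventBuilder.withBody(
          "hello avro source".getBytes(StandardCharsets.UTF_8));
      client.append(event); // arrives at the source as an AvroFlumeEvent
    } finally {
      client.close();
    }
  }
}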
Example 7: unwrap
import org.apache.flume.source.avro.AvroFlumeEvent; // import the required package/class
public Event unwrap(Event event) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Attempting to unwrap event, body [" + event.getBody().length + "] bytes");
  }
  Event eventUnwrapped = event;
  InputStream eventWrappedStream = new ByteArrayInputStream(event.getBody());
  try {
    decoder = DecoderFactory.get().directBinaryDecoder(eventWrappedStream, decoder);
    AvroFlumeEvent eventUnwrappedAvro = reader.read(null, decoder);
    eventUnwrapped = EventBuilder.withBody(eventUnwrappedAvro.getBody().array(),
        toStringMap(eventUnwrappedAvro.getHeaders(), event.getHeaders()));
    if (LOG.isDebugEnabled()) {
      LOG.debug("Event successfully unwrapped, header ["
          + eventUnwrappedAvro.getHeaders().size() + "] fields, body ["
          + eventUnwrapped.getBody().length + "] bytes");
    }
  } catch (Exception exception) {
    if (LOG.isWarnEnabled()) {
      LOG.warn("Failed to unwrap event, "
          + "perhaps this source is not connected to a sinkless connector?", exception);
    }
  } finally {
    IOUtils.closeQuietly(eventWrappedStream);
  }
  return eventUnwrapped;
}
Example 8: poll
import org.apache.flume.source.avro.AvroFlumeEvent; // import the required package/class
public Event poll() {
  AvroFlumeEvent avroEvent = null;
  try {
    avroEvent = eventQueue.poll(30000, TimeUnit.MILLISECONDS);
  } catch (final InterruptedException ie) {
    // Ignore the exception.
  }
  if (avroEvent != null) {
    return EventBuilder.withBody(avroEvent.getBody().array(),
        toStringMap(avroEvent.getHeaders()));
  } else {
    System.out.println("No Event returned");
  }
  return null;
}
Example 9: append
import org.apache.flume.source.avro.AvroFlumeEvent; // import the required package/class
@Override
public Status append(AvroFlumeEvent event) throws AvroRemoteException {
  if (failed) {
    logger.debug("Event rejected");
    return Status.FAILED;
  }
  logger.debug("LB: Received event from append(): {}",
      new String(event.getBody().array(), Charset.forName("UTF8")));
  appendCount++;
  return Status.OK;
}
Example 10: appendBatch
import org.apache.flume.source.avro.AvroFlumeEvent; // import the required package/class
@Override
public Status appendBatch(List<AvroFlumeEvent> events) throws AvroRemoteException {
  if (failed) {
    logger.debug("Event batch rejected");
    return Status.FAILED;
  }
  logger.debug("LB: Received {} events from appendBatch()", events.size());
  appendBatchCount++;
  return Status.OK;
}
Example 11: append
import org.apache.flume.source.avro.AvroFlumeEvent; // import the required package/class
@Override
public Status append(AvroFlumeEvent avroEvent) {
  if (logger.isDebugEnabled()) {
    if (LogPrivacyUtil.allowLogRawData()) {
      logger.debug("Avro source {}: Received avro event: {}", getName(), avroEvent);
    } else {
      logger.debug("Avro source {}: Received avro event", getName());
    }
  }
  sourceCounter.incrementAppendReceivedCount();
  sourceCounter.incrementEventReceivedCount();
  Event event = EventBuilder.withBody(avroEvent.getBody().array(),
      toStringMap(avroEvent.getHeaders()));
  try {
    getChannelProcessor().processEvent(event);
  } catch (ChannelException ex) {
    logger.warn("Avro source " + getName() + ": Unable to process event. " +
        "Exception follows.", ex);
    return Status.FAILED;
  }
  sourceCounter.incrementAppendAcceptedCount();
  sourceCounter.incrementEventAcceptedCount();
  return Status.OK;
}
Example 12: appendBatch
import org.apache.flume.source.avro.AvroFlumeEvent; // import the required package/class
@Override
public Status appendBatch(List<AvroFlumeEvent> events) {
  logger.debug("Avro source {}: Received avro event batch of {} events.",
      getName(), events.size());
  sourceCounter.incrementAppendBatchReceivedCount();
  sourceCounter.addToEventReceivedCount(events.size());
  List<Event> batch = new ArrayList<Event>();
  for (AvroFlumeEvent avroEvent : events) {
    Event event = EventBuilder.withBody(avroEvent.getBody().array(),
        toStringMap(avroEvent.getHeaders()));
    batch.add(event);
  }
  try {
    getChannelProcessor().processEventBatch(batch);
  } catch (Throwable t) {
    logger.error("Avro source " + getName() + ": Unable to process event " +
        "batch. Exception follows.", t);
    if (t instanceof Error) {
      throw (Error) t;
    }
    return Status.FAILED;
  }
  sourceCounter.incrementAppendBatchAcceptedCount();
  sourceCounter.addToEventAcceptedCount(events.size());
  return Status.OK;
}
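appendBatch() is the batched counterpart of append(), and clients reach it by sending several events in one call. A hedged sketch of such a sender follows; the hostname, port, and batch contents are placeholders.

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.flume.Event;
import org.apache.flume.api.RpcClient;
import org.apache.flume.api.RpcClientFactory;
import org.apache.flume.event.EventBuilder;

public class AvroBatchSenderDemo {
  public static void main(String[] args) throws Exception {
    // Placeholder connection details.
    RpcClient client = RpcClientFactory.getDefaultInstance("localhost", 41414);
    try {
      List<Event> batch = new ArrayList<Event>();
      for (int i = 0; i < 10; i++) {
        batch.add(EventBuilder.withBody(
            ("event-" + i).getBytes(StandardCharsets.UTF_8)));
      }
      client.appendBatch(batch); // reaches appendBatch(List<AvroFlumeEvent>) on the source
    } finally {
      client.close();
    }
  }
}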
Example 13: appendBatch
import org.apache.flume.source.avro.AvroFlumeEvent; // import the required package/class
@Override
public Status appendBatch(List<AvroFlumeEvent> events) throws AvroRemoteException {
  logger.debug("Received event batch:{}; delaying for {}ms", events, delay);
  sleep();
  return Status.OK;
}
Example 14: testMissingSchemaWithSavePolicy
import org.apache.flume.source.avro.AvroFlumeEvent; // import the required package/class
@Test
public void testMissingSchemaWithSavePolicy() throws EventDeliveryException {
  if (Datasets.exists(ERROR_DATASET_URI)) {
    Datasets.delete(ERROR_DATASET_URI);
  }
  config.put(DatasetSinkConstants.CONFIG_FAILURE_POLICY,
      DatasetSinkConstants.SAVE_FAILURE_POLICY);
  config.put(DatasetSinkConstants.CONFIG_KITE_ERROR_DATASET_URI,
      ERROR_DATASET_URI);
  final DatasetSink sink = sink(in, config);
  Event badEvent = new SimpleEvent();
  badEvent.setHeaders(Maps.<String, String>newHashMap());
  badEvent.setBody(serialize(expected.get(0), RECORD_SCHEMA));
  putToChannel(in, badEvent);
  // run the sink
  sink.start();
  sink.process();
  sink.stop();
  Assert.assertEquals("Good records should have been written",
      Sets.newHashSet(expected),
      read(Datasets.load(FILE_DATASET_URI)));
  Assert.assertEquals("Should not have rolled back", 0, remaining(in));
  Assert.assertEquals("Should have saved the bad event",
      Sets.newHashSet(AvroFlumeEvent.newBuilder()
          .setBody(ByteBuffer.wrap(badEvent.getBody()))
          .setHeaders(toUtf8Map(badEvent.getHeaders()))
          .build()),
      read(Datasets.load(ERROR_DATASET_URI, AvroFlumeEvent.class)));
}
Example 15: testSerializedWithIncompatibleSchemasWithSavePolicy
import org.apache.flume.source.avro.AvroFlumeEvent; // import the required package/class
@Test
public void testSerializedWithIncompatibleSchemasWithSavePolicy()
    throws EventDeliveryException {
  if (Datasets.exists(ERROR_DATASET_URI)) {
    Datasets.delete(ERROR_DATASET_URI);
  }
  config.put(DatasetSinkConstants.CONFIG_FAILURE_POLICY,
      DatasetSinkConstants.SAVE_FAILURE_POLICY);
  config.put(DatasetSinkConstants.CONFIG_KITE_ERROR_DATASET_URI,
      ERROR_DATASET_URI);
  final DatasetSink sink = sink(in, config);
  GenericRecordBuilder builder = new GenericRecordBuilder(INCOMPATIBLE_SCHEMA);
  GenericData.Record rec = builder.set("username", "koala").build();
  // We pass in a valid schema in the header, but an incompatible schema
  // was used to serialize the record
  Event badEvent = event(rec, INCOMPATIBLE_SCHEMA, SCHEMA_FILE, true);
  putToChannel(in, badEvent);
  // run the sink
  sink.start();
  sink.process();
  sink.stop();
  Assert.assertEquals("Good records should have been written",
      Sets.newHashSet(expected),
      read(Datasets.load(FILE_DATASET_URI)));
  Assert.assertEquals("Should not have rolled back", 0, remaining(in));
  Assert.assertEquals("Should have saved the bad event",
      Sets.newHashSet(AvroFlumeEvent.newBuilder()
          .setBody(ByteBuffer.wrap(badEvent.getBody()))
          .setHeaders(toUtf8Map(badEvent.getHeaders()))
          .build()),
      read(Datasets.load(ERROR_DATASET_URI, AvroFlumeEvent.class)));
}