This page collects typical usage examples of the Java method org.apache.flume.Event.setBody. If you are wondering what Event.setBody does, how to call it, or want to see it used in real code, the curated samples below may help. You can also explore the enclosing class org.apache.flume.Event for more detail.
Fifteen code examples of Event.setBody are listed below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the site surface better Java code samples.
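Before the project examples, here is a minimal sketch of the basic call pattern, assuming Flume's bundled SimpleEvent implementation (the class name SetBodyQuickStart is only for illustration). An event body is just a byte array, so text is typically encoded explicitly before calling setBody:

import org.apache.flume.Event;
import org.apache.flume.event.SimpleEvent;

import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

public class SetBodyQuickStart {
  public static void main(String[] args) {
    // Create an event and set its body to UTF-8 encoded bytes.
    Event event = new SimpleEvent();
    event.setBody("hello flume".getBytes(StandardCharsets.UTF_8));

    // Headers are optional metadata carried alongside the body.
    Map<String, String> headers = new HashMap<String, String>();
    headers.put("source", "quickstart");
    event.setHeaders(headers);

    // getBody() returns the bytes previously set with setBody().
    System.out.println(new String(event.getBody(), StandardCharsets.UTF_8));
  }
}

The charset used when encoding should match whatever downstream interceptors or sinks use when decoding getBody(), which is why several of the examples below decode and re-encode the body with an explicit charset.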
Example 1: intercept
import org.apache.flume.Event; // the required package/class for this method
@Override
public Event intercept(Event event) {
  Map<String, String> headers = event.getHeaders();
  String eventBody = new String(event.getBody(), UTF_8).trim();
  int index = eventBody.indexOf(' ');
  if (index == -1) {
    headers.put(this.header, this.defaultTag);
  } else {
    String appTag = eventBody.substring(0, index);
    headers.put(this.header, appTag);
  }
  // Strip the tag from the body only when one was found; calling
  // substring(-1) on a body with no space would throw.
  if (!this.preserveTag && index != -1) {
    String newEventBody = eventBody.substring(index).trim();
    event.setBody(newEventBody.getBytes(UTF_8));
  }
  return event;
}
Example 2: testMissingSchema
import org.apache.flume.Event; // the required package/class for this method
@Test
public void testMissingSchema() throws EventDeliveryException {
  final DatasetSink sink = sink(in, config);
  Event badEvent = new SimpleEvent();
  badEvent.setHeaders(Maps.<String, String>newHashMap());
  badEvent.setBody(serialize(expected.get(0), RECORD_SCHEMA));
  putToChannel(in, badEvent);

  // run the sink
  sink.start();
  assertThrows("Should fail", EventDeliveryException.class,
      new Callable() {
        @Override
        public Object call() throws EventDeliveryException {
          sink.process();
          return null;
        }
      });
  sink.stop();

  Assert.assertEquals("Should have rolled back",
      expected.size() + 1, remaining(in));
}
Example 3: withBody
import org.apache.flume.Event; // the required package/class for this method
/**
 * Instantiate an Event instance based on the provided body and headers.
 * If <code>headers</code> is <code>null</code>, it is ignored.
 * @param body the event body; if null, an empty body is used
 * @param headers optional headers to copy into the event; may be null
 * @return a new Event carrying the given body and headers
 */
public static Event withBody(byte[] body, Map<String, String> headers) {
  Event event = new SimpleEvent();
  if (body == null) {
    body = new byte[0];
  }
  event.setBody(body);
  if (headers != null) {
    event.setHeaders(new HashMap<String, String>(headers));
  }
  return event;
}
Example 4: intercept
import org.apache.flume.Event; // the required package/class for this method
@Override
public Event intercept(Event event) {
  String origBody = new String(event.getBody(), charset);
  Matcher matcher = searchPattern.matcher(origBody);
  String newBody = matcher.replaceAll(replaceString);
  event.setBody(newBody.getBytes(charset));
  return event;
}
Example 5: event
import org.apache.flume.Event; // the required package/class for this method
public static Event event(
    Object datum, Schema schema, File file, boolean useURI) {
  Map<String, String> headers = Maps.newHashMap();
  if (useURI) {
    headers.put(DatasetSinkConstants.AVRO_SCHEMA_URL_HEADER,
        file.getAbsoluteFile().toURI().toString());
  } else {
    headers.put(DatasetSinkConstants.AVRO_SCHEMA_LITERAL_HEADER,
        schema.toString());
  }
  Event e = new SimpleEvent();
  e.setBody(serialize(datum, schema));
  e.setHeaders(headers);
  return e;
}
Example 6: testSingleWriterUseHeaders
import org.apache.flume.Event; // the required package/class for this method
@Test
public void testSingleWriterUseHeaders()
    throws Exception {
  String[] colNames = {COL1, COL2};
  String PART1_NAME = "country";
  String PART2_NAME = "hour";
  String[] partNames = {PART1_NAME, PART2_NAME};
  List<String> partitionVals = null;
  String PART1_VALUE = "%{" + PART1_NAME + "}";
  String PART2_VALUE = "%y-%m-%d-%k";
  partitionVals = new ArrayList<String>(2);
  partitionVals.add(PART1_VALUE);
  partitionVals.add(PART2_VALUE);

  String tblName = "hourlydata";
  TestUtil.dropDB(conf, dbName2);
  String dbLocation = dbFolder.newFolder(dbName2).getCanonicalPath() + ".db";
  dbLocation = dbLocation.replaceAll("\\\\", "/"); // for windows paths
  TestUtil.createDbAndTable(driver, dbName2, tblName, partitionVals, colNames,
      colTypes, partNames, dbLocation);

  int totalRecords = 4;
  int batchSize = 2;
  int batchCount = totalRecords / batchSize;

  Context context = new Context();
  context.put("hive.metastore", metaStoreURI);
  context.put("hive.database", dbName2);
  context.put("hive.table", tblName);
  context.put("hive.partition", PART1_VALUE + "," + PART2_VALUE);
  context.put("autoCreatePartitions", "true");
  context.put("useLocalTimeStamp", "false");
  context.put("batchSize", "" + batchSize);
  context.put("serializer", HiveDelimitedTextSerializer.ALIAS);
  context.put("serializer.fieldnames", COL1 + ",," + COL2 + ",");
  context.put("heartBeatInterval", "0");

  Channel channel = startSink(sink, context);
  Calendar eventDate = Calendar.getInstance();
  List<String> bodies = Lists.newArrayList();

  // push events in two batches - two per batch. each batch is diff hour
  Transaction txn = channel.getTransaction();
  txn.begin();
  for (int j = 1; j <= totalRecords; j++) {
    Event event = new SimpleEvent();
    String body = j + ",blah,This is a log message,other stuff";
    event.setBody(body.getBytes());
    eventDate.clear();
    eventDate.set(2014, 03, 03, j % batchCount, 1); // yy mm dd hh mm
    event.getHeaders().put("timestamp",
        String.valueOf(eventDate.getTimeInMillis()));
    event.getHeaders().put(PART1_NAME, "Asia");
    bodies.add(body);
    channel.put(event);
  }
  // execute sink to process the events
  txn.commit();
  txn.close();

  checkRecordCountInTable(0, dbName2, tblName);
  for (int i = 0; i < batchCount; i++) {
    sink.process();
  }
  checkRecordCountInTable(totalRecords, dbName2, tblName);
  sink.stop();

  // verify counters
  SinkCounter counter = sink.getCounter();
  Assert.assertEquals(2, counter.getConnectionCreatedCount());
  Assert.assertEquals(2, counter.getConnectionClosedCount());
  Assert.assertEquals(2, counter.getBatchCompleteCount());
  Assert.assertEquals(0, counter.getBatchEmptyCount());
  Assert.assertEquals(0, counter.getConnectionFailedCount());
  Assert.assertEquals(4, counter.getEventDrainAttemptCount());
  Assert.assertEquals(4, counter.getEventDrainSuccessCount());
}
Example 7: testMissingSchemaWithSavePolicy
import org.apache.flume.Event; // the required package/class for this method
@Test
public void testMissingSchemaWithSavePolicy() throws EventDeliveryException {
  if (Datasets.exists(ERROR_DATASET_URI)) {
    Datasets.delete(ERROR_DATASET_URI);
  }
  config.put(DatasetSinkConstants.CONFIG_FAILURE_POLICY,
      DatasetSinkConstants.SAVE_FAILURE_POLICY);
  config.put(DatasetSinkConstants.CONFIG_KITE_ERROR_DATASET_URI,
      ERROR_DATASET_URI);
  final DatasetSink sink = sink(in, config);

  Event badEvent = new SimpleEvent();
  badEvent.setHeaders(Maps.<String, String>newHashMap());
  badEvent.setBody(serialize(expected.get(0), RECORD_SCHEMA));
  putToChannel(in, badEvent);

  // run the sink
  sink.start();
  sink.process();
  sink.stop();

  Assert.assertEquals("Good records should have been written",
      Sets.newHashSet(expected),
      read(Datasets.load(FILE_DATASET_URI)));
  Assert.assertEquals("Should not have rolled back", 0, remaining(in));
  Assert.assertEquals("Should have saved the bad event",
      Sets.newHashSet(AvroFlumeEvent.newBuilder()
          .setBody(ByteBuffer.wrap(badEvent.getBody()))
          .setHeaders(toUtf8Map(badEvent.getHeaders()))
          .build()),
      read(Datasets.load(ERROR_DATASET_URI, AvroFlumeEvent.class)));
}
Example 8: testTextAppend
import org.apache.flume.Event; // the required package/class for this method
@Test
public void testTextAppend() throws InterruptedException, LifecycleException,
    EventDeliveryException, IOException {
  LOG.debug("Starting...");
  final long rollCount = 3;
  final long batchSize = 2;
  final String fileName = "FlumeData";
  String newPath = testPath + "/singleTextBucket";
  int totalEvents = 0;
  int i = 1, j = 1;

  // clear the test directory
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(newPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);

  Context context = new Context();
  // context.put("hdfs.path", testPath + "/%Y-%m-%d/%H");
  context.put("hdfs.path", newPath);
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.batchSize", String.valueOf(batchSize));
  context.put("hdfs.writeFormat", "Text");
  context.put("hdfs.fileType", "DataStream");
  Configurables.configure(sink, context);

  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);
  sink.setChannel(channel);
  sink.start();

  Calendar eventDate = Calendar.getInstance();
  List<String> bodies = Lists.newArrayList();

  // push the event batches into channel
  for (i = 1; i < 4; i++) {
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (j = 1; j <= batchSize; j++) {
      Event event = new SimpleEvent();
      eventDate.clear();
      eventDate.set(2011, i, i, i, 0); // yy mm dd
      event.getHeaders().put("timestamp",
          String.valueOf(eventDate.getTimeInMillis()));
      event.getHeaders().put("hostname", "Host" + i);
      String body = "Test." + i + "." + j;
      event.setBody(body.getBytes());
      bodies.add(body);
      channel.put(event);
      totalEvents++;
    }
    txn.commit();
    txn.close();
    // execute sink to process the events
    sink.process();
  }
  sink.stop();

  // loop through all the files generated and check their contents
  FileStatus[] dirStat = fs.listStatus(dirPath);
  Path[] fList = FileUtil.stat2Paths(dirStat);
  // check that the roll happened correctly for the given data
  long expectedFiles = totalEvents / rollCount;
  if (totalEvents % rollCount > 0) expectedFiles++;
  Assert.assertEquals("num files wrong, found: " +
      Lists.newArrayList(fList), expectedFiles, fList.length);
  verifyOutputTextFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}
Example 9: testSlowAppendFailure
import org.apache.flume.Event; // the required package/class for this method
@Test
public void testSlowAppendFailure() throws InterruptedException,
    LifecycleException, EventDeliveryException, IOException {
  LOG.debug("Starting...");
  final String fileName = "FlumeData";
  final long rollCount = 5;
  final long batchSize = 2;
  final int numBatches = 2;
  String newPath = testPath + "/singleBucket";
  int i = 1, j = 1;

  // clear the test directory
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(newPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);

  // create HDFS sink with slow writer
  HDFSTestWriterFactory badWriterFactory = new HDFSTestWriterFactory();
  sink = new HDFSEventSink(badWriterFactory);

  Context context = new Context();
  context.put("hdfs.path", newPath);
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.batchSize", String.valueOf(batchSize));
  context.put("hdfs.fileType", HDFSTestWriterFactory.TestSequenceFileType);
  context.put("hdfs.callTimeout", Long.toString(1000));
  Configurables.configure(sink, context);

  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);
  sink.setChannel(channel);
  sink.start();

  Calendar eventDate = Calendar.getInstance();
  // push the event batches into channel
  for (i = 0; i < numBatches; i++) {
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (j = 1; j <= batchSize; j++) {
      Event event = new SimpleEvent();
      eventDate.clear();
      eventDate.set(2011, i, i, i, 0); // yy mm dd
      event.getHeaders().put("timestamp",
          String.valueOf(eventDate.getTimeInMillis()));
      event.getHeaders().put("hostname", "Host" + i);
      event.getHeaders().put("slow", "1500");
      event.setBody(("Test." + i + "." + j).getBytes());
      channel.put(event);
    }
    txn.commit();
    txn.close();
    // execute sink to process the events
    Status status = sink.process();
    // verify that the append returned backoff due to timeout
    Assert.assertEquals(Status.BACKOFF, status);
  }
  sink.stop();
}
Example 10: testAppend
import org.apache.flume.Event; // the required package/class for this method
@Test
public void testAppend() throws InterruptedException, LifecycleException,
    EventDeliveryException, IOException {
  Context context = new Context();
  context.put("sink.directory", tmpDir.getPath());
  context.put("sink.rollInterval", "1");
  context.put("sink.batchSize", "1");
  Configurables.configure(sink, context);

  Channel channel = new PseudoTxnMemoryChannel();
  Configurables.configure(channel, context);
  sink.setChannel(channel);
  sink.start();

  for (int i = 0; i < 10; i++) {
    Event event = new SimpleEvent();
    event.setBody(("Test event " + i).getBytes());
    channel.put(event);
    sink.process();
    Thread.sleep(500);
  }
  sink.stop();

  for (String file : sink.getDirectory().list()) {
    BufferedReader reader =
        new BufferedReader(new FileReader(new File(sink.getDirectory(), file)));
    String lastLine = null;
    String currentLine = null;
    while ((currentLine = reader.readLine()) != null) {
      lastLine = currentLine;
    }
    logger.debug("Produced file:{} lastLine:{}", file, lastLine);
    reader.close();
  }
}
Example 11: testCloseOnIdle
import org.apache.flume.Event; // the required package/class for this method
@Test
public void testCloseOnIdle() throws IOException, EventDeliveryException, InterruptedException {
  String hdfsPath = testPath + "/idleClose";
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(hdfsPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);

  Context context = new Context();
  context.put("hdfs.path", hdfsPath);
  /*
   * All three rolling methods are disabled so the only
   * way a file can roll is through the idle timeout.
   */
  context.put("hdfs.rollCount", "0");
  context.put("hdfs.rollSize", "0");
  context.put("hdfs.rollInterval", "0");
  context.put("hdfs.batchSize", "2");
  context.put("hdfs.idleTimeout", "1");
  Configurables.configure(sink, context);

  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);
  sink.setChannel(channel);
  sink.start();

  Transaction txn = channel.getTransaction();
  txn.begin();
  for (int i = 0; i < 10; i++) {
    Event event = new SimpleEvent();
    event.setBody(("test event " + i).getBytes());
    channel.put(event);
  }
  txn.commit();
  txn.close();

  sink.process();
  sink.process();
  Thread.sleep(1001);
  // The previous file should have timed out by now. This can throw a
  // BucketClosedException (from the bucketWriter having closed); this is
  // not an issue, as the sink will retry and get a fresh bucketWriter so
  // long as the onClose handler properly removes bucket writers that were
  // closed.
  sink.process();
  sink.process();
  Thread.sleep(500); // shouldn't be enough for a timeout to occur
  sink.process();
  sink.process();
  sink.stop();

  FileStatus[] dirStat = fs.listStatus(dirPath);
  Path[] fList = FileUtil.stat2Paths(dirStat);
  Assert.assertEquals("Incorrect content of the directory " + StringUtils.join(fList, ","),
      2, fList.length);
  Assert.assertTrue(!fList[0].getName().endsWith(".tmp") &&
      !fList[1].getName().endsWith(".tmp"));
  fs.close();
}
Example 12: testSingleWriterSimplePartitionedTable
import org.apache.flume.Event; // the required package/class for this method
@Test
public void testSingleWriterSimplePartitionedTable()
    throws EventDeliveryException, IOException, CommandNeedRetryException {
  int totalRecords = 4;
  int batchSize = 2;
  int batchCount = totalRecords / batchSize;

  Context context = new Context();
  context.put("hive.metastore", metaStoreURI);
  context.put("hive.database", dbName);
  context.put("hive.table", tblName);
  context.put("hive.partition", PART1_VALUE + "," + PART2_VALUE);
  context.put("autoCreatePartitions", "false");
  context.put("batchSize", "" + batchSize);
  context.put("serializer", HiveDelimitedTextSerializer.ALIAS);
  context.put("serializer.fieldnames", COL1 + ",," + COL2 + ",");
  context.put("heartBeatInterval", "0");

  Channel channel = startSink(sink, context);
  List<String> bodies = Lists.newArrayList();

  // push the events in two batches
  Transaction txn = channel.getTransaction();
  txn.begin();
  for (int j = 1; j <= totalRecords; j++) {
    Event event = new SimpleEvent();
    String body = j + ",blah,This is a log message,other stuff";
    event.setBody(body.getBytes());
    bodies.add(body);
    channel.put(event);
  }
  // execute sink to process the events
  txn.commit();
  txn.close();

  checkRecordCountInTable(0, dbName, tblName);
  for (int i = 0; i < batchCount; i++) {
    sink.process();
  }
  sink.stop();
  checkRecordCountInTable(totalRecords, dbName, tblName);
}
Example 13: testSingleWriterSimpleUnPartitionedTable
import org.apache.flume.Event; // the required package/class for this method
@Test
public void testSingleWriterSimpleUnPartitionedTable()
    throws Exception {
  TestUtil.dropDB(conf, dbName2);
  String dbLocation = dbFolder.newFolder(dbName2).getCanonicalPath() + ".db";
  dbLocation = dbLocation.replaceAll("\\\\", "/"); // for windows paths
  TestUtil.createDbAndTable(driver, dbName2, tblName2, null, colNames2, colTypes2,
      null, dbLocation);

  try {
    int totalRecords = 4;
    int batchSize = 2;
    int batchCount = totalRecords / batchSize;

    Context context = new Context();
    context.put("hive.metastore", metaStoreURI);
    context.put("hive.database", dbName2);
    context.put("hive.table", tblName2);
    context.put("autoCreatePartitions", "false");
    context.put("batchSize", "" + batchSize);
    context.put("serializer", HiveDelimitedTextSerializer.ALIAS);
    context.put("serializer.fieldnames", COL1 + ",," + COL2 + ",");
    context.put("heartBeatInterval", "0");

    Channel channel = startSink(sink, context);
    List<String> bodies = Lists.newArrayList();

    // Push the events in two batches
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (int j = 1; j <= totalRecords; j++) {
      Event event = new SimpleEvent();
      String body = j + ",blah,This is a log message,other stuff";
      event.setBody(body.getBytes());
      bodies.add(body);
      channel.put(event);
    }
    txn.commit();
    txn.close();

    checkRecordCountInTable(0, dbName2, tblName2);
    for (int i = 0; i < batchCount; i++) {
      sink.process();
    }
    // check before & after stopping sink
    checkRecordCountInTable(totalRecords, dbName2, tblName2);
    sink.stop();
    checkRecordCountInTable(totalRecords, dbName2, tblName2);
  } finally {
    TestUtil.dropDB(conf, dbName2);
  }
}
Example 14: doTestTextBatchAppend
import org.apache.flume.Event; // the required package/class for this method
public void doTestTextBatchAppend(boolean useRawLocalFileSystem)
    throws Exception {
  LOG.debug("Starting...");
  final long rollCount = 10;
  final long batchSize = 2;
  final String fileName = "FlumeData";
  String newPath = testPath + "/singleTextBucket";
  int totalEvents = 0;
  int i = 1, j = 1;

  // clear the test directory
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(newPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);

  Context context = new Context();
  // context.put("hdfs.path", testPath + "/%Y-%m-%d/%H");
  context.put("hdfs.path", newPath);
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.rollInterval", "0");
  context.put("hdfs.rollSize", "0");
  context.put("hdfs.batchSize", String.valueOf(batchSize));
  context.put("hdfs.writeFormat", "Text");
  context.put("hdfs.useRawLocalFileSystem",
      Boolean.toString(useRawLocalFileSystem));
  context.put("hdfs.fileType", "DataStream");
  Configurables.configure(sink, context);

  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);
  sink.setChannel(channel);
  sink.start();

  Calendar eventDate = Calendar.getInstance();
  List<String> bodies = Lists.newArrayList();

  // push enough event batches into the channel to force multiple rolls
  for (i = 1; i <= (rollCount * 10) / batchSize; i++) {
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (j = 1; j <= batchSize; j++) {
      Event event = new SimpleEvent();
      eventDate.clear();
      eventDate.set(2011, i, i, i, 0); // yy mm dd
      String body = "Test." + i + "." + j;
      event.setBody(body.getBytes());
      bodies.add(body);
      channel.put(event);
      totalEvents++;
    }
    txn.commit();
    txn.close();
    // execute sink to process the events
    sink.process();
  }
  sink.stop();

  // loop through all the files generated and check their contents
  FileStatus[] dirStat = fs.listStatus(dirPath);
  Path[] fList = FileUtil.stat2Paths(dirStat);
  // check that the roll happened correctly for the given data
  long expectedFiles = totalEvents / rollCount;
  if (totalEvents % rollCount > 0) expectedFiles++;
  Assert.assertEquals("num files wrong, found: " +
      Lists.newArrayList(fList), expectedFiles, fList.length);
  // check the contents of all the files
  verifyOutputTextFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}
Example 15: testAvroAppend
import org.apache.flume.Event; // the required package/class for this method
@Test
public void testAvroAppend() throws InterruptedException, LifecycleException,
    EventDeliveryException, IOException {
  LOG.debug("Starting...");
  final long rollCount = 3;
  final long batchSize = 2;
  final String fileName = "FlumeData";
  String newPath = testPath + "/singleTextBucket";
  int totalEvents = 0;
  int i = 1, j = 1;

  // clear the test directory
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(newPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);

  Context context = new Context();
  // context.put("hdfs.path", testPath + "/%Y-%m-%d/%H");
  context.put("hdfs.path", newPath);
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.batchSize", String.valueOf(batchSize));
  context.put("hdfs.writeFormat", "Text");
  context.put("hdfs.fileType", "DataStream");
  context.put("serializer", "AVRO_EVENT");
  Configurables.configure(sink, context);

  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);
  sink.setChannel(channel);
  sink.start();

  Calendar eventDate = Calendar.getInstance();
  List<String> bodies = Lists.newArrayList();

  // push the event batches into channel
  for (i = 1; i < 4; i++) {
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (j = 1; j <= batchSize; j++) {
      Event event = new SimpleEvent();
      eventDate.clear();
      eventDate.set(2011, i, i, i, 0); // yy mm dd
      event.getHeaders().put("timestamp",
          String.valueOf(eventDate.getTimeInMillis()));
      event.getHeaders().put("hostname", "Host" + i);
      String body = "Test." + i + "." + j;
      event.setBody(body.getBytes());
      bodies.add(body);
      channel.put(event);
      totalEvents++;
    }
    txn.commit();
    txn.close();
    // execute sink to process the events
    sink.process();
  }
  sink.stop();

  // loop through all the files generated and check their contents
  FileStatus[] dirStat = fs.listStatus(dirPath);
  Path[] fList = FileUtil.stat2Paths(dirStat);
  // check that the roll happened correctly for the given data
  long expectedFiles = totalEvents / rollCount;
  if (totalEvents % rollCount > 0) expectedFiles++;
  Assert.assertEquals("num files wrong, found: " +
      Lists.newArrayList(fList), expectedFiles, fList.length);
  verifyOutputAvroFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}