This article collects typical usage examples of the Java method org.apache.flume.event.EventBuilder.withBody. If you are unsure how to use EventBuilder.withBody, or are looking for concrete examples of it, the hand-picked code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.flume.event.EventBuilder.
The following section presents 15 code examples of the EventBuilder.withBody method, ordered by popularity by default.
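Before the examples, a quick orientation: EventBuilder.withBody is a static factory with overloads for byte[] and String bodies, each optionally taking an initial header map. A minimal, self-contained sketch of those overloads (the header key/value below are made up for illustration):

import java.util.HashMap;
import java.util.Map;
import org.apache.flume.Event;
import org.apache.flume.event.EventBuilder;
import com.google.common.base.Charsets;

public class WithBodyOverloads {
  public static void main(String[] args) {
    // String body encoded with an explicit charset
    Event e1 = EventBuilder.withBody("hello", Charsets.UTF_8);

    // raw byte[] body
    Event e2 = EventBuilder.withBody(new byte[] { 1, 2, 3 });

    // body plus an initial header map (key/value here are illustrative)
    Map<String, String> headers = new HashMap<String, String>();
    headers.put("host", "example-host");
    Event e3 = EventBuilder.withBody("hello", Charsets.UTF_8, headers);

    System.out.println(new String(e1.getBody(), Charsets.UTF_8)); // hello
    System.out.println(e2.getBody().length);                      // 3
    System.out.println(e3.getHeaders());                          // {host=example-host}
  }
}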
Example 1: testUseHostname

import org.apache.flume.event.EventBuilder; // import required by this example

/**
 * Ensure the host name can be used instead of the host IP.
 */
@Test
public void testUseHostname() throws Exception {
  Context ctx = new Context();
  ctx.put("useIP", "false");
  Interceptor.Builder builder = InterceptorBuilderFactory
      .newInstance(InterceptorType.HOST.toString());
  builder.configure(ctx);
  Interceptor interceptor = builder.build();

  final String ORIGINAL_HOST = "originalhost";
  Event eventBeforeIntercept = EventBuilder.withBody("test event", Charsets.UTF_8);
  eventBeforeIntercept.getHeaders().put(Constants.HOST, ORIGINAL_HOST);
  Assert.assertEquals(ORIGINAL_HOST,
      eventBeforeIntercept.getHeaders().get(Constants.HOST));

  String expectedHost = InetAddress.getLocalHost().getCanonicalHostName();
  Event eventAfterIntercept = interceptor.intercept(eventBeforeIntercept);
  String actualHost = eventAfterIntercept.getHeaders().get(Constants.HOST);
  Assert.assertNotNull(actualHost);
  Assert.assertEquals(expectedHost, actualHost);
}
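For contrast with the test above: with the interceptor's default useIP=true, the HOST header receives the local IP address instead of the canonical host name. A minimal sketch assuming the same imports and test scaffolding (this fragment is illustrative and not part of the original listing):

Context ctx = new Context();
ctx.put("useIP", "true"); // the default setting
Interceptor.Builder builder = InterceptorBuilderFactory
    .newInstance(InterceptorType.HOST.toString());
builder.configure(ctx);
Interceptor interceptor = builder.build();

Event event = interceptor.intercept(EventBuilder.withBody("test event", Charsets.UTF_8));
// with useIP=true the interceptor stamps the local IP address
Assert.assertEquals(InetAddress.getLocalHost().getHostAddress(),
    event.getHeaders().get(Constants.HOST));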
Example 2: testInUsePrefix

import org.apache.flume.event.EventBuilder; // import required by this example

@Test
public void testInUsePrefix() throws IOException, InterruptedException {
  final int ROLL_INTERVAL = 1000; // seconds; make sure it doesn't change in the course of the test
  final String PREFIX = "BRNO_IS_CITY_IN_CZECH_REPUBLIC";
  MockHDFSWriter hdfsWriter = new MockHDFSWriter();
  HDFSTextSerializer formatter = new HDFSTextSerializer();
  BucketWriter bucketWriter = new BucketWriter(
      ROLL_INTERVAL, 0, 0, 0, ctx, "/tmp", "file", PREFIX, ".tmp", null, null,
      SequenceFile.CompressionType.NONE, hdfsWriter, timedRollerPool, proxy,
      new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()), 0, null, null, 30000,
      Executors.newSingleThreadExecutor(), 0, 0);

  Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
  bucketWriter.append(e);

  Assert.assertTrue("Incorrect in use prefix", hdfsWriter.getOpenedFilePath().contains(PREFIX));
}
Example 3: testInUseSuffix

import org.apache.flume.event.EventBuilder; // import required by this example

@Test
public void testInUseSuffix() throws IOException, InterruptedException {
  final int ROLL_INTERVAL = 1000; // seconds; make sure it doesn't change in the course of the test
  final String SUFFIX = "WELCOME_TO_THE_HELLMOUNTH";
  MockHDFSWriter hdfsWriter = new MockHDFSWriter();
  HDFSTextSerializer serializer = new HDFSTextSerializer();
  BucketWriter bucketWriter = new BucketWriter(
      ROLL_INTERVAL, 0, 0, 0, ctx, "/tmp", "file", "", SUFFIX, null, null,
      SequenceFile.CompressionType.NONE, hdfsWriter, timedRollerPool, proxy,
      new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()), 0, null, null, 30000,
      Executors.newSingleThreadExecutor(), 0, 0);

  Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
  bucketWriter.append(e);

  Assert.assertTrue("Incorrect in use suffix", hdfsWriter.getOpenedFilePath().contains(SUFFIX));
}
Example 4: prepEventData

import org.apache.flume.event.EventBuilder; // import required by this example

private void prepEventData(int bufferSize) {
  buffer = new byte[bufferSize];
  Arrays.fill(buffer, Byte.MAX_VALUE);

  if (batchSize > 1) {
    // create a list of event objects for the batch test
    eventBatchList = new ArrayList<Event>();
    for (int i = 0; i < batchSize; i++) {
      eventBatchList.add(EventBuilder.withBody(buffer));
    }
  } else {
    // create a single event for the non-batch test
    event = EventBuilder.withBody(buffer);
  }
}
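In the benchmark this helper belongs to, the prepared event or batch is later handed to a client elsewhere. As a rough sketch of that hand-off using the public RpcClient API (the host, port, and wiring below are assumptions for illustration, not part of the original test):

import java.util.Arrays;
import java.util.List;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.api.RpcClient;
import org.apache.flume.api.RpcClientFactory;
import org.apache.flume.event.EventBuilder;

public class SendPreparedEvents {
  public static void main(String[] args) throws EventDeliveryException {
    byte[] buffer = new byte[64];
    Arrays.fill(buffer, Byte.MAX_VALUE);

    // hostname/port are placeholders for a running Avro source
    RpcClient client = RpcClientFactory.getDefaultInstance("localhost", 41414);
    try {
      // single-event path, like the else-branch above
      client.append(EventBuilder.withBody(buffer));

      // batch path, mirroring eventBatchList above
      List<Event> batch = Arrays.asList(
          EventBuilder.withBody(buffer), EventBuilder.withBody(buffer));
      client.appendBatch(batch);
    } finally {
      client.close();
    }
  }
}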
Example 5: testSizeRoller

import org.apache.flume.event.EventBuilder; // import required by this example

@Test
public void testSizeRoller() throws IOException, InterruptedException {
  int maxBytes = 300;
  MockHDFSWriter hdfsWriter = new MockHDFSWriter();
  BucketWriter bucketWriter = new BucketWriter(
      0, maxBytes, 0, 0, ctx, "/tmp", "file", "", ".tmp", null, null,
      SequenceFile.CompressionType.NONE, hdfsWriter, timedRollerPool, proxy,
      new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()), 0, null, null, 30000,
      Executors.newSingleThreadExecutor(), 0, 0);

  // "foo" is 3 bytes, so 1000 events = 3000 bytes; rolling at 300 bytes yields 10 files
  Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
  for (int i = 0; i < 1000; i++) {
    bucketWriter.append(e);
  }

  logger.info("Number of events written: {}", hdfsWriter.getEventsWritten());
  logger.info("Number of bytes written: {}", hdfsWriter.getBytesWritten());
  logger.info("Number of files opened: {}", hdfsWriter.getFilesOpened());

  Assert.assertEquals("events written", 1000, hdfsWriter.getEventsWritten());
  Assert.assertEquals("bytes written", 3000, hdfsWriter.getBytesWritten());
  Assert.assertEquals("files opened", 10, hdfsWriter.getFilesOpened());
}
Example 6: testReplace

import org.apache.flume.event.EventBuilder; // import required by this example

@Test
public void testReplace() throws ClassNotFoundException,
    InstantiationException, IllegalAccessException {
  Interceptor.Builder builder = InterceptorBuilderFactory.newInstance(
      InterceptorType.STATIC.toString());
  Context ctx = new Context();
  ctx.put(Constants.PRESERVE, "false");
  ctx.put(Constants.VALUE, "replacement value");
  builder.configure(ctx);
  Interceptor interceptor = builder.build();

  Event event = EventBuilder.withBody("test", Charsets.UTF_8);
  event.getHeaders().put(Constants.KEY, "incumbent value");
  Assert.assertNotNull(event.getHeaders().get(Constants.KEY));

  event = interceptor.intercept(event);
  String val = event.getHeaders().get(Constants.KEY);
  Assert.assertNotNull(val);
  Assert.assertEquals("replacement value", val);
}
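The counterpart configuration, Constants.PRESERVE set to "true", leaves an incumbent header value untouched. A minimal sketch under the same setup as the test above (illustrative, not part of the original listing):

Context ctx = new Context();
ctx.put(Constants.PRESERVE, "true");
ctx.put(Constants.VALUE, "replacement value");
Interceptor.Builder builder = InterceptorBuilderFactory.newInstance(
    InterceptorType.STATIC.toString());
builder.configure(ctx);
Interceptor interceptor = builder.build();

Event event = EventBuilder.withBody("test", Charsets.UTF_8);
event.getHeaders().put(Constants.KEY, "incumbent value");
event = interceptor.intercept(event);
// the pre-existing value survives because preserveExisting is enabled
Assert.assertEquals("incumbent value", event.getHeaders().get(Constants.KEY));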
Example 7: testCallbackOnClose

import org.apache.flume.event.EventBuilder; // import required by this example

@Test
public void testCallbackOnClose() throws IOException, InterruptedException {
  final int ROLL_INTERVAL = 1000; // seconds; make sure it doesn't change in the course of the test
  final String SUFFIX = "WELCOME_TO_THE_EREBOR";
  final AtomicBoolean callbackCalled = new AtomicBoolean(false);

  MockHDFSWriter hdfsWriter = new MockHDFSWriter();
  BucketWriter bucketWriter = new BucketWriter(
      ROLL_INTERVAL, 0, 0, 0, ctx, "/tmp", "file", "", SUFFIX, null, null,
      SequenceFile.CompressionType.NONE, hdfsWriter, timedRollerPool, proxy,
      new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()), 0,
      new HDFSEventSink.WriterCallback() {
        @Override
        public void run(String filePath) {
          callbackCalled.set(true);
        }
      }, "blah", 30000, Executors.newSingleThreadExecutor(), 0, 0);

  Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
  bucketWriter.append(e);
  bucketWriter.close(true);

  Assert.assertTrue(callbackCalled.get());
}
Example 8: generateSyslogEvents

import org.apache.flume.event.EventBuilder; // import required by this example

private static List<Event> generateSyslogEvents() {
  List<Event> list = Lists.newArrayList();
  Event e;

  // generate one that we supposedly parsed with SyslogTcpSource
  e = EventBuilder.withBody("Apr 7 01:00:00 host Msg 01", Charsets.UTF_8);
  e.getHeaders().put(SyslogUtils.SYSLOG_FACILITY, "1");
  e.getHeaders().put(SyslogUtils.SYSLOG_SEVERITY, "2");
  list.add(e);

  // generate another, supposedly parsed with SyslogTcpSource, with a 2-digit date
  e = EventBuilder.withBody("Apr 22 01:00:00 host Msg 02", Charsets.UTF_8);
  e.getHeaders().put(SyslogUtils.SYSLOG_FACILITY, "1");
  e.getHeaders().put(SyslogUtils.SYSLOG_SEVERITY, "3");
  list.add(e);

  // generate a "raw" syslog event
  e = EventBuilder.withBody("<8>Apr 22 01:00:00 host Msg 03", Charsets.UTF_8);
  list.add(e);

  return list;
}
Example 9: testGrokIfNotMatchDropEventRetain

import org.apache.flume.event.EventBuilder; // import required by this example

@Test
public void testGrokIfNotMatchDropEventRetain() throws Exception {
  Context context = new Context();
  context.put(MorphlineHandlerImpl.MORPHLINE_FILE_PARAM,
      RESOURCES_DIR + "/test-morphlines/grokIfNotMatchDropRecord.conf");

  String msg = "<164>Feb 4 10:46:14 syslog sshd[607]: Server listening on 0.0.0.0 port 22.";
  Event input = EventBuilder.withBody(null, ImmutableMap.of(Fields.MESSAGE, msg));
  Event actual = build(context).intercept(input);

  Map<String, String> expected = new HashMap<String, String>();
  expected.put(Fields.MESSAGE, msg);
  expected.put("syslog_pri", "164");
  expected.put("syslog_timestamp", "Feb 4 10:46:14");
  expected.put("syslog_hostname", "syslog");
  expected.put("syslog_program", "sshd");
  expected.put("syslog_pid", "607");
  expected.put("syslog_message", "Server listening on 0.0.0.0 port 22.");
  Event expectedEvent = EventBuilder.withBody(null, expected);

  assertEqualsEvent(expectedEvent, actual);
}
Example 10: testGrokIfNotMatchDropEventDrop

import org.apache.flume.event.EventBuilder; // import required by this example

/* the leading XXXXX... prefix does not match the grok regex, so we expect the event to be dropped */
@Test
public void testGrokIfNotMatchDropEventDrop() throws Exception {
  Context context = new Context();
  context.put(MorphlineHandlerImpl.MORPHLINE_FILE_PARAM,
      RESOURCES_DIR + "/test-morphlines/grokIfNotMatchDropRecord.conf");
  String msg = "<XXXXXXXXXXXXX164>Feb 4 10:46:14 syslog sshd[607]: Server listening on 0.0.0.0" +
      " port 22.";
  Event input = EventBuilder.withBody(null, ImmutableMap.of(Fields.MESSAGE, msg));
  Event actual = build(context).intercept(input);
  assertNull(actual);
}
Example 11: getEvents

import org.apache.flume.event.EventBuilder; // import required by this example

/**
 * {@inheritDoc}
 */
@SuppressWarnings("unchecked")
@Override
public List<Event> getEvents(HttpServletRequest request) throws Exception {
  Map<String, String> headers = new HashMap<String, String>();
  InputStream inputStream = request.getInputStream();

  // copy each request parameter (first value only) into the event headers
  Map<String, String[]> parameters = request.getParameterMap();
  for (String parameter : parameters.keySet()) {
    String value = parameters.get(parameter)[0];
    if (LOG.isDebugEnabled() && LogPrivacyUtil.allowLogRawData()) {
      LOG.debug("Setting Header [Key, Value] as [{},{}] ", parameter, value);
    }
    headers.put(parameter, value);
  }
  for (String header : mandatoryHeaders) {
    Preconditions.checkArgument(headers.containsKey(header),
        "Please specify " + header + " parameter in the request.");
  }

  // the request body becomes the event body
  ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
  try {
    IOUtils.copy(inputStream, outputStream);
    LOG.debug("Building an Event with stream of size -- {}", outputStream.size());
    Event event = EventBuilder.withBody(outputStream.toByteArray(), headers);
    event.setHeaders(headers);
    List<Event> eventList = new ArrayList<Event>();
    eventList.add(event);
    return eventList;
  } finally {
    outputStream.close();
    inputStream.close();
  }
}
Example 12: testProcess

import org.apache.flume.event.EventBuilder; // import required by this example

@Test
public void testProcess() throws Exception {
  Event event = EventBuilder.withBody("test event 1", Charsets.UTF_8);
  src = new ThriftTestingSource(ThriftTestingSource.HandlerType.OK.name(),
      port, ThriftRpcClient.COMPACT_PROTOCOL);

  channel.start();
  sink.start();

  Transaction transaction = channel.getTransaction();
  transaction.begin();
  for (int i = 0; i < 11; i++) {
    channel.put(event);
  }
  transaction.commit();
  transaction.close();

  for (int i = 0; i < 6; i++) {
    Sink.Status status = sink.process();
    Assert.assertEquals(Sink.Status.READY, status);
  }
  Assert.assertEquals(Sink.Status.BACKOFF, sink.process());
  sink.stop();

  Assert.assertEquals(11, src.flumeEvents.size());
  Assert.assertEquals(6, src.batchCount);
  Assert.assertEquals(0, src.individualCount);
}
Example 13: testFileSuffixNotGiven

import org.apache.flume.event.EventBuilder; // import required by this example

@Test
public void testFileSuffixNotGiven() throws IOException, InterruptedException {
  final int ROLL_INTERVAL = 1000; // seconds; make sure it doesn't change in the course of the test
  final String suffix = null;

  MockHDFSWriter hdfsWriter = new MockHDFSWriter();
  BucketWriter bucketWriter = new BucketWriter(
      ROLL_INTERVAL, 0, 0, 0, ctx, "/tmp", "file", "", ".tmp", suffix, null,
      SequenceFile.CompressionType.NONE, hdfsWriter, timedRollerPool, proxy,
      new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()), 0, null, null, 30000,
      Executors.newSingleThreadExecutor(), 0, 0);

  // Override the system time used by the writer so we know what file name to expect
  final long testTime = System.currentTimeMillis();
  Clock testClock = new Clock() {
    public long currentTimeMillis() {
      return testTime;
    }
  };
  bucketWriter.setClock(testClock);

  Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
  bucketWriter.append(e);

  // the file counter is seeded with the clock value and incremented before the first open,
  // hence testTime + 1
  Assert.assertTrue("Incorrect suffix", hdfsWriter.getOpenedFilePath().endsWith(
      Long.toString(testTime + 1) + ".tmp"));
}
Example 14: testMissingTable

import org.apache.flume.event.EventBuilder; // import required by this example

@Test(expected = FlumeException.class)
public void testMissingTable() throws Exception {
  deleteTable = false;
  ctx.put("batchSize", "2");
  AsyncHBaseSink sink = new AsyncHBaseSink(testUtility.getConfiguration());
  Configurables.configure(sink, ctx);
  // reset the context to a higher batchSize
  ctx.put("batchSize", "100");

  Channel channel = new MemoryChannel();
  Configurables.configure(channel, ctx);
  sink.setChannel(channel);
  sink.start();

  Transaction tx = channel.getTransaction();
  tx.begin();
  for (int i = 0; i < 3; i++) {
    Event e = EventBuilder.withBody(Bytes.toBytes(valBase + "-" + i));
    channel.put(e);
  }
  tx.commit();
  tx.close();

  sink.process();
  Assert.assertFalse(sink.isConfNull());

  HTable table = new HTable(testUtility.getConfiguration(), tableName);
  byte[][] results = getResults(table, 2);
  byte[] out;
  int found = 0;
  for (int i = 0; i < 2; i++) {
    for (int j = 0; j < 2; j++) {
      if (Arrays.equals(results[j], Bytes.toBytes(valBase + "-" + i))) {
        found++;
        break;
      }
    }
  }
  Assert.assertEquals(2, found);
  out = results[2];
  Assert.assertArrayEquals(Longs.toByteArray(2), out);

  sink.process();
  sink.stop();
}
Example 15: append

import org.apache.flume.event.EventBuilder; // import required by this example

public void append(ThriftFlumeEvent evt) {
  if (evt == null) {
    return;
  }

  // extract the legacy Thrift event headers into a Flume header map
  Map<String, String> headers = new HashMap<String, String>();
  headers.put(HOST, evt.getHost());
  headers.put(TIMESTAMP, Long.toString(evt.getTimestamp()));
  headers.put(PRIORITY, evt.getPriority().toString());
  headers.put(NANOS, Long.toString(evt.getNanos()));
  for (Entry<String, ByteBuffer> entry : evt.getFields().entrySet()) {
    headers.put(entry.getKey(), UTF_8.decode(entry.getValue()).toString());
  }
  headers.put(OG_EVENT, "yes");

  Event event = EventBuilder.withBody(evt.getBody(), headers);
  counterGroup.incrementAndGet("rpc.events");
  try {
    getChannelProcessor().processEvent(event);
  } catch (ChannelException ex) {
    LOG.warn("Failed to process event", ex);
    return;
  }
  counterGroup.incrementAndGet("rpc.successful");
}