

Java Event.setBody Method Code Examples

This article collects typical usage examples of the Java method org.apache.flume.Event.setBody. If you have been wondering what Event.setBody does, how to call it, or where to find working examples, the curated snippets below may help. You can also browse further usage examples of the enclosing class, org.apache.flume.Event.


Below are 15 code examples of the Event.setBody method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
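
Before diving into the project examples, here is a minimal, self-contained sketch of the basic call pattern (not taken from any of the projects below; the payload string and the UTF-8 charset choice are illustrative assumptions):

import java.nio.charset.StandardCharsets;

import org.apache.flume.Event;
import org.apache.flume.event.SimpleEvent;

public class SetBodyExample {
  public static void main(String[] args) {
    Event event = new SimpleEvent();  // starts with an empty body and empty headers
    // setBody replaces the event payload wholesale; the byte[] is the raw body
    event.setBody("hello flume".getBytes(StandardCharsets.UTF_8));  // illustrative payload
    System.out.println(new String(event.getBody(), StandardCharsets.UTF_8));
  }
}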

Example 1: intercept

import org.apache.flume.Event; // import the package/class this method depends on
@Override
public Event intercept(Event event) {
    Map<String, String> headers = event.getHeaders();

    String eventBody = new String(event.getBody(), UTF_8).trim();

    int index = eventBody.indexOf(' ');

    if (index == -1) {
        headers.put(this.header, this.defaultTag);
    } else {
        String appTag = eventBody.substring(0, index);
        headers.put(this.header, appTag);
    }

    // Strip the tag only when one was found: substring(-1) on a tag-less
    // body would throw StringIndexOutOfBoundsException.
    if (!this.preserveTag && index != -1) {
        String newEventBody = eventBody.substring(index).trim();
        // encode with the same charset used to decode, not the platform default
        event.setBody(newEventBody.getBytes(UTF_8));
    }

    return event;
}
 
Developer ID: InterestingLab, Project: flume-ng-interceptor-apptag, Lines: 26, Source: AppTagInterceptor.java
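
To see what this interceptor does to a single event, here is a self-contained sketch of the same tag-splitting logic (the sample body and the "default" tag are illustrative; the snippet inlines the logic rather than constructing the interceptor):

String eventBody = "myapp some log line".trim();  // hypothetical body: tag, space, payload
int index = eventBody.indexOf(' ');
String appTag = (index == -1) ? "default" : eventBody.substring(0, index);        // -> "myapp"
String newBody = (index == -1) ? eventBody : eventBody.substring(index).trim();  // -> "some log line"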

Example 2: testMissingSchema

import org.apache.flume.Event; // import the package/class this method depends on
@Test
public void testMissingSchema() throws EventDeliveryException {
  final DatasetSink sink = sink(in, config);

  Event badEvent = new SimpleEvent();
  badEvent.setHeaders(Maps.<String, String>newHashMap());
  badEvent.setBody(serialize(expected.get(0), RECORD_SCHEMA));
  putToChannel(in, badEvent);

  // run the sink
  sink.start();
  assertThrows("Should fail", EventDeliveryException.class,
      new Callable() {
        @Override
        public Object call() throws EventDeliveryException {
          sink.process();
          return null;
        }
      });
  sink.stop();

  Assert.assertEquals("Should have rolled back",
      expected.size() + 1, remaining(in));
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines: 25, Source: TestDatasetSink.java

Example 3: withBody

import org.apache.flume.Event; // import the package/class this method depends on
/**
 * Instantiate an Event instance based on the provided body and headers.
 * If <code>headers</code> is <code>null</code>, then it is ignored.
 * @param body the event payload; <code>null</code> is treated as an empty body
 * @param headers optional event headers, copied defensively; may be <code>null</code>
 * @return a new event carrying the given body and headers
 */
public static Event withBody(byte[] body, Map<String, String> headers) {
  Event event = new SimpleEvent();

  if (body == null) {
    body = new byte[0];
  }
  event.setBody(body);

  if (headers != null) {
    event.setHeaders(new HashMap<String, String>(headers));
  }

  return event;
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines: 22, Source: EventBuilder.java
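
A short usage sketch for the helper above (the header key/value and the body text are illustrative):

import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

Map<String, String> headers = new HashMap<String, String>();
headers.put("host", "web-01");  // hypothetical header
Event e = EventBuilder.withBody(
    "some payload".getBytes(StandardCharsets.UTF_8), headers);
// The headers map is copied defensively, so mutating `headers` afterwards
// does not affect the event.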

Example 4: intercept

import org.apache.flume.Event; // import the package/class this method depends on
@Override
public Event intercept(Event event) {
  String origBody = new String(event.getBody(), charset);
  Matcher matcher = searchPattern.matcher(origBody);
  String newBody = matcher.replaceAll(replaceString);
  event.setBody(newBody.getBytes(charset));
  return event;
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines: 9, Source: SearchAndReplaceInterceptor.java
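
The interceptor's searchPattern, replaceString, and charset fields come from its configuration. Here is a standalone sketch of the same transformation (the pattern and replacement below are illustrative assumptions, not the project's defaults):

import java.nio.charset.StandardCharsets;
import java.util.regex.Pattern;

Pattern searchPattern = Pattern.compile("\\d{3}-\\d{2}-\\d{4}");  // hypothetical: mask SSN-like tokens
String replaceString = "XXX-XX-XXXX";

Event event = new SimpleEvent();
event.setBody("user 123-45-6789 logged in".getBytes(StandardCharsets.UTF_8));

String newBody = searchPattern
    .matcher(new String(event.getBody(), StandardCharsets.UTF_8))
    .replaceAll(replaceString);
event.setBody(newBody.getBytes(StandardCharsets.UTF_8));  // body is now "user XXX-XX-XXXX logged in"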

Example 5: event

import org.apache.flume.Event; // import the package/class this method depends on
public static Event event(
    Object datum, Schema schema, File file, boolean useURI) {
  Map<String, String> headers = Maps.newHashMap();
  if (useURI) {
    headers.put(DatasetSinkConstants.AVRO_SCHEMA_URL_HEADER,
        file.getAbsoluteFile().toURI().toString());
  } else {
    headers.put(DatasetSinkConstants.AVRO_SCHEMA_LITERAL_HEADER,
        schema.toString());
  }
  Event e = new SimpleEvent();
  e.setBody(serialize(datum, schema));
  e.setHeaders(headers);
  return e;
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines: 16, Source: TestDatasetSink.java

Example 6: testSingleWriterUseHeaders

import org.apache.flume.Event; // import the package/class this method depends on
@Test
public void testSingleWriterUseHeaders()
        throws Exception {
  String[] colNames = {COL1, COL2};
  String PART1_NAME = "country";
  String PART2_NAME = "hour";
  String[] partNames = {PART1_NAME, PART2_NAME};
  List<String> partitionVals = null;
  String PART1_VALUE = "%{" + PART1_NAME + "}";
  String PART2_VALUE = "%y-%m-%d-%k";
  partitionVals = new ArrayList<String>(2);
  partitionVals.add(PART1_VALUE);
  partitionVals.add(PART2_VALUE);

  String tblName = "hourlydata";
  TestUtil.dropDB(conf, dbName2);
  String dbLocation = dbFolder.newFolder(dbName2).getCanonicalPath() + ".db";
  dbLocation = dbLocation.replaceAll("\\\\","/"); // for windows paths
  TestUtil.createDbAndTable(driver, dbName2, tblName, partitionVals, colNames,
          colTypes, partNames, dbLocation);

  int totalRecords = 4;
  int batchSize = 2;
  int batchCount = totalRecords / batchSize;

  Context context = new Context();
  context.put("hive.metastore",metaStoreURI);
  context.put("hive.database",dbName2);
  context.put("hive.table",tblName);
  context.put("hive.partition", PART1_VALUE + "," + PART2_VALUE);
  context.put("autoCreatePartitions","true");
  context.put("useLocalTimeStamp", "false");
  context.put("batchSize","" + batchSize);
  context.put("serializer", HiveDelimitedTextSerializer.ALIAS);
  context.put("serializer.fieldnames", COL1 + ",," + COL2 + ",");
  context.put("heartBeatInterval", "0");

  Channel channel = startSink(sink, context);

  Calendar eventDate = Calendar.getInstance();
  List<String> bodies = Lists.newArrayList();

  // push events in two batches, two per batch; each batch falls in a different hour
  Transaction txn = channel.getTransaction();
  txn.begin();
  for (int j = 1; j <= totalRecords; j++) {
    Event event = new SimpleEvent();
    String body = j + ",blah,This is a log message,other stuff";
    event.setBody(body.getBytes());
    eventDate.clear();
    eventDate.set(2014, 03, 03, j % batchCount, 1); // yy mm dd hh mm
    event.getHeaders().put( "timestamp",
            String.valueOf(eventDate.getTimeInMillis()) );
    event.getHeaders().put( PART1_NAME, "Asia" );
    bodies.add(body);
    channel.put(event);
  }
  // execute sink to process the events
  txn.commit();
  txn.close();

  checkRecordCountInTable(0, dbName2, tblName);
  for (int i = 0; i < batchCount ; i++) {
    sink.process();
  }
  checkRecordCountInTable(totalRecords, dbName2, tblName);
  sink.stop();

  // verify counters
  SinkCounter counter = sink.getCounter();
  Assert.assertEquals(2, counter.getConnectionCreatedCount());
  Assert.assertEquals(2, counter.getConnectionClosedCount());
  Assert.assertEquals(2, counter.getBatchCompleteCount());
  Assert.assertEquals(0, counter.getBatchEmptyCount());
  Assert.assertEquals(0, counter.getConnectionFailedCount() );
  Assert.assertEquals(4, counter.getEventDrainAttemptCount());
  Assert.assertEquals(4, counter.getEventDrainSuccessCount() );

}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines: 80, Source: TestHiveSink.java

Example 7: testMissingSchemaWithSavePolicy

import org.apache.flume.Event; // import the package/class this method depends on
@Test
public void testMissingSchemaWithSavePolicy() throws EventDeliveryException {
  if (Datasets.exists(ERROR_DATASET_URI)) {
    Datasets.delete(ERROR_DATASET_URI);
  }
  config.put(DatasetSinkConstants.CONFIG_FAILURE_POLICY,
      DatasetSinkConstants.SAVE_FAILURE_POLICY);
  config.put(DatasetSinkConstants.CONFIG_KITE_ERROR_DATASET_URI,
      ERROR_DATASET_URI);
  final DatasetSink sink = sink(in, config);

  Event badEvent = new SimpleEvent();
  badEvent.setHeaders(Maps.<String, String>newHashMap());
  badEvent.setBody(serialize(expected.get(0), RECORD_SCHEMA));
  putToChannel(in, badEvent);

  // run the sink
  sink.start();
  sink.process();
  sink.stop();

  Assert.assertEquals("Good records should have been written",
      Sets.newHashSet(expected),
      read(Datasets.load(FILE_DATASET_URI)));
  Assert.assertEquals("Should not have rolled back", 0, remaining(in));
  Assert.assertEquals("Should have saved the bad event",
      Sets.newHashSet(AvroFlumeEvent.newBuilder()
        .setBody(ByteBuffer.wrap(badEvent.getBody()))
        .setHeaders(toUtf8Map(badEvent.getHeaders()))
        .build()),
      read(Datasets.load(ERROR_DATASET_URI, AvroFlumeEvent.class)));
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines: 33, Source: TestDatasetSink.java

Example 8: testTextAppend

import org.apache.flume.Event; // import the package/class this method depends on
@Test
public void testTextAppend() throws InterruptedException, LifecycleException,
    EventDeliveryException, IOException {

  LOG.debug("Starting...");
  final long rollCount = 3;
  final long batchSize = 2;
  final String fileName = "FlumeData";
  String newPath = testPath + "/singleTextBucket";
  int totalEvents = 0;
  int i = 1, j = 1;

  // clear the test directory
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(newPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);

  Context context = new Context();

  // context.put("hdfs.path", testPath + "/%Y-%m-%d/%H");
  context.put("hdfs.path", newPath);
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.batchSize", String.valueOf(batchSize));
  context.put("hdfs.writeFormat", "Text");
  context.put("hdfs.fileType", "DataStream");

  Configurables.configure(sink, context);

  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);

  sink.setChannel(channel);
  sink.start();

  Calendar eventDate = Calendar.getInstance();
  List<String> bodies = Lists.newArrayList();

  // push the event batches into channel
  for (i = 1; i < 4; i++) {
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (j = 1; j <= batchSize; j++) {
      Event event = new SimpleEvent();
      eventDate.clear();
      eventDate.set(2011, i, i, i, 0); // year, month, day, hour, minute
      event.getHeaders().put("timestamp",
          String.valueOf(eventDate.getTimeInMillis()));
      event.getHeaders().put("hostname", "Host" + i);
      String body = "Test." + i + "." + j;
      event.setBody(body.getBytes());
      bodies.add(body);
      channel.put(event);
      totalEvents++;
    }
    txn.commit();
    txn.close();

    // execute sink to process the events
    sink.process();
  }

  sink.stop();

  // loop through all the files generated and check their contents
  FileStatus[] dirStat = fs.listStatus(dirPath);
  Path[] fList = FileUtil.stat2Paths(dirStat);

  // check that the roll happened correctly for the given data
  long expectedFiles = totalEvents / rollCount;
  if (totalEvents % rollCount > 0) expectedFiles++;
  Assert.assertEquals("num files wrong, found: " +
      Lists.newArrayList(fList), expectedFiles, fList.length);
  verifyOutputTextFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines: 78, Source: TestHDFSEventSink.java

Example 9: testSlowAppendFailure

import org.apache.flume.Event; // import the package/class this method depends on
@Test
public void testSlowAppendFailure() throws InterruptedException,
    LifecycleException, EventDeliveryException, IOException {

  LOG.debug("Starting...");
  final String fileName = "FlumeData";
  final long rollCount = 5;
  final long batchSize = 2;
  final int numBatches = 2;
  String newPath = testPath + "/singleBucket";
  int i = 1, j = 1;

  // clear the test directory
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(newPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);

  // create HDFS sink with slow writer
  HDFSTestWriterFactory badWriterFactory = new HDFSTestWriterFactory();
  sink = new HDFSEventSink(badWriterFactory);

  Context context = new Context();
  context.put("hdfs.path", newPath);
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.batchSize", String.valueOf(batchSize));
  context.put("hdfs.fileType", HDFSTestWriterFactory.TestSequenceFileType);
  context.put("hdfs.callTimeout", Long.toString(1000));
  Configurables.configure(sink, context);

  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);

  sink.setChannel(channel);
  sink.start();

  Calendar eventDate = Calendar.getInstance();

  // push the event batches into channel
  for (i = 0; i < numBatches; i++) {
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (j = 1; j <= batchSize; j++) {
      Event event = new SimpleEvent();
      eventDate.clear();
      eventDate.set(2011, i, i, i, 0); // year, month, day, hour, minute
      event.getHeaders().put("timestamp",
          String.valueOf(eventDate.getTimeInMillis()));
      event.getHeaders().put("hostname", "Host" + i);
      event.getHeaders().put("slow", "1500");
      event.setBody(("Test." + i + "." + j).getBytes());
      channel.put(event);
    }
    txn.commit();
    txn.close();

    // execute sink to process the events
    Status status = sink.process();

    // verify that the append returned backoff due to timeout
    Assert.assertEquals(Status.BACKOFF, status);
  }

  sink.stop();
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines: 68, Source: TestHDFSEventSink.java

Example 10: testAppend

import org.apache.flume.Event; // import the package/class this method depends on
@Test
public void testAppend() throws InterruptedException, LifecycleException,
    EventDeliveryException, IOException {

  Context context = new Context();

  context.put("sink.directory", tmpDir.getPath());
  context.put("sink.rollInterval", "1");
  context.put("sink.batchSize", "1");

  Configurables.configure(sink, context);

  Channel channel = new PseudoTxnMemoryChannel();
  Configurables.configure(channel, context);

  sink.setChannel(channel);
  sink.start();

  for (int i = 0; i < 10; i++) {
    Event event = new SimpleEvent();

    event.setBody(("Test event " + i).getBytes());

    channel.put(event);
    sink.process();

    Thread.sleep(500);
  }

  sink.stop();

  for (String file : sink.getDirectory().list()) {
    BufferedReader reader =
        new BufferedReader(new FileReader(new File(sink.getDirectory(), file)));

    String lastLine = null;
    String currentLine = null;

    while ((currentLine = reader.readLine()) != null) {
      lastLine = currentLine;
    }

    logger.debug("Produced file:{} lastLine:{}", file, lastLine);

    reader.close();
  }
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines: 48, Source: TestRollingFileSink.java

Example 11: testCloseOnIdle

import org.apache.flume.Event; // import the package/class this method depends on
@Test
public void testCloseOnIdle() throws IOException, EventDeliveryException, InterruptedException {
  String hdfsPath = testPath + "/idleClose";

  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(hdfsPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);
  Context context = new Context();
  context.put("hdfs.path", hdfsPath);
  /*
   * All three rolling methods are disabled so the only
   * way a file can roll is through the idle timeout.
   */
  context.put("hdfs.rollCount", "0");
  context.put("hdfs.rollSize", "0");
  context.put("hdfs.rollInterval", "0");
  context.put("hdfs.batchSize", "2");
  context.put("hdfs.idleTimeout", "1");
  Configurables.configure(sink, context);

  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);

  sink.setChannel(channel);
  sink.start();

  Transaction txn = channel.getTransaction();
  txn.begin();
  for (int i = 0; i < 10; i++) {
    Event event = new SimpleEvent();
    event.setBody(("test event " + i).getBytes());
    channel.put(event);
  }
  txn.commit();
  txn.close();

  sink.process();
  sink.process();
  Thread.sleep(1001);
  // previous file should have timed out now
  // this can throw BucketClosedException(from the bucketWriter having
  // closed),this is not an issue as the sink will retry and get a fresh
  // bucketWriter so long as the onClose handler properly removes
  // bucket writers that were closed.
  sink.process();
  sink.process();
  Thread.sleep(500); // shouldn't be enough for a timeout to occur
  sink.process();
  sink.process();
  sink.stop();
  FileStatus[] dirStat = fs.listStatus(dirPath);
  Path[] fList = FileUtil.stat2Paths(dirStat);
  Assert.assertEquals("Incorrect content of the directory " + StringUtils.join(fList, ","),
                      2, fList.length);
  Assert.assertTrue(!fList[0].getName().endsWith(".tmp") &&
                    !fList[1].getName().endsWith(".tmp"));
  fs.close();
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines: 61, Source: TestHDFSEventSink.java

Example 12: testSingleWriterSimplePartitionedTable

import org.apache.flume.Event; // import the package/class this method depends on
@Test
public void testSingleWriterSimplePartitionedTable()
        throws EventDeliveryException, IOException, CommandNeedRetryException {
  int totalRecords = 4;
  int batchSize = 2;
  int batchCount = totalRecords / batchSize;

  Context context = new Context();
  context.put("hive.metastore", metaStoreURI);
  context.put("hive.database",dbName);
  context.put("hive.table",tblName);
  context.put("hive.partition", PART1_VALUE + "," + PART2_VALUE);
  context.put("autoCreatePartitions","false");
  context.put("batchSize","" + batchSize);
  context.put("serializer", HiveDelimitedTextSerializer.ALIAS);
  context.put("serializer.fieldnames", COL1 + ",," + COL2 + ",");
  context.put("heartBeatInterval", "0");

  Channel channel = startSink(sink, context);

  List<String> bodies = Lists.newArrayList();

  // push the events in two batches
  Transaction txn = channel.getTransaction();
  txn.begin();
  for (int j = 1; j <= totalRecords; j++) {
    Event event = new SimpleEvent();
    String body = j + ",blah,This is a log message,other stuff";
    event.setBody(body.getBytes());
    bodies.add(body);
    channel.put(event);
  }
  // execute sink to process the events
  txn.commit();
  txn.close();


  checkRecordCountInTable(0, dbName, tblName);
  for (int i = 0; i < batchCount ; i++) {
    sink.process();
  }
  sink.stop();
  checkRecordCountInTable(totalRecords, dbName, tblName);
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines: 45, Source: TestHiveSink.java

Example 13: testSingleWriterSimpleUnPartitionedTable

import org.apache.flume.Event; // import the package/class this method depends on
@Test
public void testSingleWriterSimpleUnPartitionedTable()
        throws Exception {
  TestUtil.dropDB(conf, dbName2);
  String dbLocation = dbFolder.newFolder(dbName2).getCanonicalPath() + ".db";
  dbLocation = dbLocation.replaceAll("\\\\","/"); // for windows paths
  TestUtil.createDbAndTable(driver, dbName2, tblName2, null, colNames2, colTypes2,
                            null, dbLocation);

  try {
    int totalRecords = 4;
    int batchSize = 2;
    int batchCount = totalRecords / batchSize;

    Context context = new Context();
    context.put("hive.metastore", metaStoreURI);
    context.put("hive.database", dbName2);
    context.put("hive.table", tblName2);
    context.put("autoCreatePartitions","false");
    context.put("batchSize","" + batchSize);
    context.put("serializer", HiveDelimitedTextSerializer.ALIAS);
    context.put("serializer.fieldnames", COL1 + ",," + COL2 + ",");
    context.put("heartBeatInterval", "0");

    Channel channel = startSink(sink, context);

    List<String> bodies = Lists.newArrayList();

    // Push the events in two batches
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (int j = 1; j <= totalRecords; j++) {
      Event event = new SimpleEvent();
      String body = j + ",blah,This is a log message,other stuff";
      event.setBody(body.getBytes());
      bodies.add(body);
      channel.put(event);
    }

    txn.commit();
    txn.close();

    checkRecordCountInTable(0, dbName2, tblName2);
    for (int i = 0; i < batchCount ; i++) {
      sink.process();
    }

    // check before & after  stopping sink
    checkRecordCountInTable(totalRecords, dbName2, tblName2);
    sink.stop();
    checkRecordCountInTable(totalRecords, dbName2, tblName2);
  } finally {
    TestUtil.dropDB(conf, dbName2);
  }
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines: 56, Source: TestHiveSink.java

Example 14: doTestTextBatchAppend

import org.apache.flume.Event; // import the package/class this method depends on
public void doTestTextBatchAppend(boolean useRawLocalFileSystem)
    throws Exception {
  LOG.debug("Starting...");

  final long rollCount = 10;
  final long batchSize = 2;
  final String fileName = "FlumeData";
  String newPath = testPath + "/singleTextBucket";
  int totalEvents = 0;
  int i = 1, j = 1;

  // clear the test directory
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(newPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);

  Context context = new Context();

  // context.put("hdfs.path", testPath + "/%Y-%m-%d/%H");
  context.put("hdfs.path", newPath);
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.rollInterval", "0");
  context.put("hdfs.rollSize", "0");
  context.put("hdfs.batchSize", String.valueOf(batchSize));
  context.put("hdfs.writeFormat", "Text");
  context.put("hdfs.useRawLocalFileSystem",
      Boolean.toString(useRawLocalFileSystem));
  context.put("hdfs.fileType", "DataStream");

  Configurables.configure(sink, context);

  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);

  sink.setChannel(channel);
  sink.start();

  Calendar eventDate = Calendar.getInstance();
  List<String> bodies = Lists.newArrayList();

  // push enough event batches into the channel to force several rolls
  for (i = 1; i <= (rollCount * 10) / batchSize; i++) {
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (j = 1; j <= batchSize; j++) {
      Event event = new SimpleEvent();
      eventDate.clear();
      eventDate.set(2011, i, i, i, 0); // year, month, day, hour, minute
      String body = "Test." + i + "." + j;
      event.setBody(body.getBytes());
      bodies.add(body);
      channel.put(event);
      totalEvents++;
    }
    txn.commit();
    txn.close();

    // execute sink to process the events
    sink.process();
  }

  sink.stop();

  // loop through all the files generated and check their contents
  FileStatus[] dirStat = fs.listStatus(dirPath);
  Path[] fList = FileUtil.stat2Paths(dirStat);

  // check that the roll happened correctly for the given data
  long expectedFiles = totalEvents / rollCount;
  if (totalEvents % rollCount > 0) expectedFiles++;
  Assert.assertEquals("num files wrong, found: " +
      Lists.newArrayList(fList), expectedFiles, fList.length);
  // check the contents of the all files
  verifyOutputTextFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines: 79, Source: TestHDFSEventSink.java

Example 15: testAvroAppend

import org.apache.flume.Event; // import the package/class this method depends on
@Test
public void testAvroAppend() throws InterruptedException, LifecycleException,
    EventDeliveryException, IOException {

  LOG.debug("Starting...");
  final long rollCount = 3;
  final long batchSize = 2;
  final String fileName = "FlumeData";
  String newPath = testPath + "/singleTextBucket";
  int totalEvents = 0;
  int i = 1, j = 1;

  // clear the test directory
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(newPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);

  Context context = new Context();

  // context.put("hdfs.path", testPath + "/%Y-%m-%d/%H");
  context.put("hdfs.path", newPath);
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.batchSize", String.valueOf(batchSize));
  context.put("hdfs.writeFormat", "Text");
  context.put("hdfs.fileType", "DataStream");
  context.put("serializer", "AVRO_EVENT");

  Configurables.configure(sink, context);

  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);

  sink.setChannel(channel);
  sink.start();

  Calendar eventDate = Calendar.getInstance();
  List<String> bodies = Lists.newArrayList();

  // push the event batches into channel
  for (i = 1; i < 4; i++) {
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (j = 1; j <= batchSize; j++) {
      Event event = new SimpleEvent();
      eventDate.clear();
      eventDate.set(2011, i, i, i, 0); // year, month, day, hour, minute
      event.getHeaders().put("timestamp",
          String.valueOf(eventDate.getTimeInMillis()));
      event.getHeaders().put("hostname", "Host" + i);
      String body = "Test." + i + "." + j;
      event.setBody(body.getBytes());
      bodies.add(body);
      channel.put(event);
      totalEvents++;
    }
    txn.commit();
    txn.close();

    // execute sink to process the events
    sink.process();
  }

  sink.stop();

  // loop through all the files generated and check their contents
  FileStatus[] dirStat = fs.listStatus(dirPath);
  Path[] fList = FileUtil.stat2Paths(dirStat);

  // check that the roll happened correctly for the given data
  long expectedFiles = totalEvents / rollCount;
  if (totalEvents % rollCount > 0) expectedFiles++;
  Assert.assertEquals("num files wrong, found: " +
      Lists.newArrayList(fList), expectedFiles, fList.length);
  verifyOutputAvroFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines: 79, Source: TestHDFSEventSink.java


Note: The org.apache.flume.Event.setBody method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors. For distribution and use, refer to the corresponding project's license. Do not reproduce without permission.