

Java MemoryChannel.put Method Code Examples

This article collects typical usage examples of the Java method org.apache.flume.channel.MemoryChannel.put. If you have been wondering what MemoryChannel.put does, how to use it, or where to find working examples, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.flume.channel.MemoryChannel.


Three code examples of MemoryChannel.put are presented below, sorted by popularity by default.
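Before the full test cases, here is a minimal, self-contained sketch of the pattern all three examples share (the class name is illustrative and default channel settings are assumed): put() is only legal inside a channel Transaction, which is committed on success and rolled back on failure.

import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.Transaction;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.event.EventBuilder;
import com.google.common.base.Charsets;

public class MemoryChannelPutSketch {
  public static void main(String[] args) {
    MemoryChannel channel = new MemoryChannel();
    channel.configure(new Context()); // default capacity and transactionCapacity
    channel.start();

    Event event = EventBuilder.withBody("hello", Charsets.UTF_8);

    // put() must run inside a channel transaction
    Transaction txn = channel.getTransaction();
    txn.begin();
    try {
      channel.put(event);
      txn.commit();
    } catch (Throwable t) {
      txn.rollback();
      throw t;
    } finally {
      txn.close();
    }

    channel.stop();
  }
}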

Example 1: testCSV

import org.apache.flume.Context; // imports added so the snippet compiles on its own
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.Transaction;
import org.apache.flume.channel.MemoryChannel; // class providing the put method under test
import org.apache.flume.event.EventBuilder;
import org.apache.flume.sink.RollingFileSink;
import com.google.common.base.Charsets;
import org.junit.Test;
// CSVAvroSerializer comes from the enclosing project (flume-rtq-hadoop-summit-2013)
@Test
public void testCSV() {
  MemoryChannel ch = new MemoryChannel();
  ch.configure(new Context());

  RollingFileSink s = new RollingFileSink();
  s.setChannel(ch);
  Context ctx = new Context();
  ctx.put("sink.directory", "target/test");
  ctx.put("sink.serializer", CSVAvroSerializer.Builder.class.getName());
  s.configure(ctx);

  String line = "1371782343001,1371782343023,view,65605,201.112.234.35,tgoodwin,/product/24923,/product/60444";
  Event e = EventBuilder.withBody(line, Charsets.UTF_8);

  // put() is only legal inside a channel transaction
  Transaction txn = ch.getTransaction();
  txn.begin();
  ch.put(e);
  txn.commit();
  txn.close();

  try {
    s.process();
  } catch (EventDeliveryException ex) {
    ex.printStackTrace();
  }
}
 
Author: mpercy | Project: flume-rtq-hadoop-summit-2013 | Lines: 28 | Source: TestCSVAvroSerializer.java

Example 2: simpleHDFSTest

import org.apache.flume.channel.MemoryChannel; // class providing the put method under test
// Note: cluster, logger, KEEP_DATA, and getNameNodeURL() are members of the enclosing test class.
/**
 * This is a very basic test that writes one event to HDFS and reads it back.
 */
@Test
public void simpleHDFSTest() throws EventDeliveryException, IOException {
  cluster = new MiniDFSCluster(new Configuration(), 1, true, null);
  cluster.waitActive();

  String outputDir = "/flume/simpleHDFSTest";
  Path outputDirPath = new Path(outputDir);

  logger.info("Running test with output dir: {}", outputDir);

  FileSystem fs = cluster.getFileSystem();
  // ensure output directory is empty
  if (fs.exists(outputDirPath)) {
    fs.delete(outputDirPath, true);
  }

  String nnURL = getNameNodeURL(cluster);
  logger.info("Namenode address: {}", nnURL);

  Context chanCtx = new Context();
  MemoryChannel channel = new MemoryChannel();
  channel.setName("simpleHDFSTest-mem-chan");
  channel.configure(chanCtx);
  channel.start();

  Context sinkCtx = new Context();
  sinkCtx.put("hdfs.path", nnURL + outputDir);
  sinkCtx.put("hdfs.fileType", HDFSWriterFactory.DataStreamType);
  sinkCtx.put("hdfs.batchSize", Integer.toString(1));

  HDFSEventSink sink = new HDFSEventSink();
  sink.setName("simpleHDFSTest-hdfs-sink");
  sink.configure(sinkCtx);
  sink.setChannel(channel);
  sink.start();

  // create an event
  String EVENT_BODY = "yarg!";
  // repeated getTransaction() calls on the same thread return the same open transaction
  channel.getTransaction().begin();
  try {
    channel.put(EventBuilder.withBody(EVENT_BODY, Charsets.UTF_8));
    channel.getTransaction().commit();
  } finally {
    channel.getTransaction().close();
  }

  // store event to HDFS
  sink.process();

  // shut down flume
  sink.stop();
  channel.stop();

  // verify that it's in HDFS and that its content is what we say it should be
  FileStatus[] statuses = fs.listStatus(outputDirPath);
  Assert.assertNotNull("No files found written to HDFS", statuses);
  Assert.assertEquals("Only one file expected", 1, statuses.length);

  for (FileStatus status : statuses) {
    Path filePath = status.getPath();
    logger.info("Found file on DFS: {}", filePath);
    FSDataInputStream stream = fs.open(filePath);
    BufferedReader reader = new BufferedReader(new InputStreamReader(stream));
    String line = reader.readLine();
    logger.info("First line in file {}: {}", filePath, line);
    Assert.assertEquals(EVENT_BODY, line);
  }

  if (!KEEP_DATA) {
    fs.delete(outputDirPath, true);
  }

  cluster.shutdown();
  cluster = null;
}
 
Author: moueimei | Project: flume-release-1.7.0 | Lines: 79 | Source: TestHDFSEventSinkOnMiniCluster.java
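For contrast with put(), the sketch below (illustrative, not part of the original test code) shows the consuming side that a sink's process() call performs internally: take() must also run inside a channel transaction, and it returns null when the channel is empty.

import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.Transaction;
import org.apache.flume.channel.MemoryChannel;

public class MemoryChannelTakeSketch {
  public static void main(String[] args) {
    MemoryChannel channel = new MemoryChannel();
    channel.configure(new Context());
    channel.start();

    // take() mirrors put(): it is only legal inside a channel transaction
    Transaction txn = channel.getTransaction();
    txn.begin();
    try {
      Event event = channel.take(); // null if the channel is empty
      if (event != null) {
        System.out.println(new String(event.getBody()));
      }
      txn.commit();
    } catch (Throwable t) {
      txn.rollback();
      throw t;
    } finally {
      txn.close();
    }

    channel.stop();
  }
}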

Example 3: testCloseReopen

import org.apache.flume.channel.MemoryChannel; // class providing the put method under test
// Note: sink, testPath, LOG, HDFSTestWriterFactory, and verifyOutputSequenceFiles() are members of the enclosing test class.
/**
 * Ensure that when a write throws an IOException we are able to make
 * progress on the next process() call. This relies on transactional
 * rollback semantics for durability and on the BucketWriter class
 * closing its file upon IOException.
 */
@Test
public void testCloseReopen()
    throws InterruptedException, LifecycleException, EventDeliveryException, IOException {

  LOG.debug("Starting...");
  final int numBatches = 4;
  final String fileName = "FlumeData";
  final long rollCount = 5;
  final long batchSize = 2;
  String newPath = testPath + "/singleBucket";
  int i = 1, j = 1;

  HDFSTestWriterFactory badWriterFactory = new HDFSTestWriterFactory();
  sink = new HDFSEventSink(badWriterFactory);

  // clear the test directory
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(newPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);

  Context context = new Context();

  context.put("hdfs.path", newPath);
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.batchSize", String.valueOf(batchSize));
  context.put("hdfs.fileType", HDFSTestWriterFactory.TestSequenceFileType);

  Configurables.configure(sink, context);

  MemoryChannel channel = new MemoryChannel();
  Configurables.configure(channel, new Context());

  sink.setChannel(channel);
  sink.start();

  Calendar eventDate = Calendar.getInstance();
  List<String> bodies = Lists.newArrayList();
  // push the event batches into channel
  for (i = 1; i < numBatches; i++) {
    channel.getTransaction().begin();
    try {
      for (j = 1; j <= batchSize; j++) {
        Event event = new SimpleEvent();
        eventDate.clear();
        eventDate.set(2011, i, i, i, 0); // year, month, day, hour, minute
        event.getHeaders().put("timestamp",
            String.valueOf(eventDate.getTimeInMillis()));
        event.getHeaders().put("hostname", "Host" + i);
        String body = "Test." + i + "." + j;
        event.setBody(body.getBytes());
        bodies.add(body);
        // inject fault
        event.getHeaders().put("fault-until-reopen", "");
        channel.put(event);
      }
      channel.getTransaction().commit();
    } finally {
      channel.getTransaction().close();
    }
    LOG.info("execute sink to process the events: " + sink.process());
  }
  LOG.info("clear any events pending due to errors: " + sink.process());
  sink.stop();

  verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}
 
Author: moueimei | Project: flume-release-1.7.0 | Lines: 76 | Source: TestHDFSEventSink.java

