This article collects typical usage examples of the Java method org.apache.flume.channel.MemoryChannel.stop. If you have been wondering what MemoryChannel.stop does in Java, how to use it, or what it looks like in practice, the curated method examples here may help. You can also read more about the usage of its containing class, org.apache.flume.channel.MemoryChannel.
Two code examples of the MemoryChannel.stop method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
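Before the full test cases, here is a minimal sketch of the lifecycle that stop() closes out. It uses only the standard Flume Context and lifecycle APIs; the class name and the capacity value are placeholders chosen for illustration, not taken from the examples below.

import org.apache.flume.Context;
import org.apache.flume.channel.MemoryChannel;

public class MemoryChannelLifecycleSketch {
  public static void main(String[] args) {
    MemoryChannel channel = new MemoryChannel();
    Context ctx = new Context();
    ctx.put("capacity", "100"); // placeholder sizing; any valid capacity works
    channel.configure(ctx);     // configure() must run before start()
    channel.start();            // the channel can now host put/take transactions
    // ... put and take events inside transactions here ...
    channel.stop();             // releases the channel; always paired with start()
  }
}

Both examples below follow the same discipline: stop() is the last call made on the channel, after every component that could still feed events into it has been shut down.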
Example 1: testCensor
import org.apache.flume.channel.MemoryChannel; // import for the package/class this method depends on
@Test
public void testCensor() {
  MemoryChannel memCh = new MemoryChannel();
  memCh.configure(new Context());
  memCh.start();

  ChannelSelector cs = new ReplicatingChannelSelector();
  cs.setChannels(Lists.<Channel>newArrayList(memCh));
  ChannelProcessor cp = new ChannelProcessor(cs);

  // source config
  Map<String, String> cfgMap = Maps.newHashMap();
  cfgMap.put("interceptors", "a");
  String builderClass = CensoringInterceptor.Builder.class.getName();
  cfgMap.put("interceptors.a.type", builderClass);
  Context ctx = new Context(cfgMap);

  // setup
  cp.configure(ctx);
  cp.initialize();

  // build an event carrying the header the interceptor should censor
  Map<String, String> headers = Maps.newHashMap();
  String badWord = "scribe";
  headers.put("Bad-Words", badWord);
  Event event1 = EventBuilder.withBody("test", Charsets.UTF_8, headers);
  Assert.assertEquals(badWord, event1.getHeaders().get("Bad-Words"));
  cp.processEvent(event1);

  // take the event back out and verify the header was removed
  Transaction tx = memCh.getTransaction();
  tx.begin();
  Event event1a = memCh.take();
  Assert.assertNull(event1a.getHeaders().get("Bad-Words"));
  tx.commit();
  tx.close();

  // cleanup / shutdown: close the processor first, then stop the channel
  cp.close();
  memCh.stop();
}
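Note the shutdown order in this example: the ChannelProcessor is closed before MemoryChannel.stop is called, so no interceptor is still writing into the channel when it goes down.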
Example 2: simpleHDFSTest
import org.apache.flume.channel.MemoryChannel; // import for the package/class this method depends on
/**
 * This is a very basic test that writes one event to HDFS and reads it back.
 */
@Test
public void simpleHDFSTest() throws EventDeliveryException, IOException {
  cluster = new MiniDFSCluster(new Configuration(), 1, true, null);
  cluster.waitActive();

  String outputDir = "/flume/simpleHDFSTest";
  Path outputDirPath = new Path(outputDir);
  logger.info("Running test with output dir: {}", outputDir);

  FileSystem fs = cluster.getFileSystem();
  // ensure the output directory is empty
  if (fs.exists(outputDirPath)) {
    fs.delete(outputDirPath, true);
  }

  String nnURL = getNameNodeURL(cluster);
  logger.info("Namenode address: {}", nnURL);

  Context chanCtx = new Context();
  MemoryChannel channel = new MemoryChannel();
  channel.setName("simpleHDFSTest-mem-chan");
  channel.configure(chanCtx);
  channel.start();

  Context sinkCtx = new Context();
  sinkCtx.put("hdfs.path", nnURL + outputDir);
  sinkCtx.put("hdfs.fileType", HDFSWriterFactory.DataStreamType);
  sinkCtx.put("hdfs.batchSize", Integer.toString(1));

  HDFSEventSink sink = new HDFSEventSink();
  sink.setName("simpleHDFSTest-hdfs-sink");
  sink.configure(sinkCtx);
  sink.setChannel(channel);
  sink.start();

  // create an event; Flume channels hand out a per-thread transaction, so the
  // repeated getTransaction() calls below all refer to the same transaction
  String EVENT_BODY = "yarg!";
  channel.getTransaction().begin();
  try {
    channel.put(EventBuilder.withBody(EVENT_BODY, Charsets.UTF_8));
    channel.getTransaction().commit();
  } finally {
    channel.getTransaction().close();
  }

  // store the event to HDFS
  sink.process();

  // shut down Flume: stop the sink before stopping the channel that feeds it
  sink.stop();
  channel.stop();

  // verify that the event is in HDFS and that its content is what we expect
  FileStatus[] statuses = fs.listStatus(outputDirPath);
  Assert.assertNotNull("No files found written to HDFS", statuses);
  Assert.assertEquals("Only one file expected", 1, statuses.length);

  for (FileStatus status : statuses) {
    Path filePath = status.getPath();
    logger.info("Found file on DFS: {}", filePath);
    FSDataInputStream stream = fs.open(filePath);
    BufferedReader reader = new BufferedReader(new InputStreamReader(stream));
    String line = reader.readLine();
    logger.info("First line in file {}: {}", filePath, line);
    Assert.assertEquals(EVENT_BODY, line);
    reader.close();
  }

  if (!KEEP_DATA) {
    fs.delete(outputDirPath, true);
  }
  cluster.shutdown();
  cluster = null;
}
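The same shutdown discipline shows up at the sink end of the pipeline here: sink.stop() runs before channel.stop(), so the HDFS sink has finished its writes before the channel backing it is torn down.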