

Java MemoryChannel.start Method Code Examples

This article collects typical usage examples of the Java method org.apache.flume.channel.MemoryChannel.start. If you are wondering what MemoryChannel.start does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.flume.channel.MemoryChannel.


The following presents 15 code examples of the MemoryChannel.start method, sorted by popularity by default.
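Before the collected examples, here is a minimal, self-contained sketch (not drawn from any of the projects listed below) of the lifecycle around MemoryChannel.start: configure the channel through a Context, call start() before any put or take, move one event through explicit transactions, then stop() the channel. The class name, channel name, and capacity values are illustrative assumptions, not taken from the examples.

import java.nio.charset.StandardCharsets;

import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.Transaction;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.conf.Configurables;
import org.apache.flume.event.EventBuilder;

public class MemoryChannelStartSketch {
  public static void main(String[] args) {
    // Configure an in-memory channel; the capacity values are illustrative.
    Context context = new Context();
    context.put("capacity", "100");
    context.put("transactionCapacity", "10");

    MemoryChannel channel = new MemoryChannel();
    channel.setName("sketch-memory-channel");
    Configurables.configure(channel, context);

    // start() must be called before the channel will accept or deliver events.
    channel.start();

    // Put one event; puts become visible to take() only after the transaction commits.
    Transaction putTx = channel.getTransaction();
    putTx.begin();
    channel.put(EventBuilder.withBody("hello", StandardCharsets.UTF_8));
    putTx.commit();
    putTx.close();

    // Take the event back in a second transaction.
    Transaction takeTx = channel.getTransaction();
    takeTx.begin();
    Event event = channel.take();
    System.out.println(new String(event.getBody(), StandardCharsets.UTF_8));
    takeTx.commit();
    takeTx.close();

    channel.stop();
  }
}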

Example 1: setUpClass

import org.apache.flume.channel.MemoryChannel; // import the package/class this method depends on
@BeforeClass
public static void setUpClass() throws Exception {
  port = findFreePort();
  Context context = new Context();
  context.put("port", String.valueOf(port));

  scribeSource = new ScribeSource();
  scribeSource.setName("Scribe Source");

  Configurables.configure(scribeSource, context);

  memoryChannel = new MemoryChannel();
  Configurables.configure(memoryChannel, context);

  List<Channel> channels = new ArrayList<Channel>(1);
  channels.add(memoryChannel);

  ChannelSelector rcs = new ReplicatingChannelSelector();
  rcs.setChannels(channels);

  memoryChannel.start();

  scribeSource.setChannelProcessor(new ChannelProcessor(rcs));
  scribeSource.start();
}
 
Developer: moueimei, Project: flume-release-1.7.0, Lines: 26, Source: TestScribeSource.java

Example 2: createSparkAvroSink

import org.apache.flume.channel.MemoryChannel; // import the package/class this method depends on
private void createSparkAvroSink() {
	sparkAvroChannel = new MemoryChannel();
	Map<String, String> channelParamters = new HashMap<>();
	channelParamters.put("capacity", "100000");
	channelParamters.put("transactionCapacity", "1000");
	Context channelContext = new Context(channelParamters);
	Configurables.configure(sparkAvroChannel, channelContext);
	String channelName = "SparkAvroMemoryChannel-" + UUID.randomUUID();
	sparkAvroChannel.setName(channelName);

	sparkAvroSink = new AvroSink();
	sparkAvroSink.setName("SparkAvroSink-" + UUID.randomUUID());
	Map<String, String> paramters = new HashMap<>();
	paramters.put("type", "avro");
	paramters.put("hostname", "localhost");
	paramters.put("port", "41111");
	paramters.put("batch-size", "100");
	Context sinkContext = new Context(paramters);
	sparkAvroSink.configure(sinkContext);
	Configurables.configure(sparkAvroSink, sinkContext);
	sparkAvroSink.setChannel(sparkAvroChannel);

	sparkAvroChannel.start();
	sparkAvroSink.start();
}
 
Developer: jaibeermalik, Project: searchanalytics-bigdata, Lines: 26, Source: FlumeAgentServiceImpl.java

Example 3: setup

import org.apache.flume.channel.MemoryChannel; // import the package/class this method depends on
@Before
public void setup() {
    //        Context channelContext = new Context();
    //        channelContext.put("checkpointDir","data/check");
    //        channelContext.put("dataDirs","data/data");
    //        channelContext.put("capacity","1000");
    //        channelContext.put("transactionCapacity","100");
    //        channelContext.put("checkpointInterval","300");
    //        channel = new FileChannel();
    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "5000");
    channel = new MemoryChannel();
    channel.setName("junitChannel");
    Configurables.configure(channel, channelContext);
    channel.start();

    druidSink = new DruidSink();
    druidSink.setChannel(channel);
    druidSink.configure(getMockContext());
    druidSink.start();
}
 
Developer: Stratio, Project: ingestion, Lines: 23, Source: DruidSinkIT.java

Example 4: createSink

import org.apache.flume.channel.MemoryChannel; // import the package/class this method depends on
private void createSink() {
		
		channel = new MemoryChannel();
		Map<String, String> channelParamters = new HashMap<>();
		channelParamters.put("capacity", "100000");
		channelParamters.put("transactionCapacity", "1000");
		Context channelContext = new Context(channelParamters);
		Configurables.configure(channel, channelContext);
		channel.setName("HbaseSinkChannel-" + UUID.randomUUID());

		sink = new HBaseSink();
		sink.setName("HbaseSink-" + UUID.randomUUID());
		Map<String, String> paramters = new HashMap<>();
		paramters.put(HBaseSinkConfigurationConstants.CONFIG_TABLE, "searchclicks");
		paramters.put(HBaseSinkConfigurationConstants.CONFIG_COLUMN_FAMILY, new String(HbaseJsonEventSerializer.COLUMFAMILY_CLIENT_BYTES));
		paramters.put(HBaseSinkConfigurationConstants.CONFIG_BATCHSIZE, "1000");
//		paramters.put(HBaseSinkConfigurationConstants.CONFIG_SERIALIZER, RegexHbaseEventSerializer.class.getName());
//		paramters.put(HBaseSinkConfigurationConstants.CONFIG_SERIALIZER + "." + RegexHbaseEventSerializer.REGEX_CONFIG, RegexHbaseEventSerializer.REGEX_DEFAULT);
//		paramters.put(HBaseSinkConfigurationConstants.CONFIG_SERIALIZER + "." + RegexHbaseEventSerializer.IGNORE_CASE_CONFIG, "true");
//		paramters.put(HBaseSinkConfigurationConstants.CONFIG_SERIALIZER + "." + RegexHbaseEventSerializer.COL_NAME_CONFIG, "json");
		paramters.put(HBaseSinkConfigurationConstants.CONFIG_SERIALIZER, HbaseJsonEventSerializer.class.getName());

		
		Context sinkContext = new Context(paramters);
		sink.configure(sinkContext);
		sink.setChannel(channel);

		sink.start();
		channel.start();
	}
 
Developer: jaibeermalik, Project: searchanalytics-bigdata, Lines: 31, Source: FlumeHbaseSinkServiceImpl.java

Example 5: createSink

import org.apache.flume.channel.MemoryChannel; // import the package/class this method depends on
private void createSink() {
	sink = new ElasticSearchSink();
	sink.setName("ElasticSearchSink-" + UUID.randomUUID());
	channel = new MemoryChannel();
	Map<String, String> channelParamters = new HashMap<>();
	channelParamters.put("capacity", "100000");
	channelParamters.put("transactionCapacity", "1000");
	Context channelContext = new Context(channelParamters);
	Configurables.configure(channel, channelContext);
	channel.setName("ElasticSearchSinkChannel-" + UUID.randomUUID());

	Map<String, String> paramters = new HashMap<>();
	paramters.put(ElasticSearchSinkConstants.HOSTNAMES, "127.0.0.1:9310");
	String indexNamePrefix = "recentlyviewed";
	paramters.put(ElasticSearchSinkConstants.INDEX_NAME, indexNamePrefix);
	paramters.put(ElasticSearchSinkConstants.INDEX_TYPE, "clickevent");
	paramters.put(ElasticSearchSinkConstants.CLUSTER_NAME,
			"jai-testclusterName");
	paramters.put(ElasticSearchSinkConstants.BATCH_SIZE, "10");
	paramters.put(ElasticSearchSinkConstants.SERIALIZER,
			ElasticSearchJsonBodyEventSerializer.class.getName());

	Context sinkContext = new Context(paramters);
	sink.configure(sinkContext);
	sink.setChannel(channel);

	sink.start();
	channel.start();
}
 
Developer: jaibeermalik, Project: searchanalytics-bigdata, Lines: 30, Source: FlumeESSinkServiceImpl.java

Example 6: createSink

import org.apache.flume.channel.MemoryChannel; // import the package/class this method depends on
private void createSink() {
	sink = new HDFSEventSink();
	sink.setName("HDFSEventSink-" + UUID.randomUUID());
	channel = new MemoryChannel();
	Map<String, String> channelParamters = new HashMap<>();
	channelParamters.put("capacity", "100000");
	channelParamters.put("transactionCapacity", "1000");
	Context channelContext = new Context(channelParamters);
	Configurables.configure(channel, channelContext);
	channel.setName("HDFSEventSinkChannel-" + UUID.randomUUID());

	Map<String, String> paramters = new HashMap<>();
	paramters.put("hdfs.type", "hdfs");
	String hdfsBasePath = hadoopClusterService.getHDFSUri()
			+ "/searchevents";
	paramters.put("hdfs.path", hdfsBasePath + "/%Y/%m/%d/%H");
	paramters.put("hdfs.filePrefix", "searchevents");
	paramters.put("hdfs.fileType", "DataStream");
	paramters.put("hdfs.rollInterval", "0");
	paramters.put("hdfs.rollSize", "0");
	paramters.put("hdfs.idleTimeout", "1");
	paramters.put("hdfs.rollCount", "0");
	paramters.put("hdfs.batchSize", "1000");
	paramters.put("hdfs.useLocalTimeStamp", "true");

	Context sinkContext = new Context(paramters);
	sink.configure(sinkContext);
	sink.setChannel(channel);

	sink.start();
	channel.start();
}
 
Developer: jaibeermalik, Project: searchanalytics-bigdata, Lines: 33, Source: FlumeHDFSSinkServiceImpl.java

Example 7: prepareMongo

import org.apache.flume.channel.MemoryChannel; // import the package/class this method depends on
@Before
public void prepareMongo() throws Exception {
    fongo = new Fongo("mongo test server");

    Context mongoContext = new Context();
    mongoContext.put("batchSize", "3");
    mongoContext.put("mappingFile", "/mapping_definition_update.json");
    mongoContext.put("mongoUri", "INJECTED");
    mongoContext.put("dynamic", "true");
    mongoContext.put("updateInsteadReplace", "true");

    mongoSink = new MongoSink();

    injectFongo(mongoSink);
    Configurables.configure(mongoSink, mongoContext);

    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "200");

    channel = new MemoryChannel();
    channel.setName("junitChannel");
    Configurables.configure(channel, channelContext);

    mongoSink.setChannel(channel);

    channel.start();
    mongoSink.start();

}
 
Developer: Stratio, Project: ingestion, Lines: 31, Source: MongoSinkUpdateInsteadReplaceTest.java

Example 8: prepareMongo

import org.apache.flume.channel.MemoryChannel; // import the package/class this method depends on
@Before
public void prepareMongo() throws Exception {
    fongo = new Fongo("mongo test server");

    Context mongoContext = new Context();
    mongoContext.put("batchSize", "3");
    mongoContext.put("mappingFile", "/mapping_definition_1.json");
    mongoContext.put("mongoUri", "INJECTED");
    mongoContext.put("dynamic", "true");

    mongoSink = new MongoSink();

    injectFongo(mongoSink);
    Configurables.configure(mongoSink, mongoContext);

    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "200");

    channel = new MemoryChannel();
    channel.setName("junitChannel");
    Configurables.configure(channel, channelContext);

    mongoSink.setChannel(channel);

    channel.start();
    mongoSink.start();
}
 
Developer: Stratio, Project: ingestion, Lines: 29, Source: MongoSinkDynamicTest.java

Example 9: prepareMongo

import org.apache.flume.channel.MemoryChannel; // import the package/class this method depends on
@Before
public void prepareMongo() throws Exception {
    fongo = new Fongo("mongo test server");

    Context mongoContext = new Context();
    mongoContext.put("batchSize", "1");
    mongoContext.put("mappingFile", "/mapping_definition_1.json");
    mongoContext.put("mongoUri", "INJECTED");

    mongoSink = new MongoSink();

    injectFongo(mongoSink);
    Configurables.configure(mongoSink, mongoContext);

    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "200");

    channel = new MemoryChannel();
    channel.setName("junitChannel");
    Configurables.configure(channel, channelContext);

    mongoSink.setChannel(channel);

    channel.start();
    mongoSink.start();
}
 
Developer: Stratio, Project: ingestion, Lines: 28, Source: MongoSinkTest.java

Example 10: testCensor

import org.apache.flume.channel.MemoryChannel; // import the package/class this method depends on
@Test
public void testCensor() {

  MemoryChannel memCh = new MemoryChannel();
  memCh.configure(new Context());
  memCh.start();

  ChannelSelector cs = new ReplicatingChannelSelector();
  cs.setChannels(Lists.<Channel>newArrayList(memCh));
  ChannelProcessor cp = new ChannelProcessor(cs);

  // source config
  Map<String, String> cfgMap = Maps.newHashMap();
  cfgMap.put("interceptors", "a");
  String builderClass = CensoringInterceptor.Builder.class.getName();
  cfgMap.put("interceptors.a.type", builderClass);
  Context ctx = new Context(cfgMap);

  // setup
  cp.configure(ctx);
  cp.initialize();

  Map<String, String> headers = Maps.newHashMap();
  String badWord = "scribe";
  headers.put("Bad-Words", badWord);
  Event event1 = EventBuilder.withBody("test", Charsets.UTF_8, headers);
  Assert.assertEquals(badWord, event1.getHeaders().get("Bad-Words"));
  cp.processEvent(event1);

  Transaction tx = memCh.getTransaction();
  tx.begin();

  Event event1a = memCh.take();
  Assert.assertNull(event1a.getHeaders().get("Bad-Words"));

  tx.commit();
  tx.close();

  // cleanup / shutdown
  cp.close();
  memCh.stop();
}
 
Developer: moueimei, Project: flume-release-1.7.0, Lines: 43, Source: TestCensoringInterceptor.java

Example 11: setUpClass

import org.apache.flume.channel.MemoryChannel; // import the package/class this method depends on
@BeforeClass
public static void setUpClass() throws Exception {
  selectedPort = findFreePort();

  source = new HTTPSource();
  channel = new MemoryChannel();

  httpsSource = new HTTPSource();
  httpsSource.setName("HTTPS Source");

  Context ctx = new Context();
  ctx.put("capacity", "100");
  Configurables.configure(channel, ctx);

  List<Channel> channels = new ArrayList<Channel>(1);
  channels.add(channel);

  ChannelSelector rcs = new ReplicatingChannelSelector();
  rcs.setChannels(channels);

  source.setChannelProcessor(new ChannelProcessor(rcs));

  channel.start();

  httpsSource.setChannelProcessor(new ChannelProcessor(rcs));

  // HTTP context
  Context context = new Context();

  context.put("port", String.valueOf(selectedPort));
  context.put("host", "0.0.0.0");

  // SSL context props
  Context sslContext = new Context();
  sslContext.put(HTTPSourceConfigurationConstants.SSL_ENABLED, "true");
  sslPort = findFreePort();
  sslContext.put(HTTPSourceConfigurationConstants.CONFIG_PORT,
                 String.valueOf(sslPort));
  sslContext.put(HTTPSourceConfigurationConstants.SSL_KEYSTORE_PASSWORD, "password");
  sslContext.put(HTTPSourceConfigurationConstants.SSL_KEYSTORE,
                 "src/test/resources/jettykeystore");

  Configurables.configure(source, context);
  Configurables.configure(httpsSource, sslContext);
  source.start();
  httpsSource.start();
}
 
Developer: moueimei, Project: flume-release-1.7.0, Lines: 48, Source: TestHTTPSource.java

Example 12: simpleHDFSTest

import org.apache.flume.channel.MemoryChannel; // import the package/class this method depends on
/**
 * This is a very basic test that writes one event to HDFS and reads it back.
 */
@Test
public void simpleHDFSTest() throws EventDeliveryException, IOException {
  cluster = new MiniDFSCluster(new Configuration(), 1, true, null);
  cluster.waitActive();

  String outputDir = "/flume/simpleHDFSTest";
  Path outputDirPath = new Path(outputDir);

  logger.info("Running test with output dir: {}", outputDir);

  FileSystem fs = cluster.getFileSystem();
  // ensure output directory is empty
  if (fs.exists(outputDirPath)) {
    fs.delete(outputDirPath, true);
  }

  String nnURL = getNameNodeURL(cluster);
  logger.info("Namenode address: {}", nnURL);

  Context chanCtx = new Context();
  MemoryChannel channel = new MemoryChannel();
  channel.setName("simpleHDFSTest-mem-chan");
  channel.configure(chanCtx);
  channel.start();

  Context sinkCtx = new Context();
  sinkCtx.put("hdfs.path", nnURL + outputDir);
  sinkCtx.put("hdfs.fileType", HDFSWriterFactory.DataStreamType);
  sinkCtx.put("hdfs.batchSize", Integer.toString(1));

  HDFSEventSink sink = new HDFSEventSink();
  sink.setName("simpleHDFSTest-hdfs-sink");
  sink.configure(sinkCtx);
  sink.setChannel(channel);
  sink.start();

  // create an event
  String EVENT_BODY = "yarg!";
  channel.getTransaction().begin();
  try {
    channel.put(EventBuilder.withBody(EVENT_BODY, Charsets.UTF_8));
    channel.getTransaction().commit();
  } finally {
    channel.getTransaction().close();
  }

  // store event to HDFS
  sink.process();

  // shut down flume
  sink.stop();
  channel.stop();

  // verify that it's in HDFS and that its content is what we say it should be
  FileStatus[] statuses = fs.listStatus(outputDirPath);
  Assert.assertNotNull("No files found written to HDFS", statuses);
  Assert.assertEquals("Only one file expected", 1, statuses.length);

  for (FileStatus status : statuses) {
    Path filePath = status.getPath();
    logger.info("Found file on DFS: {}", filePath);
    FSDataInputStream stream = fs.open(filePath);
    BufferedReader reader = new BufferedReader(new InputStreamReader(stream));
    String line = reader.readLine();
    logger.info("First line in file {}: {}", filePath, line);
    Assert.assertEquals(EVENT_BODY, line);
  }

  if (!KEEP_DATA) {
    fs.delete(outputDirPath, true);
  }

  cluster.shutdown();
  cluster = null;
}
 
Developer: moueimei, Project: flume-release-1.7.0, Lines: 79, Source: TestHDFSEventSinkOnMiniCluster.java

Example 13: setUpClass

import org.apache.flume.channel.MemoryChannel; // import the package/class this method depends on
@BeforeClass
public static void setUpClass() throws Exception {
  selectedPort = findFreePort();

  source = new HTTPSource();
  channel = new MemoryChannel();

  httpsSource = new HTTPSource();
  httpsSource.setName("HTTPS Source");

  Context ctx = new Context();
  ctx.put("capacity", "100");
  Configurables.configure(channel, ctx);

  List<Channel> channels = new ArrayList<Channel>(1);
  channels.add(channel);

  ChannelSelector rcs = new ReplicatingChannelSelector();
  rcs.setChannels(channels);

  source.setChannelProcessor(new ChannelProcessor(rcs));

  channel.start();

  httpsSource.setChannelProcessor(new ChannelProcessor(rcs));

  // HTTP context
  Context context = new Context();

  context.put("port", String.valueOf(selectedPort));
  context.put("host", "0.0.0.0");

  // SSL context props
  Context sslContext = new Context();
  sslContext.put(HTTPSourceConfigurationConstants.SSL_ENABLED, "true");
  sslPort = findFreePort();
  sslContext.put(HTTPSourceConfigurationConstants.CONFIG_PORT,
    String.valueOf(sslPort));
  sslContext.put(HTTPSourceConfigurationConstants.SSL_KEYSTORE_PASSWORD, "password");
  sslContext.put(HTTPSourceConfigurationConstants.SSL_KEYSTORE, "src/test/resources/jettykeystore");

  Configurables.configure(source, context);
  Configurables.configure(httpsSource, sslContext);
  source.start();
  httpsSource.start();
}
 
Developer: mrtheb, Project: flume-forgives, Lines: 47, Source: TestHTTPSource.java

Example 14: createAvroSourceWithLocalFileRollingSink

import org.apache.flume.channel.MemoryChannel; // import the package/class this method depends on
@SuppressWarnings("unused")
private void createAvroSourceWithLocalFileRollingSink() {
	channel = new MemoryChannel();
	String channelName = "AvroSourceMemoryChannel-" + UUID.randomUUID();
	channel.setName(channelName);

	sink = new RollingFileSink();
	sink.setName("RollingFileSink-" + UUID.randomUUID());
	Map<String, String> paramters = new HashMap<>();
	paramters.put("type", "file_roll");
	paramters.put("sink.directory", "target/flumefilelog");
	Context sinkContext = new Context(paramters);
	sink.configure(sinkContext);
	Configurables.configure(channel, sinkContext);
	sink.setChannel(channel);

	final Map<String, String> properties = new HashMap<String, String>();
	properties.put("type", "avro");
	properties.put("bind", "localhost");
	properties.put("port", "44444");
	properties.put("selector.type", "multiplexing");
	properties.put("selector.header", "State");
	properties.put("selector.mapping.VIEWED", channelName);
	properties.put("selector.mapping.default", channelName);

	avroSource = new AvroSource();
	avroSource.setName("AvroSource-" + UUID.randomUUID());
	Context sourceContext = new Context(properties);
	avroSource.configure(sourceContext);
	ChannelSelector selector = new MultiplexingChannelSelector();
	List<Channel> channels = new ArrayList<>();
	channels.add(channel);
	selector.setChannels(channels);
	final Map<String, String> selectorProperties = new HashMap<String, String>();
	properties.put("default", channelName);
	Context selectorContext = new Context(selectorProperties);
	selector.configure(selectorContext);
	ChannelProcessor cp = new ChannelProcessor(selector);
	avroSource.setChannelProcessor(cp);

	sink.start();
	channel.start();
	avroSource.start();
}
 
Developer: jaibeermalik, Project: searchanalytics-bigdata, Lines: 45, Source: FlumeAgentServiceImpl.java

Example 15: setUp

import org.apache.flume.channel.MemoryChannel; // import the package/class this method depends on
@Before
public void setUp() {

    conf= ConfigFactory.load();

    ZOOKEEPER_HOSTS = StringUtils.join(conf.getStringList("zookeeper.hosts"), ",");
    KAFKA_HOSTS = conf.getStringList("kafka.hosts");

    LOGGER.info("Using Zookeeper hosts: " + ZOOKEEPER_HOSTS);
    LOGGER.info("Using Zookeeper hosts: " + KAFKA_HOSTS);

    String[] connection = KAFKA_HOSTS.get(0).split(":");

    simpleConsumer = new SimpleConsumer(connection[0], Integer.parseInt(connection[1]), 60000, 1024, CLIENT_ID);

    kafkaSink = new KafkaSink();

    Context kafkaContext = new Context();
    kafkaContext.put("topic", "test");
    kafkaContext.put("writeBody", "false");
    kafkaContext.put("kafka.metadata.broker.list", StringUtils.join(KAFKA_HOSTS, ","));
    kafkaContext.put("kafka.serializer.class", "kafka.serializer.StringEncoder");

    Configurables.configure(kafkaSink, kafkaContext);

    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "200");

    channel = new MemoryChannel();
    channel.setName("junitChannel");
    Configurables.configure(channel, channelContext);

    kafkaSink.setChannel(channel);

    channel.start();
    kafkaSink.start();

}
 
Developer: Stratio, Project: ingestion, Lines: 40, Source: KafkaSinkTestIT.java


Note: The org.apache.flume.channel.MemoryChannel.start examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors. Please consult each project's license before distributing or using the code; do not reproduce this article without permission.