

Java MemoryChannel.setName Method Code Examples

This article collects typical usage examples of the org.apache.flume.channel.MemoryChannel.setName method in Java. If you are wondering what MemoryChannel.setName does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also browse further usage examples for the enclosing class, org.apache.flume.channel.MemoryChannel.


Thirteen code examples of the MemoryChannel.setName method are shown below, ordered by popularity by default.
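Before the individual examples, here is a minimal, self-contained sketch of the pattern they all share: construct a MemoryChannel, give it a readable name with setName, configure it through a Context, and start it. The class name, channel name, and capacity values in this sketch are illustrative assumptions, not code taken from any of the projects below.

import java.util.HashMap;
import java.util.Map;
import java.util.UUID;

import org.apache.flume.Context;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.conf.Configurables;

public class MemoryChannelNamingSketch {
	public static void main(String[] args) {
		// Create the channel and assign a unique, human-readable name.
		MemoryChannel channel = new MemoryChannel();
		channel.setName("demo-memory-channel-" + UUID.randomUUID());

		// Configure capacity settings, then start the channel.
		Map<String, String> params = new HashMap<>();
		params.put("capacity", "10000");
		params.put("transactionCapacity", "1000");
		Configurables.configure(channel, new Context(params));
		channel.start();

		// The name set above is the one that shows up in Flume logs and metrics.
		System.out.println("Channel name: " + channel.getName());

		channel.stop();
	}
}

Most of the examples below combine setName with a UUID suffix in the same way, so that several channels created inside one JVM end up with distinct names.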

Example 1: createSparkAvroSink

import org.apache.flume.channel.MemoryChannel; // import for the package/class this method depends on
private void createSparkAvroSink() {
	sparkAvroChannel = new MemoryChannel();
	Map<String, String> channelParamters = new HashMap<>();
	channelParamters.put("capacity", "100000");
	channelParamters.put("transactionCapacity", "1000");
	Context channelContext = new Context(channelParamters);
	Configurables.configure(sparkAvroChannel, channelContext);
	String channelName = "SparkAvroMemoryChannel-" + UUID.randomUUID();
	sparkAvroChannel.setName(channelName);

	sparkAvroSink = new AvroSink();
	sparkAvroSink.setName("SparkAvroSink-" + UUID.randomUUID());
	Map<String, String> paramters = new HashMap<>();
	paramters.put("type", "avro");
	paramters.put("hostname", "localhost");
	paramters.put("port", "41111");
	paramters.put("batch-size", "100");
	Context sinkContext = new Context(paramters);
	sparkAvroSink.configure(sinkContext);
	Configurables.configure(sparkAvroSink, sinkContext);
	sparkAvroSink.setChannel(sparkAvroChannel);

	sparkAvroChannel.start();
	sparkAvroSink.start();
}
 
Developer ID: jaibeermalik, Project: searchanalytics-bigdata, Lines of code: 26, Source file: FlumeAgentServiceImpl.java

Example 2: setup

import org.apache.flume.channel.MemoryChannel; // import for the package/class this method depends on
@Before
public void setup() {
    //        Context channelContext = new Context();
    //        channelContext.put("checkpointDir","data/check");
    //        channelContext.put("dataDirs","data/data");
    //        channelContext.put("capacity","1000");
    //        channelContext.put("transactionCapacity","100");
    //        channelContext.put("checkpointInterval","300");
    //        channel = new FileChannel();
    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "5000");
    channel = new MemoryChannel();
    channel.setName("junitChannel");
    Configurables.configure(channel, channelContext);
    channel.start();

    druidSink = new DruidSink();
    druidSink.setChannel(channel);
    druidSink.configure(getMockContext());
    druidSink.start();
}
 
Developer ID: Stratio, Project: ingestion, Lines of code: 23, Source file: DruidSinkIT.java

Example 3: createSink

import org.apache.flume.channel.MemoryChannel; // import for the package/class this method depends on
private void createSink() {
	channel = new MemoryChannel();
	Map<String, String> channelParamters = new HashMap<>();
	channelParamters.put("capacity", "100000");
	channelParamters.put("transactionCapacity", "1000");
	Context channelContext = new Context(channelParamters);
	Configurables.configure(channel, channelContext);
	channel.setName("HbaseSinkChannel-" + UUID.randomUUID());

	sink = new HBaseSink();
	sink.setName("HbaseSink-" + UUID.randomUUID());
	Map<String, String> paramters = new HashMap<>();
	paramters.put(HBaseSinkConfigurationConstants.CONFIG_TABLE, "searchclicks");
	paramters.put(HBaseSinkConfigurationConstants.CONFIG_COLUMN_FAMILY, new String(HbaseJsonEventSerializer.COLUMFAMILY_CLIENT_BYTES));
	paramters.put(HBaseSinkConfigurationConstants.CONFIG_BATCHSIZE, "1000");
//	paramters.put(HBaseSinkConfigurationConstants.CONFIG_SERIALIZER, RegexHbaseEventSerializer.class.getName());
//	paramters.put(HBaseSinkConfigurationConstants.CONFIG_SERIALIZER + "." + RegexHbaseEventSerializer.REGEX_CONFIG, RegexHbaseEventSerializer.REGEX_DEFAULT);
//	paramters.put(HBaseSinkConfigurationConstants.CONFIG_SERIALIZER + "." + RegexHbaseEventSerializer.IGNORE_CASE_CONFIG, "true");
//	paramters.put(HBaseSinkConfigurationConstants.CONFIG_SERIALIZER + "." + RegexHbaseEventSerializer.COL_NAME_CONFIG, "json");
	paramters.put(HBaseSinkConfigurationConstants.CONFIG_SERIALIZER, HbaseJsonEventSerializer.class.getName());

	Context sinkContext = new Context(paramters);
	sink.configure(sinkContext);
	sink.setChannel(channel);

	sink.start();
	channel.start();
}
 
Developer ID: jaibeermalik, Project: searchanalytics-bigdata, Lines of code: 31, Source file: FlumeHbaseSinkServiceImpl.java

Example 4: createSink

import org.apache.flume.channel.MemoryChannel; // import for the package/class this method depends on
private void createSink() {
	sink = new ElasticSearchSink();
	sink.setName("ElasticSearchSink-" + UUID.randomUUID());
	channel = new MemoryChannel();
	Map<String, String> channelParamters = new HashMap<>();
	channelParamters.put("capacity", "100000");
	channelParamters.put("transactionCapacity", "1000");
	Context channelContext = new Context(channelParamters);
	Configurables.configure(channel, channelContext);
	channel.setName("ElasticSearchSinkChannel-" + UUID.randomUUID());

	Map<String, String> paramters = new HashMap<>();
	paramters.put(ElasticSearchSinkConstants.HOSTNAMES, "127.0.0.1:9310");
	String indexNamePrefix = "recentlyviewed";
	paramters.put(ElasticSearchSinkConstants.INDEX_NAME, indexNamePrefix);
	paramters.put(ElasticSearchSinkConstants.INDEX_TYPE, "clickevent");
	paramters.put(ElasticSearchSinkConstants.CLUSTER_NAME,
			"jai-testclusterName");
	paramters.put(ElasticSearchSinkConstants.BATCH_SIZE, "10");
	paramters.put(ElasticSearchSinkConstants.SERIALIZER,
			ElasticSearchJsonBodyEventSerializer.class.getName());

	Context sinkContext = new Context(paramters);
	sink.configure(sinkContext);
	sink.setChannel(channel);

	sink.start();
	channel.start();
}
 
Developer ID: jaibeermalik, Project: searchanalytics-bigdata, Lines of code: 30, Source file: FlumeESSinkServiceImpl.java

Example 5: createSink

import org.apache.flume.channel.MemoryChannel; // import for the package/class this method depends on
private void createSink() {
	sink = new HDFSEventSink();
	sink.setName("HDFSEventSink-" + UUID.randomUUID());
	channel = new MemoryChannel();
	Map<String, String> channelParamters = new HashMap<>();
	channelParamters.put("capacity", "100000");
	channelParamters.put("transactionCapacity", "1000");
	Context channelContext = new Context(channelParamters);
	Configurables.configure(channel, channelContext);
	channel.setName("HDFSEventSinkChannel-" + UUID.randomUUID());

	Map<String, String> paramters = new HashMap<>();
	paramters.put("hdfs.type", "hdfs");
	String hdfsBasePath = hadoopClusterService.getHDFSUri()
			+ "/searchevents";
	paramters.put("hdfs.path", hdfsBasePath + "/%Y/%m/%d/%H");
	paramters.put("hdfs.filePrefix", "searchevents");
	paramters.put("hdfs.fileType", "DataStream");
	paramters.put("hdfs.rollInterval", "0");
	paramters.put("hdfs.rollSize", "0");
	paramters.put("hdfs.idleTimeout", "1");
	paramters.put("hdfs.rollCount", "0");
	paramters.put("hdfs.batchSize", "1000");
	paramters.put("hdfs.useLocalTimeStamp", "true");

	Context sinkContext = new Context(paramters);
	sink.configure(sinkContext);
	sink.setChannel(channel);

	sink.start();
	channel.start();
}
 
Developer ID: jaibeermalik, Project: searchanalytics-bigdata, Lines of code: 33, Source file: FlumeHDFSSinkServiceImpl.java

Example 6: prepareMongo

import org.apache.flume.channel.MemoryChannel; // import for the package/class this method depends on
@Before
public void prepareMongo() throws Exception {
    fongo = new Fongo("mongo test server");

    Context mongoContext = new Context();
    mongoContext.put("batchSize", "3");
    mongoContext.put("mappingFile", "/mapping_definition_update.json");
    mongoContext.put("mongoUri", "INJECTED");
    mongoContext.put("dynamic", "true");
    mongoContext.put("updateInsteadReplace", "true");

    mongoSink = new MongoSink();

    injectFongo(mongoSink);
    Configurables.configure(mongoSink, mongoContext);

    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "200");

    channel = new MemoryChannel();
    channel.setName("junitChannel");
    Configurables.configure(channel, channelContext);

    mongoSink.setChannel(channel);

    channel.start();
    mongoSink.start();

}
 
Developer ID: Stratio, Project: ingestion, Lines of code: 31, Source file: MongoSinkUpdateInsteadReplaceTest.java

Example 7: prepareMongo

import org.apache.flume.channel.MemoryChannel; // import for the package/class this method depends on
@Before
public void prepareMongo() throws Exception {
    fongo = new Fongo("mongo test server");

    Context mongoContext = new Context();
    mongoContext.put("batchSize", "3");
    mongoContext.put("mappingFile", "/mapping_definition_1.json");
    mongoContext.put("mongoUri", "INJECTED");
    mongoContext.put("dynamic", "true");

    mongoSink = new MongoSink();

    injectFongo(mongoSink);
    Configurables.configure(mongoSink, mongoContext);

    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "200");

    channel = new MemoryChannel();
    channel.setName("junitChannel");
    Configurables.configure(channel, channelContext);

    mongoSink.setChannel(channel);

    channel.start();
    mongoSink.start();
}
 
Developer ID: Stratio, Project: ingestion, Lines of code: 29, Source file: MongoSinkDynamicTest.java

Example 8: prepareMongo

import org.apache.flume.channel.MemoryChannel; // import for the package/class this method depends on
@Before
public void prepareMongo() throws Exception {
    fongo = new Fongo("mongo test server");

    Context mongoContext = new Context();
    mongoContext.put("batchSize", "1");
    mongoContext.put("mappingFile", "/mapping_definition_1.json");
    mongoContext.put("mongoUri", "INJECTED");

    mongoSink = new MongoSink();

    injectFongo(mongoSink);
    Configurables.configure(mongoSink, mongoContext);

    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "200");

    channel = new MemoryChannel();
    channel.setName("junitChannel");
    Configurables.configure(channel, channelContext);

    mongoSink.setChannel(channel);

    channel.start();
    mongoSink.start();
}
 
Developer ID: Stratio, Project: ingestion, Lines of code: 28, Source file: MongoSinkTest.java

Example 9: _do

import org.apache.flume.channel.MemoryChannel; // import for the package/class this method depends on
private void _do() throws TTransportException, IOException, InterruptedException {
  final Context context = new Context();
  final InetSocketAddress contactPoint = CassandraTestHelper.getCassandraContactPoint();
  context.put("tables", "keyspaceTestCassandraSinkIT.tableTestCassandraSinkIT");
  context.put("hosts", contactPoint.getAddress().getHostAddress());
  context.put("batchSize", "1");
  context.put("consistency", "QUORUM");

  final File cqlFile = File.createTempFile("flumeTest", "cql");
  cqlFile.deleteOnExit();

  IOUtils.write(
      "CREATE KEYSPACE IF NOT EXISTS keyspaceTestCassandraSinkIT WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };\n\n"
          + "CREATE TABLE IF NOT EXISTS keyspaceTestCassandraSinkIT.tableTestCassandraSinkIT ("
          + "id uuid, bool_field boolean, int_field int, PRIMARY KEY (int_field)"
          + ");\n\n",
      new FileOutputStream(cqlFile));

  context.put("cqlFile", cqlFile.getAbsolutePath());
  sink = new CassandraSink();
  sink.configure(context);

  Context channelContext = new Context();
  channelContext.put("capacity", "10000");
  channelContext.put("transactionCapacity", "200");
  channel = new MemoryChannel();
  channel.setName("junitChannel");
  Configurables.configure(channel, channelContext);
  sink.setChannel(channel);

  sink.start();
  sink.stop();
}
 
Developer ID: Stratio, Project: ingestion, Lines of code: 34, Source file: CassandraSinkIT.java

Example 10: simpleHDFSTest

import org.apache.flume.channel.MemoryChannel; // import for the package/class this method depends on
/**
 * This is a very basic test that writes one event to HDFS and reads it back.
 */
@Test
public void simpleHDFSTest() throws EventDeliveryException, IOException {
  cluster = new MiniDFSCluster(new Configuration(), 1, true, null);
  cluster.waitActive();

  String outputDir = "/flume/simpleHDFSTest";
  Path outputDirPath = new Path(outputDir);

  logger.info("Running test with output dir: {}", outputDir);

  FileSystem fs = cluster.getFileSystem();
  // ensure output directory is empty
  if (fs.exists(outputDirPath)) {
    fs.delete(outputDirPath, true);
  }

  String nnURL = getNameNodeURL(cluster);
  logger.info("Namenode address: {}", nnURL);

  Context chanCtx = new Context();
  MemoryChannel channel = new MemoryChannel();
  channel.setName("simpleHDFSTest-mem-chan");
  channel.configure(chanCtx);
  channel.start();

  Context sinkCtx = new Context();
  sinkCtx.put("hdfs.path", nnURL + outputDir);
  sinkCtx.put("hdfs.fileType", HDFSWriterFactory.DataStreamType);
  sinkCtx.put("hdfs.batchSize", Integer.toString(1));

  HDFSEventSink sink = new HDFSEventSink();
  sink.setName("simpleHDFSTest-hdfs-sink");
  sink.configure(sinkCtx);
  sink.setChannel(channel);
  sink.start();

  // create an event
  String EVENT_BODY = "yarg!";
  channel.getTransaction().begin();
  try {
    channel.put(EventBuilder.withBody(EVENT_BODY, Charsets.UTF_8));
    channel.getTransaction().commit();
  } finally {
    channel.getTransaction().close();
  }

  // store event to HDFS
  sink.process();

  // shut down flume
  sink.stop();
  channel.stop();

  // verify that it's in HDFS and that its content is what we say it should be
  FileStatus[] statuses = fs.listStatus(outputDirPath);
  Assert.assertNotNull("No files found written to HDFS", statuses);
  Assert.assertEquals("Only one file expected", 1, statuses.length);

  for (FileStatus status : statuses) {
    Path filePath = status.getPath();
    logger.info("Found file on DFS: {}", filePath);
    FSDataInputStream stream = fs.open(filePath);
    BufferedReader reader = new BufferedReader(new InputStreamReader(stream));
    String line = reader.readLine();
    logger.info("First line in file {}: {}", filePath, line);
    Assert.assertEquals(EVENT_BODY, line);
  }

  if (!KEEP_DATA) {
    fs.delete(outputDirPath, true);
  }

  cluster.shutdown();
  cluster = null;
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines of code: 79, Source file: TestHDFSEventSinkOnMiniCluster.java

Example 11: createAvroSourceWithLocalFileRollingSink

import org.apache.flume.channel.MemoryChannel; // import for the package/class this method depends on
@SuppressWarnings("unused")
private void createAvroSourceWithLocalFileRollingSink() {
	channel = new MemoryChannel();
	String channelName = "AvroSourceMemoryChannel-" + UUID.randomUUID();
	channel.setName(channelName);

	sink = new RollingFileSink();
	sink.setName("RollingFileSink-" + UUID.randomUUID());
	Map<String, String> paramters = new HashMap<>();
	paramters.put("type", "file_roll");
	paramters.put("sink.directory", "target/flumefilelog");
	Context sinkContext = new Context(paramters);
	sink.configure(sinkContext);
	Configurables.configure(channel, sinkContext);
	sink.setChannel(channel);

	final Map<String, String> properties = new HashMap<String, String>();
	properties.put("type", "avro");
	properties.put("bind", "localhost");
	properties.put("port", "44444");
	properties.put("selector.type", "multiplexing");
	properties.put("selector.header", "State");
	properties.put("selector.mapping.VIEWED", channelName);
	properties.put("selector.mapping.default", channelName);

	avroSource = new AvroSource();
	avroSource.setName("AvroSource-" + UUID.randomUUID());
	Context sourceContext = new Context(properties);
	avroSource.configure(sourceContext);
	ChannelSelector selector = new MultiplexingChannelSelector();
	List<Channel> channels = new ArrayList<>();
	channels.add(channel);
	selector.setChannels(channels);
	final Map<String, String> selectorProperties = new HashMap<String, String>();
	properties.put("default", channelName);
	Context selectorContext = new Context(selectorProperties);
	selector.configure(selectorContext);
	ChannelProcessor cp = new ChannelProcessor(selector);
	avroSource.setChannelProcessor(cp);

	sink.start();
	channel.start();
	avroSource.start();
}
 
Developer ID: jaibeermalik, Project: searchanalytics-bigdata, Lines of code: 45, Source file: FlumeAgentServiceImpl.java

Example 12: setup

import org.apache.flume.channel.MemoryChannel; // import for the package/class this method depends on
@Before
public void setup() throws TTransportException, IOException,
    InterruptedException {
  final Context context = new Context();
  final InetSocketAddress contactPoint = CassandraTestHelper.getCassandraContactPoint();
  context.put("tables", KEYSPACE + "." + TABLE);
  context.put("hosts", contactPoint.getAddress().getHostAddress());
  context.put("batchSize", "1");
  context.put("consistency", "QUORUM");

  Cluster cluster = Cluster.builder()
      .addContactPointsWithPorts(Collections.singletonList(contactPoint))
      .build();
  Session session = cluster.connect();
  session.execute(
      "CREATE KEYSPACE IF NOT EXISTS keyspaceTest WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };");
  session.execute("CREATE TABLE if not exists keyspaceTest.tableTest ("
      + PRIMARY_KEY + " uuid, " + TEXT_FIELD + " text, "
      + VARCHAR_FIELD + " varchar, " + VARINT_FIELD
      + " varint, " + ASCII_FIELD + " ascii, "
      + BOOLEAN_FIELD + " boolean, " + DECIMAL_FIELD
      + " decimal, " + DOUBLE_FIELD + " double, "
      + FLOAT_FIELD + " float, " + INET_FIELD + " inet, "
      + INT_FIELD + " int, " + LIST_FIELD + " list<TEXT>, "
      + MAP_FIELD + " map<TEXT,INT>, " + SET_FIELD
      + " set<TEXT>, " + TIMESTAMP_FIELD + " timestamp, "
      + UUID_FIELD + " uuid, " + BIGINT_FIELD
      + " bigint, PRIMARY KEY (" + PRIMARY_KEY + "));");
  session.close();
  cluster.close();

  sink = new CassandraSink();
  sink.configure(context);

  Context channelContext = new Context();
  channelContext.put("capacity", "10000");
  channelContext.put("transactionCapacity", "200");
  channel = new MemoryChannel();
  channel.setName("junitChannel");
  Configurables.configure(channel, channelContext);
  sink.setChannel(channel);

  sink.start();
  headers = new HashMap<String, String>();
  headers.put(PRIMARY_KEY, UUID.randomUUID().toString());
}
 
Developer ID: Stratio, Project: ingestion, Lines of code: 47, Source file: CassandraDataTypesIT.java

Example 13: setUp

import org.apache.flume.channel.MemoryChannel; // import for the package/class this method depends on
@Before
public void setUp() {

    conf= ConfigFactory.load();

    ZOOKEEPER_HOSTS = StringUtils.join(conf.getStringList("zookeeper.hosts"), ",");
    KAFKA_HOSTS = conf.getStringList("kafka.hosts");

    LOGGER.info("Using Zookeeper hosts: " + ZOOKEEPER_HOSTS);
    LOGGER.info("Using Zookeeper hosts: " + KAFKA_HOSTS);

    String[] connection = KAFKA_HOSTS.get(0).split(":");

    simpleConsumer = new SimpleConsumer(connection[0], Integer.parseInt(connection[1]), 60000, 1024, CLIENT_ID);

    kafkaSink = new KafkaSink();

    Context kafkaContext = new Context();
    kafkaContext.put("topic", "test");
    kafkaContext.put("writeBody", "false");
    kafkaContext.put("kafka.metadata.broker.list", StringUtils.join(KAFKA_HOSTS, ","));
    kafkaContext.put("kafka.serializer.class", "kafka.serializer.StringEncoder");

    Configurables.configure(kafkaSink, kafkaContext);

    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "200");

    channel = new MemoryChannel();
    channel.setName("junitChannel");
    Configurables.configure(channel, channelContext);

    kafkaSink.setChannel(channel);

    channel.start();
    kafkaSink.start();

}
 
Developer ID: Stratio, Project: ingestion, Lines of code: 40, Source file: KafkaSinkTestIT.java


Note: The org.apache.flume.channel.MemoryChannel.setName examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code; do not reproduce this compilation without permission.