

Java FlinkKafkaConsumer010 Class Code Examples

This article compiles typical usage examples of the Java class org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010. If you are wondering what FlinkKafkaConsumer010 is for, how to use it, or what real-world usages look like, the selected examples below should help.


The FlinkKafkaConsumer010 class belongs to the org.apache.flink.streaming.connectors.kafka package. Six code examples of the class are presented below, sorted by popularity by default.
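All of the examples share the same basic pattern: build a java.util.Properties object with the Kafka connection settings, construct a FlinkKafkaConsumer010 with a topic name and a DeserializationSchema, and register it as a source via StreamExecutionEnvironment.addSource(). The following minimal sketch illustrates that pattern; the topic name, broker address, and consumer group id are placeholders rather than values taken from any of the examples below.

import java.util.Properties;

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010;
import org.apache.flink.streaming.util.serialization.SimpleStringSchema; // relocated to org.apache.flink.api.common.serialization in newer Flink versions

public class MinimalKafkaRead {

    public static void main(String[] args) throws Exception {
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Kafka connection settings; host:port and group id are placeholders
        final Properties props = new Properties();
        props.setProperty("bootstrap.servers", "localhost:9092");
        props.setProperty("group.id", "example-group");

        // Consume the placeholder topic as a stream of raw strings
        final DataStream<String> messages = env.addSource(
                new FlinkKafkaConsumer010<>("example-topic", new SimpleStringSchema(), props));

        messages.print();
        env.execute("Minimal FlinkKafkaConsumer010 job");
    }
}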

Example 1: doOperation

import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010; // import the required class
@Override
public Object doOperation(final AddElementsFromKafka op, final Context context, final Store store) throws OperationException {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    if (null != op.getParallelism()) {
        env.setParallelism(op.getParallelism());
    }

    // Consume the Kafka topic as strings and convert each record into Gaffer Elements
    final DataStream<Element> builder =
            env.addSource(new FlinkKafkaConsumer010<>(op.getTopic(), new SimpleStringSchema(), createFlinkProperties(op)))
                    .flatMap(new GafferMapFunction(op.getElementGenerator()));

    // Write the Elements to the Gaffer store, rebalancing the stream first unless skipped
    if (Boolean.parseBoolean(op.getOption(FlinkConstants.SKIP_REBALANCING))) {
        builder.addSink(new GafferSink(op, store));
    } else {
        builder.rebalance().addSink(new GafferSink(op, store));
    }

    try {
        env.execute(op.getClass().getSimpleName() + "-" + op.getGroupId() + "-" + op.getTopic());
    } catch (final Exception e) {
        throw new OperationException("Failed to add elements from Kafka topic: " + op.getTopic(), e);
    }

    return null;
}
 
Developer ID: gchq, Project: Gaffer, Lines of code: 26, Source file: AddElementsFromKafkaHandler.java
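The snippet relies on a createFlinkProperties(op) helper that is not shown above. A plausible sketch of such a helper follows; the getBootstrapServers() and getOptions() accessors on AddElementsFromKafka are assumptions inferred from how the operation is used, not copied from the project.

// Hypothetical sketch: builds the Kafka consumer Properties from the operation.
// getBootstrapServers() and getOptions() are assumed accessors on AddElementsFromKafka.
private static Properties createFlinkProperties(final AddElementsFromKafka operation) {
    final Properties properties = new Properties();
    if (null != operation.getOptions()) {
        properties.putAll(operation.getOptions());
    }
    properties.put("group.id", operation.getGroupId());
    properties.put("bootstrap.servers", operation.getBootstrapServers());
    properties.put("auto.offset.reset", "earliest");
    return properties;
}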

Example 2: getAisMessagesStream

import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010; // import the required class
/**
 * Gets the AIS messages stream from a file, HDFS, or a Kafka topic.
 *
 * @param env the streaming execution environment
 * @param streamSource the type of the input source (KAFKA, FILE or HDFS)
 * @param filePathOrTopicProperty the configuration property holding the data file path or the topic name of the input Kafka stream
 * @param parsingConfig the parsing configuration for the CSV lines
 * @param outputLineDelimiter the line delimiter used by the CSV schema
 * @return the stream of parsed AIS messages with timestamps and watermarks assigned, or null for an unknown source type
 */
public static DataStream<AisMessage> getAisMessagesStream(StreamExecutionEnvironment env,
    StreamSourceType streamSource, String filePathOrTopicProperty, String parsingConfig,
    String outputLineDelimiter) {
  DataStream<AisMessage> aisMessagesStream = null;
  String fileOrTopicName = configs.getStringProp(filePathOrTopicProperty);
  switch (streamSource) {
    case KAFKA:
      Properties kafkaProps = getKafkaConsumerProperties();
      // create a Kafka consumer
      FlinkKafkaConsumer010<AisMessage> kafkaConsumer =
          new FlinkKafkaConsumer010<AisMessage>(fileOrTopicName, new AisMessageCsvSchema(
              parsingConfig, outputLineDelimiter), kafkaProps);

      kafkaConsumer.assignTimestampsAndWatermarks(new AisMessagesTimeAssigner());
      aisMessagesStream = env.addSource(kafkaConsumer);
      break;
    case FILE:

      DataStream<AisMessage> aisMessagesStreamWithoutTime =
          env.addSource(new FileLinesStreamSource(fileOrTopicName, parsingConfig))
              .flatMap(new CsvLineToAisMessageMapper(parsingConfig)).setParallelism(1);

      // Assign the timestamp of the AIS messages based on their timestamps
      aisMessagesStream =
          aisMessagesStreamWithoutTime
              .assignTimestampsAndWatermarks(new AisMessagesTimeAssigner());

      break;

    case HDFS:
      aisMessagesStream =
          env.readTextFile(fileOrTopicName).flatMap(new CsvLineToAisMessageMapper(parsingConfig))
              .assignTimestampsAndWatermarks(new AisMessagesTimeAssigner());
      break;
    default:
      return null;
  }
  return aisMessagesStream;
}
 
Developer ID: ehabqadah, Project: in-situ-processing-datAcron, Lines of code: 50, Source file: AppUtils.java

Example 3: main

import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010; // import the required class
public static void main(String[] args) throws Exception {
    // Read parameters from command line
    final ParameterTool params = ParameterTool.fromArgs(args);

    if(params.getNumberOfParameters() < 4) {
        System.out.println("\nUsage: FlinkReadKafka --read-topic <topic> --write-topic <topic> --bootstrap.servers <kafka brokers> --group.id <groupid>");
        return;
    }


    // setup streaming environment
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().setRestartStrategy(RestartStrategies.fixedDelayRestart(4, 10000));
    env.enableCheckpointing(300000); // 300 seconds
    env.getConfig().setGlobalJobParameters(params);

    DataStream<String> messageStream = env
            .addSource(new FlinkKafkaConsumer010<>(
                    params.getRequired("read-topic"),
                    new SimpleStringSchema(),
                    params.getProperties())).name("Read from Kafka");

    // setup table environment (created here but not used further in this snippet)
    StreamTableEnvironment sTableEnv = TableEnvironment.getTableEnvironment(env);


    // Write JSON payload back to Kafka topic
    messageStream.addSink(new FlinkKafkaProducer010<>(
                params.getRequired("write-topic"),
                new SimpleStringSchema(),
                params.getProperties())).name("Write To Kafka");

    env.execute("FlinkReadWriteKafka");
}
 
Developer ID: kgorman, Project: TrafficAnalyzer, Lines of code: 35, Source file: FlinkReadWriteKafka.java

Example 4: main

import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010; // import the required class
public static void main(String[] args) throws Exception {

		// get an ExecutionEnvironment
		StreamExecutionEnvironment env =
				StreamExecutionEnvironment.getExecutionEnvironment();
		// configure event-time processing
		env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
		// generate a Watermark every second
		env.getConfig().setAutoWatermarkInterval(1000);

		// configure Kafka consumer
		Properties props = new Properties();
		props.setProperty("zookeeper.connect", "localhost:2181"); // Zookeeper default host:port
		props.setProperty("bootstrap.servers", "localhost:9092"); // Broker default host:port
		props.setProperty("group.id", "myGroup");                 // Consumer group ID
		props.setProperty("auto.offset.reset", "earliest");       // Always read topic from start

		// create a Kafka consumer
		FlinkKafkaConsumer010<TaxiRide> consumer =
				new FlinkKafkaConsumer010<>(
						"cleansedRides",
						new TaxiRideSchema(),
						props);

		// assign a timestamp extractor to the consumer
		consumer.assignTimestampsAndWatermarks(new PopulatPlacesWatermarkOutOfOrdeness(MAX_EVENT_DELAY_DEFAULT));

		DataStream<TaxiRide> rides = env.addSource(consumer);

//		DataStream<TaxiRide> rides = env.addSource(
//				new TaxiRideSource("/Users/dineshat/solo/flink-java-project/nycTaxiRides.gz", MAX_EVENT_DELAY_DEFAULT, SERVING_SPEED_FACTOR_DEFAULT));

		DataStream<Tuple5<Float, Float, Long, Boolean, Integer>> popoularPlaces = rides
				.filter(new TaxiRideCleansing.NewYorkTaxiFilter())
				.map(new MapToGridCell())
				.<KeyedStream<Tuple2<Integer, Boolean>, Tuple2<Integer, Boolean>>>keyBy(0, 1)
				.timeWindow(Time.minutes(15), Time.minutes(5))
				.apply(new RideCounterWindowFunction())
				.filter(new PopularPlaceThresholdFilter(POPULAR_PLACES_COUNTER_THRESHOLD))
				.map(new MapFromGridCellToLatLon());

		Map<String, String> config = new HashMap<>();
		config.put("bulk.flush.max.actions", "10");   // flush inserts after every 10 events
		config.put("cluster.name", "elasticsearch"); // default cluster name

		List<InetSocketAddress> transports = new ArrayList<>();
		// set default connection details
		transports.add(new InetSocketAddress(InetAddress.getByName("localhost"), 9300));

		popoularPlaces.addSink(
				new ElasticsearchSink<>(config, transports, new PopularPlaceInserter()))
//				.setParallelism(1)
				.name("ES_Sink");

//		popoularPlaces.print();
		env.execute("Popular place task");
	}
 
Developer ID: dineshtrivedi, Project: flink-java-project, Lines of code: 58, Source file: PoupularPlacesMain.java

Example 5: getKafkaConsumer

import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010; // import the required class
@Override
FlinkKafkaConsumerBase<Row> getKafkaConsumer(String topic, Properties properties, DeserializationSchema<Row> deserializationSchema) {
    return new FlinkKafkaConsumer010<>(topic, deserializationSchema, properties);
}
 
Developer ID: datafibers-community, Project: df_data_service, Lines of code: 5, Source file: Kafka010AvroTableSource.java

Example 6: main

import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010; // import the required class
public static void main(String[] args) throws Exception {
	// parse input arguments
	final ParameterTool parameterTool = ParameterTool.fromArgs(args);

	if (parameterTool.getNumberOfParameters() < 5) {
		System.out.println("Missing parameters!\n" +
				"Usage: Kafka --input-topic <topic> --output-topic <topic> " +
				"--bootstrap.servers <kafka brokers> " +
				"--zookeeper.connect <zk quorum> --group.id <some id> [--prefix <prefix>]");
		return;
	}

	String prefix = parameterTool.get("prefix", "PREFIX:");

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.getConfig().disableSysoutLogging();
	env.getConfig().setRestartStrategy(RestartStrategies.fixedDelayRestart(4, 10000));
	env.enableCheckpointing(5000); // create a checkpoint every 5 seconds
	env.getConfig().setGlobalJobParameters(parameterTool); // make parameters available in the web interface

	DataStream<String> input = env
			.addSource(new FlinkKafkaConsumer010<>(
					parameterTool.getRequired("input-topic"),
					new SimpleStringSchema(),
					parameterTool.getProperties()))
			.map(new PrefixingMapper(prefix));

	input.addSink(
			new FlinkKafkaProducer010<>(
					parameterTool.getRequired("output-topic"),
					new SimpleStringSchema(),
					parameterTool.getProperties()));

	env.execute("Kafka 0.10 Example");
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 39, Source file: Kafka010Example.java
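PrefixingMapper is referenced above but not included in the snippet. A minimal sketch of what such a mapper could look like follows; the class body is inferred from how it is used here, not copied from the project.

// Hypothetical sketch: prepends a fixed prefix to every record read from Kafka.
// Requires org.apache.flink.api.common.functions.MapFunction.
private static class PrefixingMapper implements MapFunction<String, String> {
	private final String prefix;

	PrefixingMapper(String prefix) {
		this.prefix = prefix;
	}

	@Override
	public String map(String value) throws Exception {
		return prefix + value;
	}
}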


Note: The org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010 class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers; the copyright of the source code belongs to the original authors. Please refer to the corresponding projects' licenses before distributing or using the code, and do not reproduce this article without permission.