Java StringDecoder Class Code Examples

This article collects typical usage examples of the Java class kafka.serializer.StringDecoder. If you are wondering what the StringDecoder class is for, how to use it, or what working examples look like, the curated code examples below should help.


The StringDecoder class belongs to the kafka.serializer package. The sections below show 15 code examples of the StringDecoder class, sorted by popularity by default.
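
Before the individual examples, here is a minimal sketch of what the class itself does: kafka.serializer.StringDecoder implements Decoder<String> and converts a byte[] into a String, reading the character set from the serializer.encoding property (UTF-8 by default). The snippet below is illustrative only and is not taken from any of the projects listed on this page; the class name StringDecoderSketch and the sample payload are invented for demonstration.

import java.nio.charset.StandardCharsets;

import kafka.serializer.StringDecoder;
import kafka.utils.VerifiableProperties;

public class StringDecoderSketch {
    public static void main(String[] args) {
        // An empty VerifiableProperties carries no serializer.encoding override, so UTF-8 is used.
        StringDecoder decoder = new StringDecoder(new VerifiableProperties());

        byte[] raw = "hello kafka".getBytes(StandardCharsets.UTF_8);

        // fromBytes(byte[]) is the single method defined by kafka.serializer.Decoder<String>.
        String decoded = decoder.fromBytes(raw);
        System.out.println(decoded); // prints "hello kafka"
    }
}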

Example 1: main

import kafka.serializer.StringDecoder; // import the dependent package/class
public static void main(String[] args) {

        SparkConf conf = new SparkConf()
                .setAppName("kafka-sandbox")
                .setMaster("local[*]");
        JavaSparkContext sc = new JavaSparkContext(conf);
        JavaStreamingContext ssc = new JavaStreamingContext(sc, new Duration(2000));

        Set<String> topics = Collections.singleton("mytopic");
        Map<String, String> kafkaParams = new HashMap<>();
        kafkaParams.put("metadata.broker.list", "localhost:9092");

        JavaPairInputDStream<String, String> directKafkaStream = KafkaUtils.createDirectStream(ssc,
                String.class, String.class, StringDecoder.class, StringDecoder.class, kafkaParams, topics);

        directKafkaStream.foreachRDD(rdd -> {
            System.out.println("--- New RDD with " + rdd.partitions().size()
                    + " partitions and " + rdd.count() + " records");
            rdd.foreach(record -> System.out.println(record._2));
        });

        ssc.start();
        ssc.awaitTermination();
    }
 
Developer: aseigneurin, Project: kafka-sandbox, Lines: 25, Source: SparkStringConsumer.java

Example 2: main

import kafka.serializer.StringDecoder; // import the dependent package/class
public static void main(String[] args) throws InterruptedException {
  SparkConf sc = new SparkConf().setAppName("POC-Kafka-New");
  
  try(JavaStreamingContext jsc = new JavaStreamingContext(sc, new Duration(2000))) {
    
    JavaPairInputDStream<String, String> stream = KafkaUtils.createDirectStream(
        jsc, String.class, String.class, StringDecoder.class, StringDecoder.class,
        Collections.singletonMap("metadata.broker.list", KAFKA_HOST_PORT),
        Collections.singleton(EXAMPLE_TOPIC));

    JavaDStream<ExampleXML> records = stream.map(t -> t._2()).map(new ParseXML());
    records.foreachRDD(rdd -> System.out.printf("Amount of XMLs: %d\n", rdd.count()));

    jsc.start();
    jsc.awaitTermination();
  }
}
 
Developer: ciandt-dev, Project: gcp, Lines: 18, Source: Spark4KafkaNew.java

Example 3: main

import kafka.serializer.StringDecoder; // import the dependent package/class
public static void main(String[] args) throws InterruptedException, IOException {
  SparkConf sc = new SparkConf().setAppName("POC-BigQuery");
  
  try(JavaStreamingContext jsc = new JavaStreamingContext(sc, new Duration(60000))) {
    JavaPairInputDStream<String, String> stream = KafkaUtils.createDirectStream(
        jsc, String.class, String.class, StringDecoder.class, StringDecoder.class,
        Collections.singletonMap("metadata.broker.list", KAFKA_HOST_PORT), Collections.singleton(EXAMPLE_TOPIC));

    Configuration conf = new Configuration();
    BigQueryConfiguration.configureBigQueryOutput(conf, BQ_EXAMPLE_TABLE, BQ_EXAMPLE_SCHEMA);
    conf.set("mapreduce.job.outputformat.class", BigQueryOutputFormat.class.getName());

    JavaDStream<ExampleXML> records = stream.map(t -> t._2()).map(new ParseXML());
    records.foreachRDD(rdd -> {
      System.out.printf("Amount of XMLs: %d\n", rdd.count());
      long time = System.currentTimeMillis();
      rdd.mapToPair(new PrepToBQ()).saveAsNewAPIHadoopDataset(conf);
      System.out.printf("Sent to BQ in %fs\n", (System.currentTimeMillis()-time)/1000f);
    });
    
    jsc.start();
    jsc.awaitTermination();
  }
}
 
Developer: ciandt-dev, Project: gcp, Lines: 25, Source: Spark6BigQuery.java

Example 4: main

import kafka.serializer.StringDecoder; // import the dependent package/class
public static void main(String[] args) throws IOException {
	Flags.setFromCommandLineArgs(THE_OPTIONS, args);

	// Initialize the Spark conf
	SparkConf conf = new SparkConf().setAppName("A SECTONG Application: Apache Log Analysis with Spark");
	JavaSparkContext sc = new JavaSparkContext(conf);
	JavaStreamingContext jssc = new JavaStreamingContext(sc, Flags.getInstance().getSlideInterval());
	SQLContext sqlContext = new SQLContext(sc);

	// Initialize parameters
	HashSet<String> topicsSet = new HashSet<String>(Arrays.asList(Flags.getInstance().getKafka_topic().split(",")));
	HashMap<String, String> kafkaParams = new HashMap<String, String>();
	kafkaParams.put("metadata.broker.list", Flags.getInstance().getKafka_broker());

	// Pull data from the Kafka stream
	JavaPairInputDStream<String, String> messages = KafkaUtils.createDirectStream(jssc, String.class, String.class,
			StringDecoder.class, StringDecoder.class, kafkaParams, topicsSet);

	JavaDStream<String> lines = messages.map(new Function<Tuple2<String, String>, String>() {
		private static final long serialVersionUID = 5266880065425088203L;

		public String call(Tuple2<String, String> tuple2) {
			return tuple2._2();
		}
	});

	JavaDStream<ApacheAccessLog> accessLogsDStream = lines.flatMap(line -> {
		List<ApacheAccessLog> list = new ArrayList<>();
		try {
			// Parse each line into an ApacheAccessLog
			list.add(ApacheAccessLog.parseFromLogLine(line));
			return list;
		} catch (RuntimeException e) {
			return list;
		}
	}).cache();

	accessLogsDStream.foreachRDD(rdd -> {

		// rdd to DataFrame
		DataFrame df = sqlContext.createDataFrame(rdd, ApacheAccessLog.class);
		// Write out as Parquet files
		df.write().partitionBy("ipAddress", "method", "responseCode").mode(SaveMode.Append).parquet(Flags.getInstance().getParquetFile());

		return null;
	});

	// Start the streaming job
	jssc.start(); // start the computation
	jssc.awaitTermination(); // wait for termination
}
 
Developer: sectong, Project: SparkToParquet, Lines: 52, Source: AppMain.java

Example 5: run

import kafka.serializer.StringDecoder; // import the dependent package/class
@Override
public void run() {
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(transducer_topic, new Integer(1));

    StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
    StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

    Map<String, List<KafkaStream<String, String>>> consumerMap =
            consumer.createMessageStreams(topicCountMap,keyDecoder,valueDecoder);
    KafkaStream<String, String> stream = consumerMap.get(transducer_topic).get(0);
    ConsumerIterator<String, String> it = stream.iterator();
    while (it.hasNext() && bStartConsume){
        transducerDataProcessor.newData(it.next().message());

        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
}
 
Developer: unrealinux, Project: DataProcessPlatformKafkaJavaSDK, Lines: 23, Source: KafkaConsumerTransducer.java

Example 6: consume

import kafka.serializer.StringDecoder; // import the dependent package/class
void consume() throws Exception {
	// specify the number of consumer threads
	Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
	topicCountMap.put(KafkaProducer.TOPIC, new Integer(threadsNum));

	// specify data decoder
	StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
	StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

	Map<String, List<KafkaStream<String, String>>> consumerMap = consumer
			.createMessageStreams(topicCountMap, keyDecoder, valueDecoder); // the three String type parameters are the topic, the key, and the value

	// acquire data
	List<KafkaStream<String, String>> streams = consumerMap.get(KafkaProducer.TOPIC);

	// multi-threaded consume
	executor = Executors.newFixedThreadPool(threadsNum);    //create a thread pool
	for (final KafkaStream<String, String> stream : streams) {
		executor.submit(new ConsumerThread(stream));        // run thread
	}
}
 
Developer: thulab, Project: iotdb-jdbc, Lines: 22, Source: KafkaConsumer.java

Example 7: collectMq

import kafka.serializer.StringDecoder; // import the dependent package/class
public void collectMq() {
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(Constants.kfTopic, new Integer(1));

    StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
    StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

    Map<String, List<KafkaStream<String, String>>> consumerMap =
            consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);

    KafkaStream<String, String> stream = consumerMap.get(Constants.kfTopic).get(0);
    ConsumerIterator<String, String> it = stream.iterator();
    MessageAndMetadata<String, String> msgMeta;
    while (it.hasNext()) {
        msgMeta = it.next();
        super.mqTimer.parseMqText(msgMeta.key(), msgMeta.message());
        //System.out.println(msgMeta.key()+"\t"+msgMeta.message());
    }
}
 
Developer: lrtdc, Project: light_drtc, Lines: 20, Source: KafkaMqCollect.java

Example 8: consumeMessages

import kafka.serializer.StringDecoder; // import the dependent package/class
private void consumeMessages() {
    final Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC, 1);
    final StringDecoder decoder =
            new StringDecoder(new VerifiableProperties());
    final Map<String, List<KafkaStream<String, String>>> consumerMap =
            consumer.createMessageStreams(topicCountMap, decoder, decoder);
    final KafkaStream<String, String> stream =
            consumerMap.get(TOPIC).get(0);
    final ConsumerIterator<String, String> iterator = stream.iterator();

    Thread kafkaMessageReceiverThread = new Thread(
            () -> {
                while (iterator.hasNext()) {
                    String msg = iterator.next().message();
                    msg = msg == null ? "<null>" : msg;
                    System.out.println("got message: " + msg);
                    messagesReceived.add(msg);
                }
            },
            "kafkaMessageReceiverThread"
    );
    kafkaMessageReceiverThread.start();

}
 
Developer: hubrick, Project: vertx-kafka-service, Lines: 26, Source: KafkaProducerServiceIntegrationTest.java

Example 9: open

import kafka.serializer.StringDecoder; // import the dependent package/class
public void open(Map map, TopologyContext topologyContext, SpoutOutputCollector spoutOutputCollector) {
    _collector = spoutOutputCollector;
    Properties props = new Properties();
    props.put("zookeeper.connect", conf.get(OSMIngest.ZOOKEEPERS));
    props.put("group.id", groupId);
    props.put("zookeeper.sync.time.ms", "200");
    props.put("auto.commit.interval.ms", "1000");
    ConsumerConfig consumerConfig = new ConsumerConfig(props);
    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConfig);
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<String, String>>> consumerMap = consumer.createMessageStreams(topicCountMap, new StringDecoder(new VerifiableProperties()), new StringDecoder(new VerifiableProperties()));
    List<KafkaStream<String, String>> streams = consumerMap.get(topic);
    KafkaStream<String, String> stream = null;
    if (streams.size() == 1) {
        stream = streams.get(0);
    } else {
        log.error("Streams should be of size 1");
    }
    kafkaIterator = stream.iterator();
}
 
Developer: geomesa, Project: geomesa-tutorials, Lines: 22, Source: OSMKafkaSpout.java

Example 10: recv

import kafka.serializer.StringDecoder; // import the dependent package/class
public void recv() {
    consumer = kafka.consumer.Consumer.createJavaConsumerConnector(createConsumerConfig());

    Map<String, Integer> topicMap = new HashMap<String, Integer>();
    topicMap.put(topic, new Integer(1));
    Map<String, List<KafkaStream<String, String>>> streamMap = consumer.createMessageStreams(topicMap, new StringDecoder(null), new StringDecoder(null));

    KafkaStream<String, String> stream = streamMap.get(topic).get(0);

    ConsumerIterator<String, String> it = stream.iterator();
    while (it.hasNext()) {
        MessageAndMetadata<String, String> mm = it.next();
        System.out.println("<<< Got new message");
        System.out.println("<<< key:" + mm.key());
        System.out.println("<<< m: " + mm.message());

    }
}
 
Developer: cloudinsight, Project: cloudinsight-platform-docker, Lines: 19, Source: CollectorTest.java

Example 11: kafkaStream

import kafka.serializer.StringDecoder; // import the dependent package/class
@Bean
protected KafkaStream<String, float[]> kafkaStream() {

    final String topicName = retrieveTopicNameFromGatewayAddress(gatewayUrl());

    ConsumerConnector consumerConnector =
            Consumer.createJavaConsumerConnector(consumerConfig());
    Map<String, Integer> topicCounts = new HashMap<>();
    topicCounts.put(topicName, 1);
    VerifiableProperties emptyProps = new VerifiableProperties();
    StringDecoder keyDecoder = new StringDecoder(emptyProps);
    FeatureVectorDecoder valueDecoder = new FeatureVectorDecoder();
    Map<String, List<KafkaStream<String, float[]>>> streams =
            consumerConnector.createMessageStreams(topicCounts, keyDecoder, valueDecoder);
    List<KafkaStream<String, float[]>> streamsByTopic = streams.get(topicName);
    Preconditions.checkNotNull(streamsByTopic, String.format("Topic %s not found in streams map.", topicName));
    Preconditions.checkElementIndex(0, streamsByTopic.size(),
            String.format("List of streams of topic %s is empty.", topicName));
    return streamsByTopic.get(0);
}
 
Developer: trustedanalytics, Project: space-shuttle-demo, Lines: 21, Source: KafkaConfiguration.java

Example 12: testKafkaLogAppender

import kafka.serializer.StringDecoder; // import the dependent package/class
@Test
public void testKafkaLogAppender() {
    Properties consumerProps = new Properties();
    consumerProps.put("zookeeper.connect", zookeeper);
    consumerProps.put("group.id", "kafka-log-appender-test");
    consumerProps.put("auto.offset.reset", "smallest");
    consumerProps.put("schema.registry.url", schemaRegistry);

    Map<String, Integer> topicMap = new HashMap<String, Integer>();
    topicMap.put(topic, 1);

    ConsumerIterator<String, Object> iterator = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumerProps))
            .createMessageStreams(topicMap, new StringDecoder(null), new KafkaAvroDecoder(new VerifiableProperties(consumerProps)))
            .get(topic).get(0).iterator();

    String testMessage = "I am a test message";
    logger.info(testMessage);

    MessageAndMetadata<String, Object> messageAndMetadata = iterator.next();
    GenericRecord logLine = (GenericRecord) messageAndMetadata.message();
    assertEquals(logLine.get("line").toString(), testMessage);
    assertEquals(logLine.get("logtypeid"), KafkaLogAppender.InfoLogTypeId);
    assertNotNull(logLine.get("source"));
    assertEquals(((Map<CharSequence, Object>) logLine.get("timings")).size(), 1);
    assertEquals(((Map<CharSequence, Object>) logLine.get("tag")).size(), 2);
}
 
Developer: elodina, Project: java-kafka, Lines: 27, Source: KafkaLogAppenderTest.java

Example 13: EventDispatcher

import kafka.serializer.StringDecoder; // import the dependent package/class
public EventDispatcher(
		final Class<TIn> eventClass,
		final CLI options,
		final IEventConsumer<String, TIn> dispatcherCommand) {
	KafkaConfigParser configParser = new KafkaConfigParser();
	configParser.parseConfig(options);
	
	this.consumerConfig = configParser.getConsumerConfig();
	
	this.valueDecoder = new JsonDecoder<>(eventClass);
	this.keyDecoder = new StringDecoder(null);
	
	this.dispatcherCommand = dispatcherCommand;
	
	this.topic = EventBase.getEventId(eventClass);
}
 
Developer: mpopp, Project: MIB, Lines: 17, Source: EventDispatcher.java

Example 14: openKafkaStream

import kafka.serializer.StringDecoder; // import the dependent package/class
/**
 * Initialize the Kafka consumer client and obtain the stream for the topic
 */
private void openKafkaStream() {
	logger.info("Initializing the Kafka consumer client");

	this.consumer = Consumer.createJavaConsumerConnector(getConsumerConfig());

	StringDecoder decoder = new StringDecoder(null);
	Map<String, Integer> topicCountMap = Maps.of(topic, 1);
	Map<String, List<KafkaStream<String, String>>> consumerMap = consumer.createMessageStreams(topicCountMap,
			decoder, decoder);

	List<KafkaStream<String, String>> streams = consumerMap.get(topic);
	this.stream = streams.get(0);

	Assert.notNull(stream);
}
 
Developer: haogrgr, Project: haogrgr-test, Lines: 19, Source: KafkaMessageConsumer.java

Example 15: buildConsumer

import kafka.serializer.StringDecoder; // import the dependent package/class
private ConsumerIterator<String, String> buildConsumer(String topic) {
    Properties props = consumerProperties();

    Map<String, Integer> topicCountMap = new HashMap<>();
    topicCountMap.put(topic, 1);
    ConsumerConfig consumerConfig = new ConsumerConfig(props);
    consumerConnector = Consumer.createJavaConsumerConnector(consumerConfig);
    Map<String, List<KafkaStream<String, String>>> consumers = consumerConnector.createMessageStreams(topicCountMap, new StringDecoder(null), new StringDecoder(null));
    KafkaStream<String, String> stream = consumers.get(topic).get(0);
    return stream.iterator();
}
 
Developer: telstra, Project: open-kilda, Lines: 12, Source: Original.java


Note: The kafka.serializer.StringDecoder class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please refer to the corresponding project's license before redistributing or using the code; do not reproduce this article without permission.