

Java KafkaConsumer.assign Method Code Examples

This article collects typical usages of the Java method org.apache.kafka.clients.consumer.KafkaConsumer.assign, drawn from open-source code. If you are wondering what KafkaConsumer.assign does, how to call it, or want working samples, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.kafka.clients.consumer.KafkaConsumer.


Fourteen code examples of KafkaConsumer.assign are shown below, sorted by popularity by default. Upvote the examples you find useful; your feedback helps surface better Java samples.
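A quick orientation before the examples: assign() pins the consumer to an explicit list of TopicPartitions, bypassing consumer-group subscription and automatic rebalancing (compare KafkaConsumer.subscribe). The minimal self-contained sketch below illustrates the pattern; the broker address and topic name are placeholders, not values from any example that follows.

import java.util.Arrays;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class AssignQuickStart {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "localhost:9092"); // placeholder broker
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // Manual assignment: no group coordination, no automatic rebalancing.
            consumer.assign(Arrays.asList(new TopicPartition("my-topic", 0))); // placeholder topic
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("offset = %d, value = %s%n", record.offset(), record.value());
            }
        }
    }
}

Note that with assign(), a group.id is only required if the consumer commits offsets.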

Example 1: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
public static void main(String[] args) throws IOException, InterruptedException{
    Properties properties = PropertiesUtils.getProps("consumer.properties");
    properties.setProperty("client.id","whtestconsumer");
    properties.setProperty("group.id","whtestconsumer");
    properties.setProperty("bootstrap.servers", "localhost:9092");
    //properties.setProperty("auto.offset.reset", "earliest");


    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
    String topic = "uav-test.monitor.result";
    TopicPartition topicPartition = new TopicPartition(topic, 0);
    List<TopicPartition> topics = Arrays.asList(topicPartition);
    consumer.assign(topics);
    consumer.seekToEnd(topics);
    long current = consumer.position(topicPartition);
    consumer.seek(topicPartition, Math.max(0, current - 1000)); // rewind up to 1000 records; clamp so the offset never goes negative

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
        }
        Thread.sleep(1);
    }
}
 
Author: BriData · Project: DBus · Source: Kafka.java

Example 2: createConsumerAndSubscribe

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
/**
 * Create a new KafkaConsumer based on the passed-in ClientConfig, and assign it the
 * appropriate partitions.
 */
public KafkaConsumer createConsumerAndSubscribe(final ClientConfig clientConfig) {
    final KafkaConsumer kafkaConsumer = createConsumer(clientConfig);

    // Determine which partitions to subscribe to, for now do all
    final List<PartitionInfo> partitionInfos = kafkaConsumer.partitionsFor(clientConfig.getTopicConfig().getTopicName());

    // Pull out partitions, convert to topic partitions
    final Collection<TopicPartition> topicPartitions = new ArrayList<>();
    for (final PartitionInfo partitionInfo: partitionInfos) {
        // Skip filtered partitions
        if (!clientConfig.isPartitionFiltered(partitionInfo.partition())) {
            topicPartitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
        }
    }

    // Assign them.
    kafkaConsumer.assign(topicPartitions);

    // Return the kafka consumer.
    return kafkaConsumer;
}
 
Author: SourceLabOrg · Project: kafka-webview · Source: KafkaConsumerFactory.java

Example 3: readKafkaTopic

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
@GET
@Path("/readKafkaTopic")
public Response readKafkaTopic(Map<String, Object > map) {
    try {
        Properties properties = PropertiesUtils.getProps("consumer.properties");
        properties.setProperty("client.id","readKafkaTopic");
        properties.setProperty("group.id","readKafkaTopic");
        //properties.setProperty("bootstrap.servers", "localhost:9092");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        String topic = map.get("topic").toString();
        //System.out.println("topic="+topic);
        TopicPartition topicPartition = new TopicPartition(topic, 0);
        List<TopicPartition> topics = Arrays.asList(topicPartition);
        consumer.assign(topics);
        consumer.seekToEnd(topics);
        long current = consumer.position(topicPartition);
        long end = current;
        current -= 1000;
        if(current < 0) current = 0;
        consumer.seek(topicPartition, current);
        List<String> result = new ArrayList<>();
        while (current < end) {
            //System.out.println("topic position = "+current);
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                result.add(record.value());
                //System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
            }
            current = consumer.position(topicPartition);
        }
        consumer.close();
        return Response.ok().entity(result).build();
    } catch (Exception e) {
        logger.error("Error encountered while readKafkaTopic with parameter:{}", JSON.toJSONString(map), e);
        // NOTE: a 204 (No Content) response must not carry an entity body; an error status such as 500 would suit this error result better
        return Response.status(204).entity(new Result(-1, e.getMessage())).build();
    }
}
 
Author: BriData · Project: DBus · Source: DataTableResource.java

Example 4: consumeAllRecordsFromTopic

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
/**
 * This will consume all records from only the partitions given.
 * @param topic Topic to consume from.
 * @param partitionIds Collection of PartitionIds to consume.
 * @return List of ConsumerRecords consumed.
 */
public List<ConsumerRecord<byte[], byte[]>> consumeAllRecordsFromTopic(final String topic, Collection<Integer> partitionIds) {
    // Create topic Partitions
    List<TopicPartition> topicPartitions = new ArrayList<>();
    for (Integer partitionId: partitionIds) {
        topicPartitions.add(new TopicPartition(topic, partitionId));
    }

    // Connect Consumer
    KafkaConsumer<byte[], byte[]> kafkaConsumer =
        kafkaTestServer.getKafkaConsumer(ByteArrayDeserializer.class, ByteArrayDeserializer.class);

    // Assign topic partitions & seek to head of them
    kafkaConsumer.assign(topicPartitions);
    kafkaConsumer.seekToBeginning(topicPartitions);

    // Pull records from kafka, keep polling until we get nothing back
    final List<ConsumerRecord<byte[], byte[]>> allRecords = new ArrayList<>();
    ConsumerRecords<byte[], byte[]> records;
    do {
        // Grab records from kafka
        records = kafkaConsumer.poll(2000L);
        logger.info("Found {} records in kafka", records.count());

        // Add to our array list
        records.forEach(allRecords::add);

    }
    while (!records.isEmpty());

    // close consumer
    kafkaConsumer.close();

    // return all records
    return allRecords;
}
 
Author: salesforce · Project: kafka-junit · Source: KafkaTestUtils.java
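A caveat on Example 4's stop condition: an empty poll() does not strictly prove the log end was reached, since a slow broker can return nothing while records remain. On 0.10.1+ clients, comparing position() against endOffsets() gives a firmer stop test. A sketch of that variant, reusing the kafkaConsumer, topicPartitions, and allRecords variables from Example 4 (java.util.Map import assumed):

// Stop once every assigned partition has been read up to its log-end offset.
final Map<TopicPartition, Long> endOffsets = kafkaConsumer.endOffsets(topicPartitions);
boolean caughtUp;
do {
    kafkaConsumer.poll(2000L).forEach(allRecords::add);
    caughtUp = true;
    for (final TopicPartition topicPartition : topicPartitions) {
        if (kafkaConsumer.position(topicPartition) < endOffsets.get(topicPartition)) {
            caughtUp = false;
        }
    }
} while (!caughtUp);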

Example 5: retrieveRecordsFromPartitions

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
/**
 * Return a map containing one List of records per partition.
 * This internally creates a Kafka Consumer using the provided consumer properties.
 *
 * @param topic Topic to consume from.
 * @param numPtns Number of partitions to read; partitions 0 through numPtns - 1 are polled.
 * @param consumerProperties Properties used to construct each KafkaConsumer.
 * @return A Map of partitions (Integer) to the List of messages (byte[]) retrieved from each.
 */
public static Map<Integer, List<byte[]>> retrieveRecordsFromPartitions(String topic, int numPtns,
                                                                 Properties consumerProperties) {

  Map<Integer, List<byte[]>> resultsMap = new HashMap<Integer, List<byte[]>>();
  for (int i = 0; i < numPtns; i++) {
    List<byte[]> partitionResults = new ArrayList<byte[]>();
    resultsMap.put(i, partitionResults);
    KafkaConsumer<String, byte[]> consumer =
        new KafkaConsumer<String, byte[]>(consumerProperties);

    TopicPartition partition = new TopicPartition(topic, i);

    consumer.assign(Arrays.asList(partition));

    ConsumerRecords<String, byte[]> records = consumer.poll(1000);
    for (ConsumerRecord<String, byte[]> record : records) {
      partitionResults.add(record.value());
    }
    consumer.close();
  }
  return resultsMap;
}
 
Author: moueimei · Project: flume-release-1.7.0 · Source: KafkaPartitionTestUtil.java
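A design note on Example 5: it constructs and closes a fresh KafkaConsumer per partition, and reads each partition with a single poll(1000), which can return before all records have arrived. Since assign() replaces the previous assignment on every call, one consumer can walk all the partitions instead; a sketch under the same single-poll assumption:

Map<Integer, List<byte[]>> resultsMap = new HashMap<>();
KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(consumerProperties);
for (int i = 0; i < numPtns; i++) {
    List<byte[]> partitionResults = new ArrayList<>();
    resultsMap.put(i, partitionResults);
    // Reassigning moves the same consumer to the next partition.
    consumer.assign(Arrays.asList(new TopicPartition(topic, i)));
    for (ConsumerRecord<String, byte[]> record : consumer.poll(1000)) {
        partitionResults.add(record.value());
    }
}
consumer.close();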

Example 6: getSubscriber

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
/**
 * Get a Subscriber that reads from the given partitions. If partitions is null, the Subscriber reads from the topic
 * corresponding to topicName.
 *
 * @param partitions The list of partitions to read from.
 * @param topicName The topic to subscribe to if partitions are not given.
 * @return The Subscriber reading from the appropriate topic/partitions.
 */
private Subscriber getSubscriber(List<TopicPartition> partitions, String topicName) throws PubSubException {
    Map<String, Object> properties = getProperties(CONSUMER_NAMESPACE, KAFKA_CONSUMER_PROPERTIES);

    // Get the PubSub Consumer specific properties
    Number maxUnackedMessages = getRequiredConfig(Number.class, KafkaConfig.MAX_UNCOMMITTED_MESSAGES);

    // Whether auto-commit is enabled
    String autoCommit = getRequiredConfig(String.class, KafkaConfig.ENABLE_AUTO_COMMIT);
    boolean enableAutoCommit = KafkaConfig.TRUE.equalsIgnoreCase(autoCommit);

    KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(properties);
    // Subscribe to the topic if partitions are not set in the config.
    if (partitions == null) {
        consumer.subscribe(Collections.singleton(topicName));
    } else {
        consumer.assign(partitions);
    }
    return new KafkaSubscriber(consumer, maxUnackedMessages.intValue(), !enableAutoCommit);
}
 
Author: yahoo · Project: bullet-kafka · Source: KafkaPubSub.java

Example 7: KafkaSource

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
public KafkaSource () throws IOException, PropertyException  {
    Properties configs = ConfUtils.getProps(CONFIG_PROPERTIES);
    statTopic = configs.getProperty(Constants.STATISTIC_TOPIC);
    if (statTopic == null) {
        throw new PropertyException("Configuration property must not be empty: " + Constants.STATISTIC_TOPIC);
    }

    statTopicPartition = new TopicPartition(statTopic, 0);

    Properties statProps = ConfUtils.getProps(CONSUMER_PROPERTIES);
    statProps.setProperty("enable.auto.commit", "false");
    List<TopicPartition> topics = Arrays.asList(statTopicPartition);
    consumer = new KafkaConsumer(statProps);
    consumer.assign(topics);

    long beforeOffset = consumer.position(statTopicPartition);
    String offset = configs.getProperty("kafka.offset");
    if (offset.equalsIgnoreCase("none")) {
        ; // do nothing
    } else if  (offset.equalsIgnoreCase("begin")) {
        consumer.seekToBeginning(Lists.newArrayList(statTopicPartition));
    } else if (offset.equalsIgnoreCase("end")) {
        consumer.seekToEnd(Lists.newArrayList(statTopicPartition));
    } else {
        long nOffset = Long.parseLong(offset);
        consumer.seek(statTopicPartition, nOffset);
    }
    long afterOffset = consumer.position(statTopicPartition);
    LOG.info(String.format("init KafkaSource OK. beforeOffset=%d, afterOffset=%d", beforeOffset, afterOffset));
}
 
Author: BriData · Project: DBus · Source: KafkaSource.java

Example 8: run

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
/**
 * Background loop: periodically walks every partition in the cluster and records
 * its current log-end offset into logEndOffsetMap.
 *
 * @see Thread#run()
 */
@Override
public void run() {
    String group = "kafka-insight-logOffsetListener";
    int sleepTime = 60000;
    KafkaConsumer<byte[], byte[]> kafkaConsumer = null;

    while (true) {

        try {
            if (null == kafkaConsumer) {
                kafkaConsumer = KafkaUtils.createNewKafkaConsumer(brokersInfo, group);
            }

            Map<String, List<PartitionInfo>> topicPartitionsMap = kafkaConsumer.listTopics();
            for (List<PartitionInfo> partitionInfoList : topicPartitionsMap.values()) {
                for (PartitionInfo partitionInfo : partitionInfoList) {
                    TopicPartition topicPartition = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
                    Collection<TopicPartition> topicPartitions = Arrays.asList(topicPartition);
                    kafkaConsumer.assign(topicPartitions);
                    kafkaConsumer.seekToEnd(topicPartitions);
                    Long logEndOffset = kafkaConsumer.position(topicPartition);
                    logEndOffsetMap.put(topicPartition, logEndOffset);
                }
            }

            Thread.sleep(sleepTime);

        } catch (Exception e) {
            e.printStackTrace();
            if (null != kafkaConsumer) {
                kafkaConsumer.close();
                kafkaConsumer = null;
            }
        }
    }

}
 
Author: dubin555 · Project: Kafka-Insight · Source: KafkaOffsetGetter.java

Example 9: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
public static void main(String[] args) {
    Map<String, Object> configs = new HashMap<String, Object>();
    // bootstrap.servers lists one or more brokers; the client discovers the rest of the cluster automatically, so the full broker list is not required.
    configs.put("bootstrap.servers", "192.168.0.107:9092,192.168.0.108:9092,192.168.0.109:9092");
    configs.put("group.id", "kafka-test");
    // whether offsets are committed automatically
    configs.put("enable.auto.commit", "false");
    // interval between automatic commits (unused here, since auto-commit is disabled)
    configs.put("auto.commit.interval.ms", "1000");
    configs.put("session.timeout.ms", "30000");

    configs.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    configs.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(configs);
    String topic = "kafka-test";
    TopicPartition partition0 = new TopicPartition(topic, 0);
    TopicPartition partition1 = new TopicPartition(topic, 1);
    consumer.assign(Arrays.asList(partition0, partition1));
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Long.MAX_VALUE);
        for (TopicPartition partition : records.partitions()) {
            List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
            for (ConsumerRecord<String, String> record : partitionRecords) {
                System.out.println(record.offset() + ": " + record.value());
            }
            /* Synchronously commit the offset for this partition; the committed value is the offset of the NEXT record to read, hence lastOffset + 1 */
            long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
            consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
        }
    }
}
 
Author: wngn123 · Project: wngn-jms-kafka · Source: ComsumerDemo4.java

Example 10: getLogSize

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
protected long getLogSize(KafkaConsumer<String, Serializable> kafkaConsumer, String topic,
                          int partition) {
    TopicPartition topicPartition = new TopicPartition(topic, partition);
    List<TopicPartition> asList = Arrays.asList(topicPartition);
    kafkaConsumer.assign(asList);
    kafkaConsumer.seekToEnd(asList);
    long logEndOffset = kafkaConsumer.position(topicPartition);
    return logEndOffset;
}
 
Author: warlock-china · Project: azeroth · Source: KafkaConsumerCommand.java
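On 0.10.1+ clients, the value computed by Example 10 is available without mutating the consumer at all: endOffsets() returns the log-end offset directly, with no assign/seek side effects. A minimal sketch under that version assumption:

protected long getLogSize(KafkaConsumer<String, Serializable> kafkaConsumer, String topic,
                          int partition) {
    TopicPartition topicPartition = new TopicPartition(topic, partition);
    // endOffsets neither changes the assignment nor moves any position
    return kafkaConsumer.endOffsets(Arrays.asList(topicPartition)).get(topicPartition);
}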

Example 11: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
  CommandLine commandLine = parseCommandLine(args);
  String brokerStatsZk = commandLine.getOptionValue(BROKERSTATS_ZOOKEEPER);
  String brokerStatsTopic = commandLine.getOptionValue(BROKERSTATS_TOPIC);
  String brokerName = commandLine.getOptionValue(BROKERNAME);
  Set<String> brokerNames = new HashSet<>();
  brokerNames.add(brokerName);

  KafkaConsumer kafkaConsumer = KafkaUtils.getKafkaConsumer(brokerStatsZk,
      "org.apache.kafka.common.serialization.ByteArrayDeserializer",
      "org.apache.kafka.common.serialization.ByteArrayDeserializer", 1);

  long startTimestampInMillis = System.currentTimeMillis() - 86400 * 1000L;
  Map<TopicPartition, Long> offsets = ReplicaStatsManager.getProcessingStartOffsets(
      kafkaConsumer, brokerStatsTopic, startTimestampInMillis);
  kafkaConsumer.unsubscribe();
  kafkaConsumer.assign(offsets.keySet());
  Map<TopicPartition, Long> latestOffsets = kafkaConsumer.endOffsets(offsets.keySet());
  kafkaConsumer.close();

  Map<Long, BrokerStats> brokerStatsMap = new TreeMap<>();
  for (TopicPartition topicPartition : offsets.keySet()) {
    LOG.info("Start processing {}", topicPartition);
    long startOffset = offsets.get(topicPartition);
    long endOffset = latestOffsets.get(topicPartition);

    List<BrokerStats> statsList = processOnePartition(brokerStatsZk, topicPartition,
        startOffset, endOffset, brokerNames);
    for (BrokerStats brokerStats : statsList) {
      brokerStatsMap.put(brokerStats.getTimestamp(), brokerStats);
    }
    LOG.info("Finished processing {}, retrieved {} records", topicPartition, statsList.size());
  }

  for (Map.Entry<Long, BrokerStats> entry: brokerStatsMap.entrySet()) {
    System.out.println(entry.getKey() + " : " + entry.getValue());
  }
}
 
Author: pinterest · Project: doctorkafka · Source: BrokerStatsFilter.java

Example 12: testProducerAndConsumer

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
/**
 * Test that KafkaServer works as expected!
 *
 * This also serves as a decent example of how to use the producer and consumer.
 */
@Test
public void testProducerAndConsumer() throws Exception {
    final int partitionId = 0;

    // Define our message
    final String expectedKey = "my-key";
    final String expectedValue = "my test message";

    // Define the record we want to produce
    ProducerRecord<String, String> producerRecord = new ProducerRecord<>(topicName, partitionId, expectedKey, expectedValue);

    // Create a new producer
    KafkaProducer<String, String> producer = getKafkaTestServer().getKafkaProducer(StringSerializer.class, StringSerializer.class);

    // Produce it & wait for it to complete.
    Future<RecordMetadata> future = producer.send(producerRecord);
    producer.flush();
    while (!future.isDone()) {
        Thread.sleep(500L);
    }
    logger.info("Produce completed");

    // Close producer!
    producer.close();

    KafkaConsumer<String, String> kafkaConsumer =
        getKafkaTestServer().getKafkaConsumer(StringDeserializer.class, StringDeserializer.class);

    final List<TopicPartition> topicPartitionList = Lists.newArrayList();
    for (final PartitionInfo partitionInfo: kafkaConsumer.partitionsFor(topicName)) {
        topicPartitionList.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
    }
    kafkaConsumer.assign(topicPartitionList);
    kafkaConsumer.seekToBeginning(topicPartitionList);

    // Pull records from kafka, keep polling until we get nothing back
    ConsumerRecords<String, String> records;
    do {
        records = kafkaConsumer.poll(2000L);
        logger.info("Found {} records in kafka", records.count());
        for (ConsumerRecord<String, String> record: records) {
            // Validate
            assertEquals("Key matches expected", expectedKey, record.key());
            assertEquals("value matches expected", expectedValue, record.value());
        }
    }
    while (!records.isEmpty());

    // close consumer
    kafkaConsumer.close();
}
 
Author: salesforce · Project: kafka-junit · Source: KafkaTestServerTest.java

Example 13: consume

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
public void consume(String topic) throws Exception {
    if (maybeSetupPhase(topic, "simple-benchmark-consumer-load", true)) {
        return;
    }

    Properties props = setProduceConsumeProperties("simple-benchmark-consumer");

    KafkaConsumer<Integer, byte[]> consumer = new KafkaConsumer<>(props);

    List<TopicPartition> partitions = getAllPartitions(consumer, topic);
    consumer.assign(partitions);
    consumer.seekToBeginning(partitions);

    Integer key = null;

    long startTime = System.currentTimeMillis();

    while (true) {
        ConsumerRecords<Integer, byte[]> records = consumer.poll(POLL_MS);
        if (records.isEmpty()) {
            if (processedRecords == numRecords)
                break;
        } else {
            for (ConsumerRecord<Integer, byte[]> record : records) {
                processedRecords++;
                processedBytes += record.value().length + Integer.SIZE;
                Integer recKey = record.key();
                if (key == null || key < recKey)
                    key = recKey;
                if (processedRecords == numRecords)
                    break;
            }
        }
        if (processedRecords == numRecords)
            break;
    }

    long endTime = System.currentTimeMillis();

    consumer.close();
    printResults("Consumer Performance [records/latency/rec-sec/MB-sec read]: ", endTime - startTime);
}
 
Author: YMCoding · Project: kafka-0.11.0.0-src-with-comment · Source: SimpleBenchmark.java

Example 14: getProcessingStartOffsets

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
/**
 * Find the start offsets for the processing windows. This code targets Kafka 0.10.1.1,
 * where timestamp-based offset lookup through KafkaConsumer was not supported, so the
 * method walks offsets backwards until the record timestamp reaches the window start.
 */
public static Map<TopicPartition, Long> getProcessingStartOffsets(KafkaConsumer kafkaConsumer,
                                                                  String brokerStatsTopic,
                                                                  long startTimestampInMillis) {
  List<PartitionInfo> partitionInfos = kafkaConsumer.partitionsFor(brokerStatsTopic);
  LOG.info("Get partition info for {} : {} partitions", brokerStatsTopic, partitionInfos.size());
  List<TopicPartition> topicPartitions = partitionInfos.stream()
      .map(partitionInfo -> new TopicPartition(partitionInfo.topic(), partitionInfo.partition()))
      .collect(Collectors.toList());

  Map<TopicPartition, Long> endOffsets = kafkaConsumer.endOffsets(topicPartitions);
  Map<TopicPartition, Long> beginningOffsets = kafkaConsumer.beginningOffsets(topicPartitions);
  Map<TopicPartition, Long> offsets = new HashMap<>();

  for (TopicPartition tp : topicPartitions) {
    kafkaConsumer.unsubscribe();
    LOG.info("assigning {} to kafkaconsumer", tp);
    List<TopicPartition> tps = new ArrayList<>();
    tps.add(tp);

    kafkaConsumer.assign(tps);
    long endOffset = endOffsets.get(tp);
    long beginningOffset = beginningOffsets.get(tp);
    long offset = Math.max(endOffsets.get(tp) - 10, beginningOffset);
    ConsumerRecord<byte[], byte[]> record = retrieveOneMessage(kafkaConsumer, tp, offset);
    BrokerStats brokerStats = OperatorUtil.deserializeBrokerStats(record);
    if (brokerStats != null) {
      long timestamp = brokerStats.getTimestamp();
      while (timestamp > startTimestampInMillis) {
        offset = Math.max(beginningOffset, offset - 5000);
        record = retrieveOneMessage(kafkaConsumer, tp, offset);
        brokerStats = OperatorUtil.deserializeBrokerStats(record);
        if (brokerStats == null) {
          break;
        }
        timestamp = brokerStats.getTimestamp();
      }
    }
    offsets.put(tp, offset);
    LOG.info("{}: offset = {}, endOffset = {}, # of to-be-processed messages = {}",
        tp, offset, endOffset, endOffset - offset);
  }
  return offsets;
}
 
Author: pinterest · Project: doctorkafka · Source: ReplicaStatsManager.java
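For comparison with Example 14: on deployments where KafkaConsumer.offsetsForTimes is supported (clients and brokers with timestamp search, and message timestamps populated), the backwards walk collapses into a single lookup. A hedged sketch, assuming org.apache.kafka.clients.consumer.OffsetAndTimestamp is imported; getStartOffsetsByTimestamp is a hypothetical name, not part of the original project:

public static Map<TopicPartition, Long> getStartOffsetsByTimestamp(KafkaConsumer kafkaConsumer,
                                                                   List<TopicPartition> topicPartitions,
                                                                   long startTimestampInMillis) {
  Map<TopicPartition, Long> timestampsToSearch = new HashMap<>();
  for (TopicPartition tp : topicPartitions) {
    timestampsToSearch.put(tp, startTimestampInMillis);
  }
  // One entry per partition: the first offset whose timestamp is >= the search timestamp.
  Map<TopicPartition, OffsetAndTimestamp> found = kafkaConsumer.offsetsForTimes(timestampsToSearch);
  Map<TopicPartition, Long> endOffsets = kafkaConsumer.endOffsets(topicPartitions);
  Map<TopicPartition, Long> offsets = new HashMap<>();
  for (TopicPartition tp : topicPartitions) {
    OffsetAndTimestamp offsetAndTimestamp = found.get(tp);
    // null means no record at or after the timestamp; fall back to the log end
    offsets.put(tp, offsetAndTimestamp != null ? offsetAndTimestamp.offset() : endOffsets.get(tp));
  }
  return offsets;
}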


Note: The org.apache.kafka.clients.consumer.KafkaConsumer.assign examples above were compiled from open-source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets come from various open-source projects and copyright remains with their original authors; consult each project's license before redistributing, and do not reproduce without permission.