

Java MessageAndOffset.nextOffset Method Code Examples

This article collects typical usage examples of the Java method kafka.message.MessageAndOffset.nextOffset. If you are unsure what MessageAndOffset.nextOffset does, how to call it, or where to find working examples, the curated snippets below should help. You can also explore other usage examples of the enclosing class, kafka.message.MessageAndOffset.


The following presents 12 code examples of the MessageAndOffset.nextOffset method, drawn from open-source projects and ordered roughly by popularity.
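For orientation before the examples: in the legacy SimpleConsumer ("low-level") API, MessageAndOffset.nextOffset() returns the current message's offset plus one, i.e. the position from which the next fetch or iteration should continue. This is why every example below advances its read position with nextOffset() instead of a hand-maintained counter. A minimal sketch of the common fetch-and-advance idiom follows; the host, port, topic name, partition and starting offset are placeholders, and leader lookup plus error handling are omitted:

import java.nio.ByteBuffer;
import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.javaapi.message.ByteBufferMessageSet;
import kafka.message.MessageAndOffset;

// Sketch only: "localhost:9092", "myTopic", partition 0 and offset 0 are placeholders.
public static void fetchOnce() throws Exception {
    SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 100000, 64 * 1024, "demoClient");
    long offset = 0L;
    FetchRequest req = new FetchRequestBuilder()
            .clientId("demoClient")
            .addFetch("myTopic", 0, offset, 100000)    // fetchSize is in bytes, not messages
            .build();
    FetchResponse resp = consumer.fetch(req);
    ByteBufferMessageSet msgs = resp.messageSet("myTopic", 0);
    for (MessageAndOffset mao : msgs) {
        if (mao.offset() < offset) continue;           // skip messages below the requested offset
        ByteBuffer payload = mao.message().payload();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);                            // copy out the message body
        offset = mao.nextOffset();                     // advance: current offset + 1
    }
    consumer.close();
}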

Example 1: reEmitPartitionBatch

import kafka.message.MessageAndOffset; // import the package/class this method depends on
/**
 * Re-emit the batch described by the metadata provided.
 *
 * @param attempt   the transaction attempt being replayed
 * @param collector the Trident collector to emit tuples to
 * @param partition the Kafka partition to read from
 * @param meta      batch metadata containing "instanceId", "offset" and "nextOffset"
 */
private void reEmitPartitionBatch(TransactionAttempt attempt, TridentCollector collector, Partition partition, Map meta) {
    LOG.info("re-emitting batch, attempt " + attempt);
    String instanceId = (String) meta.get("instanceId");
    if (!_config.forceFromStart || instanceId.equals(_topologyInstanceId)) {
        SimpleConsumer consumer = _connections.register(partition);
        long offset = (Long) meta.get("offset");
        long nextOffset = (Long) meta.get("nextOffset");
        ByteBufferMessageSet msgs = fetchMessages(consumer, partition, offset);
        for (MessageAndOffset msg : msgs) {
            if (offset == nextOffset) {
                break;
            }
            if (offset > nextOffset) {
                throw new RuntimeException("Error when re-emitting batch. overshot the end offset");
            }
            emit(collector, msg.message());
            offset = msg.nextOffset();
        }
    }
}
 
Developer: metamx, Project: incubator-storm, Lines: 29, Source: TridentKafkaEmitter.java

Example 2: fill

import kafka.message.MessageAndOffset; // import the package/class this method depends on
private void fill() {
    long start = System.nanoTime();
    ByteBufferMessageSet msgs = KafkaUtils.fetchMessages(_spoutConfig, _consumer, _partition, _emittedToOffset);
    long end = System.nanoTime();
    long millis = (end - start) / 1000000;
    _fetchAPILatencyMax.update(millis);
    _fetchAPILatencyMean.update(millis);
    _fetchAPICallCount.incr();
    int numMessages = countMessages(msgs);
    _fetchAPIMessageCount.incrBy(numMessages);

    if (numMessages > 0) {
        LOG.info("Fetched " + numMessages + " messages from: " + _partition);
    }
    for (MessageAndOffset msg : msgs) {
        _pending.add(_emittedToOffset);
        _waitingToEmit.add(new MessageAndRealOffset(msg.message(), _emittedToOffset));
        _emittedToOffset = msg.nextOffset();
    }
    if (numMessages > 0) {
        LOG.info("Added " + numMessages + " messages from: " + _partition + " to internal buffers");
    }
}
 
Developer: metamx, Project: incubator-storm, Lines: 24, Source: PartitionManager.java

Example 3: reEmitPartitionBatch

import kafka.message.MessageAndOffset; // import the package/class this method depends on
/**
 * Re-emit the batch described by the metadata provided.
 *
 * @param attempt   the transaction attempt being replayed
 * @param collector the Trident collector to emit tuples to
 * @param partition the Kafka partition to read from
 * @param meta      batch metadata containing "instanceId", "offset" and "nextOffset"
 */
private void reEmitPartitionBatch(TransactionAttempt attempt, TridentCollector collector, Partition partition, Map meta) {
    LOG.info("re-emitting batch, attempt " + attempt);
    String instanceId = (String) meta.get("instanceId");
    if (!_config.forceFromStart || instanceId.equals(_topologyInstanceId)) {
        SimpleConsumer consumer = _connections.register(partition);
        long offset = (Long) meta.get("offset");
        long nextOffset = (Long) meta.get("nextOffset");
        ByteBufferMessageSet msgs = null;
        try {
            msgs = fetchMessages(consumer, partition, offset);
        } catch (TopicOffsetOutOfRangeException e) {
            LOG.warn("OffsetOutOfRange during reEmitPartitionBatch, the transaction can not be replayed." +
                    "Returning empty messages");
        }

        if (msgs != null) {
            for (MessageAndOffset msg : msgs) {
                if (offset == nextOffset) {
                    break;
                }
                if (offset > nextOffset) {
                    throw new RuntimeException("Error when re-emitting batch. overshot the end offset");
                }
                emit(collector, msg.message());
                offset = msg.nextOffset();
            }
        }
    }
}
 
Developer: redBorder, Project: rb-bi, Lines: 38, Source: TridentKafkaEmitter.java

Example 4: doEmitNewPartitionBatch

import kafka.message.MessageAndOffset; // import the package/class this method depends on
private Map doEmitNewPartitionBatch(SimpleConsumer consumer, Partition partition, TridentCollector collector, Map lastMeta) {
    long offset;
    if (lastMeta != null) {
        String lastInstanceId = null;
        Map lastTopoMeta = (Map) lastMeta.get("topology");
        if (lastTopoMeta != null) {
            lastInstanceId = (String) lastTopoMeta.get("id");
        }
        if (_config.forceFromStart && !_topologyInstanceId.equals(lastInstanceId)) {
            offset = KafkaUtils.getOffset(consumer, _config.topic, partition.partition, _config.startOffsetTime);
        } else {
            offset = (Long) lastMeta.get("nextOffset");
        }
    } else {
        offset = KafkaUtils.getOffset(consumer, _config.topic, partition.partition, _config);
    }
    ByteBufferMessageSet msgs = fetchMessages(consumer, partition, offset);
    long endoffset = offset;
    for (MessageAndOffset msg : msgs) {
        emit(collector, msg.message());
        endoffset = msg.nextOffset();
    }
    Map newMeta = new HashMap();
    newMeta.put("offset", offset);
    newMeta.put("nextOffset", endoffset);
    newMeta.put("instanceId", _topologyInstanceId);
    newMeta.put("partition", partition.partition);
    newMeta.put("broker", ImmutableMap.of("host", partition.host.host, "port", partition.host.port));
    newMeta.put("topic", _config.topic);
    newMeta.put("topology", ImmutableMap.of("name", _topologyName, "id", _topologyInstanceId));
    return newMeta;
}
 
Developer: metamx, Project: incubator-storm, Lines: 33, Source: TridentKafkaEmitter.java

Example 5: getCurrentMessagePayload

import kafka.message.MessageAndOffset; // import the package/class this method depends on
byte[] getCurrentMessagePayload() {
  while(currentMessageSetIterator.hasNext()) {
    MessageAndOffset messageAndOffset = currentMessageSetIterator.next();
    if (messageAndOffset.offset() < currentOffset) continue; //old offset, ignore
    Message message = messageAndOffset.message();
    ByteBuffer payload = message.payload();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    currentOffset = messageAndOffset.nextOffset();
    return bytes;
  }
  return null;
}
 
Developer: DemandCube, Project: Scribengin, Lines: 14, Source: KafkaPartitionReader.java

Example 6: execute

import kafka.message.MessageAndOffset; // import the package/class this method depends on
public List<byte[]> execute() throws Exception {
  FetchRequest req = 
      new FetchRequestBuilder().
      clientId(name).
      addFetch(topic, partitionMetadata.partitionId(), currentOffset, fetchSize).
      minBytes(1).
      maxWait(maxWait).
      build();
  
  FetchResponse fetchResponse = consumer.fetch(req);
  if(fetchResponse.hasError()) {
    short errorCode = fetchResponse.errorCode(topic, partitionMetadata.partitionId());
    String msg = "Kafka error code = " + errorCode + ", Partition " + partitionMetadata.partitionId();
    throw new Exception(msg);
  }
  List<byte[]> holder = new ArrayList<byte[]>();
  ByteBufferMessageSet messageSet = fetchResponse.messageSet(topic, partitionMetadata.partitionId());
  int count = 0;
  for(MessageAndOffset messageAndOffset : messageSet) {
    if (messageAndOffset.offset() < currentOffset) continue; //old offset, ignore
    ByteBuffer payload = messageAndOffset.message().payload();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    holder.add(bytes);
    currentOffset = messageAndOffset.nextOffset();
    count++;
    if(count == maxRead) break;
  }
  return holder ;
}
 
Developer: DemandCube, Project: Scribengin, Lines: 31, Source: KafkaPartitionReader.java
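Because execute() above stops after maxRead messages and remembers its position in currentOffset, callers can simply invoke it repeatedly. A hypothetical driver loop, where the reader variable and handleRecord are assumptions not shown in the excerpt:

// Hypothetical usage sketch; KafkaPartitionReader's constructor and fields
// (name, topic, partitionMetadata, currentOffset, ...) are not shown above.
List<byte[]> batch = reader.execute();
while (!batch.isEmpty()) {
    for (byte[] record : batch) {
        handleRecord(record);     // application-specific processing (assumed)
    }
    batch = reader.execute();     // resumes from the saved currentOffset
}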

Example 7: run

import kafka.message.MessageAndOffset; // import the package/class this method depends on
public void run(long maxReads, String topic, int partition, List<KafkaBrokerInfo> brokerInfoList) throws Exception {
    // Fetch the metadata for the given topic partition
    PartitionMetadata metadata = findLeader(brokerInfoList, topic, partition);
    if (metadata == null) {
        System.out.println("Can't find metadata for Topic and Partition. Exiting");
        return;
    }
    String leadBrokerHost = metadata.leader().host();
    int leadBrokerPort = metadata.leader().port();
    String clientName = "Client_" + topic + "_" + partition;
    SimpleConsumer consumer = new SimpleConsumer(leadBrokerHost, leadBrokerPort, 100000, 64 * 1024, clientName);

    long readOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.EarliestTime(), clientName);

    int numErrors = 0;
    while (maxReads > 0) {
        int fetchSize = 100000;
        // Build the fetch request, specifying the topic, partition, starting offset, and the maximum number of bytes per fetch
        kafka.api.FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(topic, partition,
                readOffset, fetchSize).build();
        // Send the request to Kafka and obtain the response
        FetchResponse fetchResponse = consumer.fetch(req);
        // If the response reports an error, handle it and retry the fetch;
        // after more than 5 consecutive errors, give up
        if (fetchResponse.hasError()) {
            numErrors++;
            if (numErrors > 5)
                break;
            short code = fetchResponse.errorCode(topic, partition);
            System.out.println("Error fetching data from the Broker:" + leadBrokerHost + " Reason: " + code);
            if (code == ErrorMapping.OffsetOutOfRangeCode()) {
                // The offset was out of range; fetch a fresh offset and retry
                readOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(),
                        clientName);
            }
            continue;
        }
        numErrors = 0;
        long numRead = 0;
        System.out.println("readOffset=" + readOffset);
        // The response carried no error, so process the returned messages
        for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
            long currentOffset = messageAndOffset.offset();
            if (currentOffset < readOffset) {
                continue;
            }
            readOffset = messageAndOffset.nextOffset();
            ByteBuffer payload = messageAndOffset.message().payload();
            byte[] bytes = new byte[payload.limit()];
            payload.get(bytes);
            // Process the message data
            // System.out.println(String.valueOf(messageAndOffset.offset()) + ": " + new String(bytes, "UTF-8"));
            numRead++;
        }
        maxReads--;
        if (numRead == 0) {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException ie) {
            }
        }
        System.out.println(numRead);
    }
    System.out.println(maxReads);
    if (consumer != null)
        consumer.close();
}
 
Developer: wngn123, Project: wngn-jms-kafka, Lines: 68, Source: SimpleConsumerExample.java

Example 8: run

import kafka.message.MessageAndOffset; // import the package/class this method depends on
public void run(long maxReads, String topic, int partition, List<KafkaBrokerInfo> brokerInfoList) throws Exception {
    // Fetch the metadata for the given topic partition
    PartitionMetadata metadata = findLeader(brokerInfoList, topic, partition);
    if (metadata == null) {
        System.out.println("Can't find metadata for Topic and Partition. Exiting");
        return;
    }
    String leadBrokerHost = metadata.leader().host();
    int leadBrokerPort = metadata.leader().port();
    String clientName = "Client_" + topic + "_" + partition;
    SimpleConsumer consumer = new SimpleConsumer(leadBrokerHost, leadBrokerPort, 100000, 64 * 1024, clientName);

    long readOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.EarliestTime(), clientName);

    int numErrors = 0;
    while (maxReads > 0) {
        int fetchSize = 100000;
        // Build the fetch request, specifying the topic, partition, starting offset, and the maximum number of bytes per fetch
        kafka.api.FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(topic, partition,
                readOffset, fetchSize).build();
        // Send the request to Kafka and obtain the response
        FetchResponse fetchResponse = consumer.fetch(req);
        // If the response reports an error, handle it and retry the fetch;
        // after more than 5 consecutive errors, give up
        if (fetchResponse.hasError()) {
            numErrors++;
            if (numErrors > 5)
                break;
            short code = fetchResponse.errorCode(topic, partition);
            System.out.println("Error fetching data from the Broker:" + leadBrokerHost + " Reason: " + code);
            if (code == ErrorMapping.OffsetOutOfRangeCode()) {
                // The offset was out of range; fetch a fresh offset and retry
                readOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(),
                        clientName);
            }
            continue;
        }
        numErrors = 0;
        long numRead = 0;
        // The response carried no error, so process the returned messages
        for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
            long currentOffset = messageAndOffset.offset();
            if (currentOffset < readOffset) {
                continue;
            }
            readOffset = messageAndOffset.nextOffset();
            ByteBuffer payload = messageAndOffset.message().payload();
            byte[] bytes = new byte[payload.limit()];
            payload.get(bytes);
            // Process the message data
            System.out.println(String.valueOf(messageAndOffset.offset()) + ": " + new String(bytes, "UTF-8"));
            numRead++;
            maxReads--;
        }
        if (numRead == 0) {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException ie) {
            }
        }
    }
    if (consumer != null)
        consumer.close();
}
 
Developer: wngn123, Project: wngn-jms-kafka, Lines: 65, Source: SimpleConsumerTest.java

Example 9: doEmitNewPartitionBatch

import kafka.message.MessageAndOffset; // import the package/class this method depends on
private Map doEmitNewPartitionBatch(SimpleConsumer consumer, Partition partition, TridentCollector collector, Map lastMeta) {
    long offset;
    if (lastMeta != null) {
        String lastInstanceId = null;
        Map lastTopoMeta = (Map) lastMeta.get("topology");
        if (lastTopoMeta != null) {
            lastInstanceId = (String) lastTopoMeta.get("id");
        }
        if (_config.forceFromStart && !_topologyInstanceId.equals(lastInstanceId)) {
            offset = KafkaUtils.getOffset(consumer, _config.topic, partition.partition, _config.startOffsetTime);
        } else {
            offset = (Long) lastMeta.get("nextOffset");
        }
    } else {
        offset = KafkaUtils.getOffset(consumer, _config.topic, partition.partition, _config);
    }

    ByteBufferMessageSet msgs = null;
    try {
        msgs = fetchMessages(consumer, partition, offset);
    } catch (TopicOffsetOutOfRangeException e) {
        long newOffset = KafkaUtils.getOffset(consumer, _config.topic, partition.partition, _config);
        LOG.warn("OffsetOutOfRange: Updating offset from offset = " + offset + " to offset = " + newOffset);
        offset = newOffset;
        msgs = KafkaUtils.fetchMessages(_config, consumer, partition, offset);
    }

    long endoffset = offset;
    for (MessageAndOffset msg : msgs) {
        emit(collector, msg.message());
        endoffset = msg.nextOffset();
    }
    Map newMeta = new HashMap();
    newMeta.put("offset", offset);
    newMeta.put("nextOffset", endoffset);
    newMeta.put("instanceId", _topologyInstanceId);
    newMeta.put("partition", partition.partition);
    newMeta.put("broker", ImmutableMap.of("host", partition.host.host, "port", partition.host.port));
    newMeta.put("topic", _config.topic);
    newMeta.put("topology", ImmutableMap.of("name", _topologyName, "id", _topologyInstanceId));
    return newMeta;
}
 
Developer: redBorder, Project: rb-bi, Lines: 43, Source: TridentKafkaEmitter.java

Example 10: readEvents

import kafka.message.MessageAndOffset; // import the package/class this method depends on
/**
 * Read events.
 *
 * Any errors that occur during the read are wrapped in a KafkaPartitionReaderException,
 * which carries the Kafka error code; the caller is expected to handle the exception.
 *
 * @return the list of events read (possibly empty)
 * @throws KafkaPartitionReaderException if the fetch response reports an error
 */
public List<MessageAndMetadata<byte[], byte[]>> readEvents() throws KafkaPartitionReaderException {
    List<MessageAndMetadata<byte[], byte[]>> events = new ArrayList<MessageAndMetadata<byte[], byte[]>>();
    if (isClosed()) {
        return events;
    }
    //LOG.log("Start Reading PartitionReader from [" + readOffset + "] once, Topic[" + topic + "] partition[" + partition + "]");
    if (nextBatchSizeBytes < 0)
        nextBatchSizeBytes = config.fetchMinBytes(); // config.getBatchSizeBytes()

    if (nextBatchSizeBytes == 0) {
        // nextBatchSize only affects one fetch
        nextBatchSizeBytes = config.fetchMinBytes(); // config.getBatchSizeBytes()
        return events;
    }

    boolean hasMessage = false;
    ByteBufferMessageSet messageSet = null;
    do {
        FetchRequest req = new FetchRequestBuilder()
                .clientId(clientId)
                .addFetch(topic, partition, readOffset, nextBatchSizeBytes)
                .build();

        FetchResponse fetchResponse = consumer.fetch(req);
        if (fetchResponse.hasError()) {
            short code = fetchResponse.errorCode(topic, partition);
            throw new KafkaPartitionReaderException(code);
        } else {
            messageSet = fetchResponse.messageSet(topic, partition);
            hasMessage = messageSet.iterator().hasNext();
            if (!hasMessage) {
                // empty fetch: grow the batch size (capped) before retrying
                nextBatchSizeBytes = Math.min(nextBatchSizeBytes * 2, config.fetchMessageMaxBytes() /* config.getMaxBatchSizeBytes() */);
            }
        }
    } while (!hasMessage && !readToTheEnd()); // TODO: test readToTheEnd(); consider config.getMaxBatchSizeBytes()
    if (!hasMessage) {
        // set this reader to idle
        onIdle();
        nextBatchSizeBytes = config.fetchMinBytes(); // config.getBatchSizeBytes()
        return events; // return empty events
    }
    for (MessageAndOffset messageAndOffset : messageSet) {
        long currentOffset = messageAndOffset.offset();
        if (currentOffset < readOffset) {
            continue;
        }
        readOffset = messageAndOffset.nextOffset();
        Message message = messageAndOffset.message();
        MessageAndMetadata<byte[], byte[]> mam = new MessageAndMetadata<byte[], byte[]>(topic, partition, message, readOffset, decoder, decoder);
        events.add(mam);
    }
    // nextBatchSize only affects one fetch
    nextBatchSizeBytes = config.fetchMinBytes(); // config.getBatchSizeBytes()
    return events;
}
 
Developer: pulsarIO, Project: druid-kafka-ext, Lines: 68, Source: ConsumerPartitionReader.java

Example 11: main

import kafka.message.MessageAndOffset; // import the package/class this method depends on
public static void main(String args[]) throws Exception {

    SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 100000, 64 * 1024, clientname);

    long offset_in_partition = 0;
    try {
        offset_in_partition = readoffset.readLong();
    } catch (EOFException ef) {
        // no checkpoint yet: start from the earliest available offset
        offset_in_partition = getOffset(consumer, topic, partition, kafka.api.OffsetRequest.EarliestTime(), clientname);
    }

    int messages = 0;
    while (true) {

        FetchRequest req = new FetchRequestBuilder()
            .clientId(clientname)
            .addFetch(topic, partition, offset_in_partition, 100000) // Note: this fetchSize of 100000 might need to be increased if large batches are written to Kafka
            .build();

        FetchResponse fetchResponse = consumer.fetch(req);

        long numRead = 0;
        for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
            long currentOffset = messageAndOffset.offset();
            if (currentOffset < offset_in_partition) {
                continue;
            }
            offset_in_partition = messageAndOffset.nextOffset();
            ByteBuffer payload = messageAndOffset.message().payload();

            byte[] bytes = new byte[payload.limit()];
            payload.get(bytes);
            System.out.println(String.valueOf(messageAndOffset.offset()) + ": " + new String(bytes, "UTF-8"));
            // persist the next offset so a restart resumes exactly where it left off
            readoffset.seek(0);
            readoffset.writeLong(offset_in_partition);
            numRead++;
            messages++;

            if (messages == 10) {
                System.out.println("Pretend a crash happened");
                System.exit(0);
            }
        }

        if (numRead == 0) {
            try {
                System.out.println("No messages read. Sleep 10 secs");
                Thread.sleep(10000);
            } catch (InterruptedException ie) {
            }
        }
    }
}
 
Developer: mdkhanga, Project: my-blog-code, Lines: 62, Source: KafkaOnceAndOnlyOnceRead.java
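One detail worth calling out: the readoffset handle used above is a field that does not appear in this excerpt. Given the readLong()/seek(0)/writeLong() calls, it is presumably a RandomAccessFile over a small checkpoint file that survives the simulated crash, which is what lets the restart resume at the stored offset. A plausible initialization, offered as an assumption rather than the original source:

import java.io.FileNotFoundException;
import java.io.RandomAccessFile;

// Assumed setup (not in the excerpt): a RandomAccessFile over a checkpoint file
// holding a single long, the next offset to read. The file name is a placeholder.
static final RandomAccessFile readoffset;
static {
    try {
        readoffset = new RandomAccessFile("kafka.offset.checkpoint", "rw");
    } catch (FileNotFoundException e) {
        throw new ExceptionInInitializerError(e);
    }
}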

Example 12: makeNext

import kafka.message.MessageAndOffset; // import the package/class this method depends on
protected MessageAndMetadata<K, V> makeNext() {
    FetchedDataChunk currentDataChunk = null;
    // if we don't have an iterator, get one
    Iterator<MessageAndOffset> localCurrent = current.get();
    if (localCurrent == null || !localCurrent.hasNext()) {
        if (consumerTimeoutMs < 0)
            currentDataChunk = Utils.take(channel);
        else {
            currentDataChunk = Utils.poll(channel, consumerTimeoutMs, TimeUnit.MILLISECONDS);
            if (currentDataChunk == null) {
                // reset state to make the iterator re-iterable
                resetState();
                throw new ConsumerTimeoutException();
            }
        }
        if (currentDataChunk.equals(ZookeeperConsumerConnector.shutdownCommand)) {
            logger.debug("Received the shutdown command");
            channel.offer(currentDataChunk);
            return allDone();
        } else {
            currentTopicInfo = currentDataChunk.topicInfo;
            long cdcFetchOffset = currentDataChunk.fetchOffset;
            long ctiConsumeOffset = currentTopicInfo.getConsumeOffset();
            if (ctiConsumeOffset < cdcFetchOffset) {
                logger.error("consumed offset: {} doesn't match fetch offset: {} for {};\n Consumer may lose data",
                        ctiConsumeOffset, cdcFetchOffset, currentTopicInfo);
                currentTopicInfo.resetConsumeOffset(cdcFetchOffset);
            }
            localCurrent = currentDataChunk.messages.iterator();

            current.set(localCurrent);
        }
        // if we just updated the current chunk and it is empty that means the fetch size is too small!
        if (currentDataChunk.messages.validBytes() == 0)
            throw new MessageSizeTooLargeException("Found a message larger than the maximum fetch size of this consumer on topic " +
                    "%s partition %d at fetch offset %d. Increase the fetch size, or decrease the maximum message size the broker will allow.",
                    currentDataChunk.topicInfo.topic, currentDataChunk.topicInfo.partitionId, currentDataChunk.fetchOffset);
    }
    MessageAndOffset item = localCurrent.next();
    // reject the messages that have already been consumed
    while (item.offset < currentTopicInfo.getConsumeOffset() && localCurrent.hasNext()) {
        item = localCurrent.next();
    }
    consumedOffset = item.nextOffset();

    item.message.ensureValid(); // validate checksum of message to ensure it is valid

    return new MessageAndMetadata(currentTopicInfo.topic, currentTopicInfo.partitionId, item.message, item.offset, keyDecoder, valueDecoder);
}
 
Developer: bingoohuang, Project: buka, Lines: 50, Source: ConsumerIterator.java


Note: the kafka.message.MessageAndOffset.nextOffset examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors, who retain copyright; consult each project's license before redistributing or reusing the code, and do not republish without permission.