当前位置: 首页>>代码示例>>Java>>正文


Java KStreamBuilder.setApplicationId方法代码示例

本文整理汇总了Java中org.apache.kafka.streams.kstream.KStreamBuilder.setApplicationId方法的典型用法代码示例。如果您正苦于以下问题:Java KStreamBuilder.setApplicationId方法的具体用法?Java KStreamBuilder.setApplicationId怎么用?Java KStreamBuilder.setApplicationId使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.kafka.streams.kstream.KStreamBuilder的用法示例。


在下文中一共展示了KStreamBuilder.setApplicationId方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: KStreamTestDriver

import org.apache.kafka.streams.kstream.KStreamBuilder; //导入方法依赖的package包/类
/**
 * Builds a test driver around the given {@link KStreamBuilder}: compiles the
 * topology (and the global-state topology, if present), wires up a mock
 * processor context backed by a {@link ThreadCache} of the requested size,
 * and initializes all state stores so processors can be exercised directly.
 *
 * @param builder   the stream builder whose topology is under test
 * @param stateDir  directory used for state-store files
 * @param keySerde  default key serde handed to the mock context
 * @param valSerde  default value serde handed to the mock context
 * @param cacheSize record-cache capacity (bytes) for the thread cache
 */
public KStreamTestDriver(final KStreamBuilder builder,
                         final File stateDir,
                         final Serde<?> keySerde,
                         final Serde<?> valSerde,
                         final long cacheSize) {
    // An application id must be set before build(); use a fixed test id.
    builder.setApplicationId("TestDriver");
    topology = builder.build(null);
    globalTopology = builder.buildGlobalStateTopology();
    final ThreadCache cache = new ThreadCache("testCache", cacheSize, new MockStreamsMetrics(new Metrics()));
    context = new MockProcessorContext(stateDir, keySerde, valSerde, new MockRecordCollector(), cache);
    // Seed a dummy record context (offset/timestamp/partition all 0) so
    // processors that read it during init have something to look at.
    context.setRecordContext(new ProcessorRecordContext(0, 0, 0, "topic"));
    // init global topology first as it will add stores to the
    // store map that are required for joins etc.
    if (globalTopology != null) {
        initTopology(globalTopology, globalTopology.globalStateStores());
    }
    initTopology(topology, topology.stateStores());
}
 
开发者ID:YMCoding,项目名称:kafka-0.11.0.0-src-with-comment,代码行数:19,代码来源:KStreamTestDriver.java

示例2: shouldInitializeRestoreConsumerWithOffsetsFromStandbyTasks

import org.apache.kafka.streams.kstream.KStreamBuilder; //导入方法依赖的package包/类
@Test
public void shouldInitializeRestoreConsumerWithOffsetsFromStandbyTasks() throws Exception {
    // Topology with two aggregations, each backed by its own changelog topic.
    final KStreamBuilder builder = new KStreamBuilder();
    builder.setApplicationId(applicationId);
    builder.stream("t1").groupByKey().count("count-one");
    builder.stream("t2").groupByKey().count("count-two");

    final StreamThread thread = new StreamThread(
        builder,
        config,
        clientSupplier,
        applicationId,
        clientId,
        processId,
        metrics,
        mockTime,
        new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST),
        0);

    // Make both changelog topics visible to the restore consumer.
    final String changelogOne = "stream-thread-test-count-one-changelog";
    final String changelogTwo = "stream-thread-test-count-two-changelog";
    final MockConsumer<byte[], byte[]> restoreConsumer = clientSupplier.restoreConsumer;
    restoreConsumer.updatePartitions(changelogOne,
                                     Collections.singletonList(new PartitionInfo(changelogOne, 0, null, new Node[0], new Node[0])));
    restoreConsumer.updatePartitions(changelogTwo,
                                     Collections.singletonList(new PartitionInfo(changelogTwo, 0, null, new Node[0], new Node[0])));

    final Map<TaskId, Set<TopicPartition>> standbys = new HashMap<>();
    standbys.put(new TaskId(0, 0), Utils.mkSet(new TopicPartition("t1", 0)));

    thread.setPartitionAssignor(new StreamPartitionAssignor() {
        @Override
        Map<TaskId, Set<TopicPartition>> standbyTasks() {
            return standbys;
        }
    });

    // First rebalance: only the standby for t1 exists, so the restore
    // consumer should be assigned just that task's changelog partition.
    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(Collections.<TopicPartition>emptyList());

    assertThat(restoreConsumer.assignment(), equalTo(Utils.mkSet(new TopicPartition(changelogOne, 0))));

    // Second rebalance: keep the existing standby and add a new one for t2.
    standbys.put(new TaskId(1, 0), Utils.mkSet(new TopicPartition("t2", 0)));
    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(Collections.<TopicPartition>emptyList());

    assertThat(restoreConsumer.assignment(), equalTo(Utils.mkSet(new TopicPartition(changelogOne, 0),
                                                                 new TopicPartition(changelogTwo, 0))));
}
 
开发者ID:YMCoding,项目名称:kafka-0.11.0.0-src-with-comment,代码行数:58,代码来源:StreamThreadTest.java

示例3: shouldCloseSuspendedTasksThatAreNoLongerAssignedToThisStreamThreadBeforeCreatingNewTasks

import org.apache.kafka.streams.kstream.KStreamBuilder; //导入方法依赖的package包/类
@Test
public void shouldCloseSuspendedTasksThatAreNoLongerAssignedToThisStreamThreadBeforeCreatingNewTasks() throws Exception {
    // Two aggregations -> two changelog topics the restore consumer must know.
    final KStreamBuilder builder = new KStreamBuilder();
    builder.setApplicationId(applicationId);
    builder.stream("t1").groupByKey().count("count-one");
    builder.stream("t2").groupByKey().count("count-two");

    final StreamThread thread = new StreamThread(
        builder,
        config,
        clientSupplier,
        applicationId,
        clientId,
        processId,
        metrics,
        mockTime,
        new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST),
        0);

    final String changelogOne = "stream-thread-test-count-one-changelog";
    final String changelogTwo = "stream-thread-test-count-two-changelog";
    final MockConsumer<byte[], byte[]> restoreConsumer = clientSupplier.restoreConsumer;
    restoreConsumer.updatePartitions(changelogOne,
                                     Collections.singletonList(new PartitionInfo(changelogOne, 0, null, new Node[0], new Node[0])));
    restoreConsumer.updatePartitions(changelogTwo,
                                     Collections.singletonList(new PartitionInfo(changelogTwo, 0, null, new Node[0], new Node[0])));

    // Changelogs are "fully restored" from the start (begin == end == 0).
    final HashMap<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(new TopicPartition(changelogOne, 0), 0L);
    offsets.put(new TopicPartition(changelogTwo, 0), 0L);
    restoreConsumer.updateEndOffsets(offsets);
    restoreConsumer.updateBeginningOffsets(offsets);

    final TopicPartition t1 = new TopicPartition("t1", 0);
    final TopicPartition t2 = new TopicPartition("t2", 0);

    // Initial assignment: standby on t1, active on t2.
    final Map<TaskId, Set<TopicPartition>> standbys = new HashMap<>();
    standbys.put(new TaskId(0, 0), Utils.mkSet(t1));
    final Map<TaskId, Set<TopicPartition>> actives = new HashMap<>();
    actives.put(new TaskId(1, 0), Utils.mkSet(t2));

    thread.setPartitionAssignor(new StreamPartitionAssignor() {
        @Override
        Map<TaskId, Set<TopicPartition>> standbyTasks() {
            return standbys;
        }

        @Override
        Map<TaskId, Set<TopicPartition>> activeTasks() {
            return actives;
        }
    });

    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(Utils.mkSet(t2));

    // swap the assignment around and make sure we don't get any exceptions
    standbys.clear();
    actives.clear();
    standbys.put(new TaskId(1, 0), Utils.mkSet(t2));
    actives.put(new TaskId(0, 0), Utils.mkSet(t1));

    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(Utils.mkSet(t1));
}
 
开发者ID:YMCoding,项目名称:kafka-0.11.0.0-src-with-comment,代码行数:72,代码来源:StreamThreadTest.java

示例4: shouldCloseActiveTasksThatAreAssignedToThisStreamThreadButAssignmentHasChangedBeforeCreatingNewTasks

import org.apache.kafka.streams.kstream.KStreamBuilder; //导入方法依赖的package包/类
// Verifies that when a task keeps its id but its partition assignment changes,
// the previously-created task instance is fully closed (task AND its state
// manager) before a replacement task is created for the new assignment.
@Test
public void shouldCloseActiveTasksThatAreAssignedToThisStreamThreadButAssignmentHasChangedBeforeCreatingNewTasks() throws Exception {
    final KStreamBuilder builder = new KStreamBuilder();
    builder.setApplicationId(applicationId);
    // Pattern subscription so a new topic ("t2") can join the assignment later.
    builder.stream(Pattern.compile("t.*")).to("out");

    // Records every task the thread creates, keyed by its partition set.
    final Map<Collection<TopicPartition>, TestStreamTask> createdTasks = new HashMap<>();

    final StreamThread thread = new StreamThread(
        builder,
        config,
        clientSupplier,
        applicationId,
        clientId,
        processId,
        metrics,
        mockTime,
        new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST),
        0) {

        // Intercept task creation so the test can inspect created/closed tasks.
        @Override
        protected StreamTask createStreamTask(final TaskId id, final Collection<TopicPartition> partitions) {
            final ProcessorTopology topology = builder.build(id.topicGroupId);
            final TestStreamTask task = new TestStreamTask(
                id,
                applicationId,
                partitions,
                topology,
                consumer,
                clientSupplier.getProducer(new HashMap<String, Object>()),
                restoreConsumer,
                config,
                new MockStreamsMetrics(new Metrics()),
                stateDirectory);
            createdTasks.put(partitions, task);
            return task;
        }
    };

    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    final TopicPartition t1 = new TopicPartition("t1", 0);
    final Set<TopicPartition> task00Partitions = new HashSet<>();
    task00Partitions.add(t1);
    final TaskId taskId = new TaskId(0, 0);
    activeTasks.put(taskId, task00Partitions);

    thread.setPartitionAssignor(new StreamPartitionAssignor() {
        @Override
        Map<TaskId, Set<TopicPartition>> activeTasks() {
            return activeTasks;
        }
    });

    // SubscriptionUpdates exposes no public mutator, so reflection is used to
    // inject the set of topics the pattern subscription is considered to match.
    StreamPartitionAssignor.SubscriptionUpdates subscriptionUpdates = new StreamPartitionAssignor.SubscriptionUpdates();
    Field updatedTopicsField  = subscriptionUpdates.getClass().getDeclaredField("updatedTopicSubscriptions");
    updatedTopicsField.setAccessible(true);
    Set<String> updatedTopics = (Set<String>) updatedTopicsField.get(subscriptionUpdates);
    updatedTopics.add(t1.topic());
    builder.updateSubscriptions(subscriptionUpdates, null);

    // should create task for id 0_0 with a single partition
    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(task00Partitions);

    final TestStreamTask firstTask = createdTasks.get(task00Partitions);
    assertThat(firstTask.id(), is(taskId));

    // update assignment for the task 0_0 so it now has 2 partitions
    task00Partitions.add(new TopicPartition("t2", 0));
    updatedTopics.add("t2");

    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(task00Partitions);

    // should close the first task as the assignment has changed
    assertTrue("task should have been closed as assignment has changed", firstTask.closed);
    assertTrue("tasks state manager should have been closed as assignment has changed", firstTask.closedStateManager);
    // should have created a new task for 00
    assertThat(createdTasks.get(task00Partitions).id(), is(taskId));
}
 
开发者ID:YMCoding,项目名称:kafka-0.11.0.0-src-with-comment,代码行数:81,代码来源:StreamThreadTest.java

示例5: shouldNotViolateAtLeastOnceWhenAnExceptionOccursOnTaskCloseDuringShutdown

import org.apache.kafka.streams.kstream.KStreamBuilder; //导入方法依赖的package包/类
@Test
public void shouldNotViolateAtLeastOnceWhenAnExceptionOccursOnTaskCloseDuringShutdown() throws Exception {
    final KStreamBuilder builder = new KStreamBuilder();
    builder.setApplicationId(applicationId);
    builder.stream("t1").groupByKey();

    // Task whose close() always blows up, simulating a failure during shutdown.
    final TestStreamTask failingTask = new TestStreamTask(
        new TaskId(0, 0),
        applicationId,
        Utils.mkSet(new TopicPartition("t1", 0)),
        builder.build(0),
        clientSupplier.consumer,
        clientSupplier.getProducer(new HashMap<String, Object>()),
        clientSupplier.restoreConsumer,
        config,
        new MockStreamsMetrics(new Metrics()),
        new StateDirectory(applicationId, config.getString(StreamsConfig.STATE_DIR_CONFIG), mockTime)) {

        @Override
        public void close(final boolean clean) {
            throw new RuntimeException("KABOOM!");
        }
    };

    // Thread that always hands out the failing task.
    final StreamThread thread = new StreamThread(
        builder,
        config,
        clientSupplier,
        applicationId,
        clientId,
        processId,
        metrics,
        mockTime,
        new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST),
        0) {

        @Override
        protected StreamTask createStreamTask(final TaskId id, final Collection<TopicPartition> partitions) {
            return failingTask;
        }
    };

    final Map<TaskId, Set<TopicPartition>> assignment = new HashMap<>();
    assignment.put(failingTask.id(), failingTask.partitions);
    thread.setPartitionAssignor(new MockStreamsPartitionAssignor(assignment));

    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(failingTask.partitions);

    // Shut the thread down; close() throws, so the task must NOT be committed.
    thread.start();
    thread.close();
    thread.join();
    assertFalse("task shouldn't have been committed as there was an exception during shutdown", failingTask.committed);
}
 
开发者ID:YMCoding,项目名称:kafka-0.11.0.0-src-with-comment,代码行数:56,代码来源:StreamThreadTest.java

示例6: shouldNotViolateAtLeastOnceWhenExceptionOccursDuringTaskSuspension

import org.apache.kafka.streams.kstream.KStreamBuilder; //导入方法依赖的package包/类
@Test
public void shouldNotViolateAtLeastOnceWhenExceptionOccursDuringTaskSuspension() throws Exception {
    final KStreamBuilder builder = new KStreamBuilder();
    builder.setApplicationId(applicationId);
    builder.stream("t1").groupByKey();

    // Task whose suspend() always fails, simulating an error mid-rebalance.
    final TestStreamTask failingTask = new TestStreamTask(
        new TaskId(0, 0),
        applicationId,
        Utils.mkSet(new TopicPartition("t1", 0)),
        builder.build(0),
        clientSupplier.consumer,
        clientSupplier.getProducer(new HashMap<String, Object>()),
        clientSupplier.restoreConsumer,
        config,
        new MockStreamsMetrics(new Metrics()),
        new StateDirectory(applicationId, config.getString(StreamsConfig.STATE_DIR_CONFIG), mockTime)) {

        @Override
        public void suspend() {
            throw new RuntimeException("KABOOM!");
        }
    };

    // Thread that always hands out the failing task.
    final StreamThread thread = new StreamThread(
        builder,
        config,
        clientSupplier,
        applicationId,
        clientId,
        processId,
        metrics,
        mockTime,
        new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST),
        0) {

        @Override
        protected StreamTask createStreamTask(final TaskId id, final Collection<TopicPartition> partitions) {
            return failingTask;
        }
    };

    final Map<TaskId, Set<TopicPartition>> assignment = new HashMap<>();
    assignment.put(failingTask.id(), failingTask.partitions);
    thread.setPartitionAssignor(new MockStreamsPartitionAssignor(assignment));

    // Assign the task, then revoke again: suspension blows up, but at-least-once
    // semantics require that the task was not committed.
    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(failingTask.partitions);
    try {
        thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
        fail("should have thrown exception");
    } catch (final Exception e) {
        // expected
    }
    assertFalse(failingTask.committed);
}
 
开发者ID:YMCoding,项目名称:kafka-0.11.0.0-src-with-comment,代码行数:60,代码来源:StreamThreadTest.java

示例7: shouldNotViolateAtLeastOnceWhenExceptionOccursDuringFlushStateWhileSuspendingState

import org.apache.kafka.streams.kstream.KStreamBuilder; //导入方法依赖的package包/类
@Test
public void shouldNotViolateAtLeastOnceWhenExceptionOccursDuringFlushStateWhileSuspendingState() throws Exception {
    final KStreamBuilder builder = new KStreamBuilder();
    builder.setApplicationId(applicationId);
    builder.stream("t1").groupByKey();

    // Task whose state flush always fails during suspension.
    final TestStreamTask failingTask = new TestStreamTask(
        new TaskId(0, 0),
        applicationId,
        Utils.mkSet(new TopicPartition("t1", 0)),
        builder.build(0),
        clientSupplier.consumer,
        clientSupplier.getProducer(new HashMap<String, Object>()),
        clientSupplier.restoreConsumer,
        config,
        new MockStreamsMetrics(new Metrics()),
        new StateDirectory(applicationId, config.getString(StreamsConfig.STATE_DIR_CONFIG), mockTime)) {

        @Override
        protected void flushState() {
            throw new RuntimeException("KABOOM!");
        }
    };

    // Thread that always hands out the failing task.
    final StreamThread thread = new StreamThread(
        builder,
        config,
        clientSupplier,
        applicationId,
        clientId,
        processId,
        metrics,
        mockTime,
        new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST),
        0) {

        @Override
        protected StreamTask createStreamTask(final TaskId id, final Collection<TopicPartition> partitions) {
            return failingTask;
        }
    };

    final Map<TaskId, Set<TopicPartition>> assignment = new HashMap<>();
    assignment.put(failingTask.id(), failingTask.partitions);
    thread.setPartitionAssignor(new MockStreamsPartitionAssignor(assignment));

    // Assign the task, then revoke again: flushState blows up during suspension,
    // but at-least-once semantics require the task was not committed.
    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(failingTask.partitions);
    try {
        thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
        fail("should have thrown exception");
    } catch (final Exception e) {
        // expected
    }
    assertFalse(failingTask.committed);
}
 
开发者ID:YMCoding,项目名称:kafka-0.11.0.0-src-with-comment,代码行数:59,代码来源:StreamThreadTest.java

示例8: shouldNotAddStandbyTaskPartitionsToPartitionsForHost

import org.apache.kafka.streams.kstream.KStreamBuilder; //导入方法依赖的package包/类
// Verifies that partitionsByHost in the assignment metadata contains only the
// partitions of ACTIVE tasks: with one standby replica configured, no single
// host may report owning every partition, yet the hosts together must cover
// all of them.
@Test
public void shouldNotAddStandbyTaskPartitionsToPartitionsForHost() throws Exception {
    // Enable one standby replica so the assignment contains standby tasks.
    final Properties props = configProps();
    props.setProperty(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, "1");
    final StreamsConfig config = new StreamsConfig(props);
    final KStreamBuilder builder = new KStreamBuilder();
    final String applicationId = "appId";
    builder.setApplicationId(applicationId);
    builder.stream("topic1").groupByKey().count("count");

    final UUID uuid = UUID.randomUUID();
    final String client = "client1";

    // (formatting fix: one argument per line instead of a single 10-arg line)
    final StreamThread streamThread = new StreamThread(
        builder,
        config,
        mockClientSupplier,
        applicationId,
        client,
        uuid,
        new Metrics(),
        Time.SYSTEM,
        new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST),
        0);

    partitionAssignor.configure(config.getConsumerConfigs(streamThread, applicationId, client));
    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(streamThread.config, mockClientSupplier.restoreConsumer));

    // Two consumers on different hosts, both subscribed to the same topic.
    final Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    final Set<TaskId> emptyTasks = Collections.emptySet();
    subscriptions.put(
            "consumer1",
            new PartitionAssignor.Subscription(
                    Collections.singletonList("topic1"),
                    new SubscriptionInfo(uuid, emptyTasks, emptyTasks, userEndPoint).encode()
            )
    );

    subscriptions.put(
            "consumer2",
            new PartitionAssignor.Subscription(
                    Collections.singletonList("topic1"),
                    new SubscriptionInfo(UUID.randomUUID(), emptyTasks, emptyTasks, "other:9090").encode()
            )
    );

    final Set<TopicPartition> allPartitions = Utils.mkSet(t1p0, t1p1, t1p2);
    final Map<String, PartitionAssignor.Assignment> assign = partitionAssignor.assign(metadata, subscriptions);
    final PartitionAssignor.Assignment consumer1Assignment = assign.get("consumer1");
    final AssignmentInfo assignmentInfo = AssignmentInfo.decode(consumer1Assignment.userData());

    // naming fix: consumer1Partitions (was lowercase-p "consumer1partitions",
    // inconsistent with consumer2Partitions); declared as Set, not HashSet.
    final Set<TopicPartition> consumer1Partitions = assignmentInfo.partitionsByHost.get(new HostInfo("localhost", 2171));
    final Set<TopicPartition> consumer2Partitions = assignmentInfo.partitionsByHost.get(new HostInfo("other", 9090));
    final Set<TopicPartition> allAssignedPartitions = new HashSet<>(consumer1Partitions);
    allAssignedPartitions.addAll(consumer2Partitions);
    assertThat(consumer1Partitions, not(allPartitions));
    assertThat(consumer2Partitions, not(allPartitions));
    assertThat(allAssignedPartitions, equalTo(allPartitions));
}
 
开发者ID:YMCoding,项目名称:kafka-0.11.0.0-src-with-comment,代码行数:48,代码来源:StreamPartitionAssignorTest.java

示例9: before

import org.apache.kafka.streams.kstream.KStreamBuilder; //导入方法依赖的package包/类
@Before
public void before() {
    // Topology: three counted streams, a merged count, a pass-through
    // mapValues stream, and one global table.
    builder = new KStreamBuilder();
    final KStream<Object, Object> streamOne = builder.stream("topic-one");
    streamOne.groupByKey().count("table-one");

    final KStream<Object, Object> streamTwo = builder.stream("topic-two");
    streamTwo.groupByKey().count("table-two");

    builder.stream("topic-three")
            .groupByKey()
            .count("table-three");

    builder.merge(streamOne, streamTwo).groupByKey().count("merged-table");

    // Identity mapValues: topic-four has no state store attached.
    builder.stream("topic-four").mapValues(new ValueMapper<Object, Object>() {
        @Override
        public Object apply(final Object value) {
            return value;
        }
    });

    builder.globalTable("global-topic", "global-table");

    builder.setApplicationId("appId");

    topic1P0 = new TopicPartition("topic-one", 0);
    topic1P1 = new TopicPartition("topic-one", 1);
    topic2P0 = new TopicPartition("topic-two", 0);
    topic2P1 = new TopicPartition("topic-two", 1);
    topic3P0 = new TopicPartition("topic-three", 0);
    topic4P0 = new TopicPartition("topic-four", 0);

    // Spread the partitions over three hosts.
    hostOne = new HostInfo("host-one", 8080);
    hostTwo = new HostInfo("host-two", 9090);
    hostThree = new HostInfo("host-three", 7070);
    hostToPartitions = new HashMap<>();
    hostToPartitions.put(hostOne, Utils.mkSet(topic1P0, topic2P1, topic4P0));
    hostToPartitions.put(hostTwo, Utils.mkSet(topic2P0, topic1P1));
    hostToPartitions.put(hostThree, Collections.singleton(topic3P0));

    partitionInfos = Arrays.asList(
            new PartitionInfo("topic-one", 0, null, null, null),
            new PartitionInfo("topic-one", 1, null, null, null),
            new PartitionInfo("topic-two", 0, null, null, null),
            new PartitionInfo("topic-two", 1, null, null, null),
            new PartitionInfo("topic-three", 0, null, null, null),
            new PartitionInfo("topic-four", 0, null, null, null));

    cluster = new Cluster(null, Collections.<Node>emptyList(), partitionInfos, Collections.<String>emptySet(), Collections.<String>emptySet());
    discovery = new StreamsMetadataState(builder, hostOne);
    discovery.onChange(hostToPartitions, cluster);
    // Fixed partitioner: every record goes to partition 1.
    partitioner = new StreamPartitioner<String, Object>() {
        @Override
        public Integer partition(final String key, final Object value, final int numPartitions) {
            return 1;
        }
    };
}
 
开发者ID:YMCoding,项目名称:kafka-0.11.0.0-src-with-comment,代码行数:60,代码来源:StreamsMetadataStateTest.java


注:本文中的org.apache.kafka.streams.kstream.KStreamBuilder.setApplicationId方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。