

Java MiniDFSCluster.shutdown Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.MiniDFSCluster.shutdown. If you have been wondering what exactly MiniDFSCluster.shutdown does, how to use it, or where to find examples of it, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.MiniDFSCluster.


The following presents 15 code examples of the MiniDFSCluster.shutdown method, sorted by popularity by default.
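Before the examples, here is a minimal self-contained sketch of the pattern that nearly every example below follows: build the cluster inside a try block and call shutdown() in a finally block, so that NameNode/DataNode threads and storage-directory locks are released even when the test fails. The class name and the /demo path are hypothetical, for illustration only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsClusterShutdownSketch {
  public static void main(String[] args) throws Exception {
    MiniDFSCluster cluster = null;
    try {
      // Build a one-DataNode cluster and wait for it to come up.
      cluster = new MiniDFSCluster.Builder(new Configuration())
          .numDataNodes(1).build();
      cluster.waitActive();
      // Hypothetical test work against the cluster's file system.
      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/demo"));
    } finally {
      // Always release NameNode/DataNode threads and storage locks.
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
}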

Example 1: testMultipleNamespacesConfigured

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class required by the method
@Test
public void testMultipleNamespacesConfigured() throws Exception {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
            .numDataNodes(1).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);

    cluster.waitActive();
    DFSTestUtil.addHAConfiguration(conf, LOGICAL_NAME + "remote");
    DFSTestUtil.setFakeHttpAddresses(conf, LOGICAL_NAME + "remote");

    fs = (WebHdfsFileSystem)FileSystem.get(WEBHDFS_URI, conf);
    Assert.assertEquals(2, fs.getResolvedNNAddr().length);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 26, Source file: TestWebHDFSForHA.java

Example 2: testMerge

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class required by the method
public void testMerge() throws Exception {
  MiniDFSCluster dfsCluster = null;
  MiniMRClientCluster mrCluster = null;
  FileSystem fileSystem = null;
  try {
    Configuration conf = new Configuration();
    // Start the mini-MR and mini-DFS clusters
    dfsCluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(NUM_HADOOP_DATA_NODES).build();
    fileSystem = dfsCluster.getFileSystem();
    mrCluster = MiniMRClientClusterFactory.create(this.getClass(),
                                               NUM_HADOOP_DATA_NODES, conf);
    // Generate input.
    createInput(fileSystem);
    // Run the test.
    runMergeTest(new JobConf(mrCluster.getConfig()), fileSystem);
  } finally {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
    if (mrCluster != null) {
      mrCluster.stop();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 26, Source file: TestMerge.java

Example 3: testCreateWithNoDN

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class required by the method
/**
 * Test for catching the "no datanode" IOException when creating a file
 * while the datanode is not running for some reason.
 */
@Test(timeout=300000)
public void testCreateWithNoDN() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
    cluster.waitActive();
    FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);
    fs.create(new Path("/testnodatanode"));
    Assert.fail("No exception was thrown");
  } catch (IOException ex) {
    GenericTestUtils.assertExceptionContains("Failed to find datanode", ex);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 25, Source file: TestWebHDFS.java

Example 4: testDTInInsecureClusterWithFallback

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class required by the method
@Test
public void testDTInInsecureClusterWithFallback()
    throws IOException, URISyntaxException {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  conf.setBoolean(CommonConfigurationKeys
      .IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY, true);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);
    Assert.assertNull(webHdfs.getDelegationToken(null));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 19, Source file: TestWebHDFS.java

Example 5: testNotConfigured

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class required by the method
/** 
 * Test that an exception is thrown if a journal class doesn't exist
 * in the configuration 
 */
@Test(expected=IllegalArgumentException.class)
public void testNotConfigured() throws Exception {
  MiniDFSCluster cluster = null;
  Configuration conf = new Configuration();

  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
           "dummy://test");
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 21, Source file: TestGenericJournalConf.java

Example 6: testMoverCliWithFederationHA

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class required by the method
@Test
public void testMoverCliWithFederationHA() throws Exception {
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(3))
      .numDataNodes(0).build();
  final Configuration conf = new HdfsConfiguration();
  DFSTestUtil.setFederatedHAConfiguration(cluster, conf);
  try {
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(3, namenodes.size());

    Iterator<URI> iter = namenodes.iterator();
    URI nn1 = iter.next();
    URI nn2 = iter.next();
    URI nn3 = iter.next();
    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
        "-p", nn1 + "/foo", nn1 + "/bar", nn2 + "/foo/bar", nn3 + "/foobar");
    Assert.assertEquals(3, movePaths.size());
    checkMovePaths(movePaths.get(nn1), new Path("/foo"), new Path("/bar"));
    checkMovePaths(movePaths.get(nn2), new Path("/foo/bar"));
    checkMovePaths(movePaths.get(nn3), new Path("/foobar"));
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 27, Source file: TestMover.java

Example 7: testClassDoesntExist

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class required by the method
/**
 * Test that an exception is thrown if a journal class doesn't
 * exist in the classloader.
 */
@Test(expected=IllegalArgumentException.class)
public void testClassDoesntExist() throws Exception {
  MiniDFSCluster cluster = null;
  Configuration conf = new Configuration();

  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".dummy",
           "org.apache.hadoop.nonexistent");
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
           "dummy://test");

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 24, Source file: TestGenericJournalConf.java

Example 8: testStart

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class required by the method
@Test
public void testStart() throws IOException {
  // Start minicluster
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).numDataNodes(1)
      .build();
  cluster.waitActive();
  
  // Use an ephemeral port in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);
  
  // Start nfs
  Nfs3 nfs3 = new Nfs3(config);
  nfs3.startServiceInternal(false);

  RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountd()
      .getRpcProgram();
  mountd.nullOp(new XDR(), 1234, InetAddress.getByName("localhost"));
  
  RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
  nfsd.nullProcedure();
  
  cluster.shutdown();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 26, Source file: TestMountd.java

Example 9: testWebHdfsDeleteSnapshot

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class required by the method
/**
 * Test snapshot deletion through WebHdfs
 */
@Test
public void testWebHdfsDeleteSnapshot() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);

    final Path foo = new Path("/foo");
    dfs.mkdirs(foo);
    dfs.allowSnapshot(foo);

    webHdfs.createSnapshot(foo, "s1");
    final Path spath = webHdfs.createSnapshot(foo, null);
    Assert.assertTrue(webHdfs.exists(spath));
    final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
    Assert.assertTrue(webHdfs.exists(s1path));

    // delete the two snapshots
    webHdfs.deleteSnapshot(foo, "s1");
    Assert.assertFalse(webHdfs.exists(s1path));
    webHdfs.deleteSnapshot(foo, spath.getName());
    Assert.assertFalse(webHdfs.exists(spath));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 36, Source file: TestWebHDFS.java

Example 10: testZeroBlockSize

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class required by the method
/**
 * In this test case, an image was created containing a file with
 * preferredBlockSize = 0. We try to read this image, since a file with
 * preferredBlockSize = 0 was allowed before release 2.1.0-beta. A NameNode
 * from release 2.6 onwards will not be able to read this particular file.
 * See HDFS-7788 for more information.
 * @throws Exception
 */
@Test
public void testZeroBlockSize() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-with-zero-block-size");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));
  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, 
      nameDir.getAbsolutePath());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(false)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false)
      .waitSafeMode(false)
      .startupOption(StartupOption.UPGRADE)
      .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/zeroBlockFile");
    assertTrue("File /tmp/zeroBlockFile doesn't exist ", fs.exists(testPath));
    assertTrue("Name node didn't come up", cluster.isNameNodeUp(0));
  } finally {
    cluster.shutdown();
    //Clean up
    FileUtil.fullyDelete(dfsDir);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 42, Source file: TestFSImage.java

Example 11: testNameNodeMultipleSwitchesUsingBKJM

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class required by the method
/**
 * NameNode should load the edits correctly if the applicable edits are
 * present in the BKJM.
 */
@Test
public void testNameNodeMultipleSwitchesUsingBKJM() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
        .createJournalURI("/correctEditLogSelection").toString());
    BKJMUtil.addJournalManagerDefinition(conf);

    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0)
        .manageNameDfsSharedDirs(false).build();
    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);
    cluster.waitActive();
    cluster.transitionToActive(0);
    nn1.getRpcServer().rollEditLog(); // Roll Edits from current Active.
    // Transition to standby current active gracefully.
    cluster.transitionToStandby(0);
    // Make the other Active and Roll edits multiple times
    cluster.transitionToActive(1);
    nn2.getRpcServer().rollEditLog();
    nn2.getRpcServer().rollEditLog();
    // Now One more failover. So NN1 should be able to failover successfully.
    cluster.transitionToStandby(1);
    cluster.transitionToActive(0);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 38, Source file: TestBookKeeperAsHASharedDir.java

Example 12: testWebHdfsCreateSnapshot

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class required by the method
/**
 * Test snapshot creation through WebHdfs
 */
@Test
public void testWebHdfsCreateSnapshot() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);

    final Path foo = new Path("/foo");
    dfs.mkdirs(foo);

    try {
      webHdfs.createSnapshot(foo);
      fail("Cannot create snapshot on a non-snapshottable directory");
    } catch (Exception e) {
      GenericTestUtils.assertExceptionContains(
          "Directory is not a snapshottable directory", e);
    }

    // allow snapshots on /foo
    dfs.allowSnapshot(foo);
    // create snapshots on foo using WebHdfs
    webHdfs.createSnapshot(foo, "s1");
    // create snapshot without specifying name
    final Path spath = webHdfs.createSnapshot(foo, null);

    Assert.assertTrue(webHdfs.exists(spath));
    final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
    Assert.assertTrue(webHdfs.exists(s1path));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 42, Source file: TestWebHDFS.java

Example 13: shutdownDFSCluster

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class required by the method
private void shutdownDFSCluster(MiniDFSCluster cluster) {
  try {
    if (cluster != null)
      cluster.shutdown();

  } catch (Exception ignored) {
    // nothing we can do
    ignored.printStackTrace();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 11, Source file: TestMRCJCSocketFactory.java
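A common alternative to a shutdown helper like the one above is to let JUnit drive the teardown: an @After method runs even when a test fails, so the cluster is never leaked. The following is a minimal sketch under that assumption; the class, field, and test names are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

public class MiniDfsTeardownSketch {
  private MiniDFSCluster cluster;

  @Before
  public void setUp() throws Exception {
    cluster = new MiniDFSCluster.Builder(new Configuration())
        .numDataNodes(1).build();
    cluster.waitActive();
  }

  @After
  public void tearDown() {
    // Runs after every test, pass or fail, so the cluster is always released.
    if (cluster != null) {
      cluster.shutdown();
      cluster = null;
    }
  }

  @Test
  public void testClusterIsUp() throws Exception {
    Assert.assertTrue(cluster.getFileSystem().exists(new Path("/")));
  }
}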

Example 14: test2NNRegistration

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class required by the method
/**
 * Start multiple NNs and a single DN, and verify per-block-pool (BP)
 * registrations and handshakes.
 *
 * @throws IOException
 */
@Test
public void test2NNRegistration() throws IOException {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
      .build();
  try {
    cluster.waitActive();
    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);
    assertNotNull("cannot create nn1", nn1);
    assertNotNull("cannot create nn2", nn2);

    String bpid1 = FSImageTestUtil.getFSImage(nn1).getBlockPoolID();
    String bpid2 = FSImageTestUtil.getFSImage(nn2).getBlockPoolID();
    String cid1 = FSImageTestUtil.getFSImage(nn1).getClusterID();
    String cid2 = FSImageTestUtil.getFSImage(nn2).getClusterID();
    int lv1 = FSImageTestUtil.getFSImage(nn1).getLayoutVersion();
    int lv2 = FSImageTestUtil.getFSImage(nn2).getLayoutVersion();
    int ns1 = FSImageTestUtil.getFSImage(nn1).getNamespaceID();
    int ns2 = FSImageTestUtil.getFSImage(nn2).getNamespaceID();
    assertNotSame("namespace ids should be different", ns1, ns2);
    LOG.info("nn1: lv=" + lv1 + ";cid=" + cid1 + ";bpid=" + bpid1 + ";uri="
        + nn1.getNameNodeAddress());
    LOG.info("nn2: lv=" + lv2 + ";cid=" + cid2 + ";bpid=" + bpid2 + ";uri="
        + nn2.getNameNodeAddress());

    // check number of volumes in fsdataset
    DataNode dn = cluster.getDataNodes().get(0);
    final Map<String, Object> volInfos = dn.data.getVolumeInfoMap();
    Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
    int i = 0;
    for (Map.Entry<String, Object> e : volInfos.entrySet()) {
      LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
    }
    // number of volumes should be 2 - [data1, data2]
    assertEquals("number of volumes is wrong", 2, volInfos.size());

    for (BPOfferService bpos : dn.getAllBpOs()) {
      LOG.info("BP: " + bpos);
    }

    BPOfferService bpos1 = dn.getAllBpOs()[0];
    BPOfferService bpos2 = dn.getAllBpOs()[1];

    // The order of bpos is not guaranteed, so fix the order
    if (getNNSocketAddress(bpos1).equals(nn2.getNameNodeAddress())) {
      BPOfferService tmp = bpos1;
      bpos1 = bpos2;
      bpos2 = tmp;
    }

    assertEquals("wrong nn address", getNNSocketAddress(bpos1),
        nn1.getNameNodeAddress());
    assertEquals("wrong nn address", getNNSocketAddress(bpos2),
        nn2.getNameNodeAddress());
    assertEquals("wrong bpid", bpos1.getBlockPoolId(), bpid1);
    assertEquals("wrong bpid", bpos2.getBlockPoolId(), bpid2);
    assertEquals("wrong cid", dn.getClusterId(), cid1);
    assertEquals("cid should be same", cid2, cid1);
    assertEquals("namespace should be same",
        bpos1.bpNSInfo.namespaceID, ns1);
    assertEquals("namespace should be same",
        bpos2.bpNSInfo.namespaceID, ns2);
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 74, Source file: TestDataNodeMultipleRegistrations.java

Example 15: testNewNamenodeTakesOverWriter

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class required by the method
@Test (timeout = 30000)
public void testNewNamenodeTakesOverWriter() throws Exception {
  File nn1Dir = new File(
      MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image-nn1");
  File nn2Dir = new File(
      MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image-nn2");
  
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      nn1Dir.getAbsolutePath());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      mjc.getQuorumJournalURI("myjournal").toString());

  // Start the cluster once to generate the dfs dirs
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(0)
    .manageNameDfsDirs(false)
    .checkExitOnShutdown(false)
    .build();

  // Shutdown the cluster before making a copy of the namenode dir
  // to release all file locks, otherwise, the copy will fail on
  // some platforms.
  cluster.shutdown();

  try {
    // Start a second NN pointed to the same quorum.
    // We need to copy the image dir from the first NN -- or else
    // the new NN will just be rejected because of Namespace mismatch.
    FileUtil.fullyDelete(nn2Dir);
    FileUtil.copy(nn1Dir, FileSystem.getLocal(conf).getRaw(),
        new Path(nn2Dir.getAbsolutePath()), false, conf);

    // Start the cluster again
    cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(0)
      .format(false)
      .manageNameDfsDirs(false)
      .checkExitOnShutdown(false)
      .build();

    cluster.getFileSystem().mkdirs(TEST_PATH);

    Configuration conf2 = new Configuration();
    conf2.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        nn2Dir.getAbsolutePath());
    conf2.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
        mjc.getQuorumJournalURI("myjournal").toString());
    MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf2)
      .numDataNodes(0)
      .format(false)
      .manageNameDfsDirs(false)
      .build();
    
    // Check that the new cluster sees the edits made on the old cluster
    try {
      assertTrue(cluster2.getFileSystem().exists(TEST_PATH));
    } finally {
      cluster2.shutdown();
    }
    
    // Check that, if we try to write to the old NN
    // that it aborts.
    try {
      cluster.getFileSystem().mkdirs(new Path("/x"));
      fail("Did not abort trying to write to a fenced NN");
    } catch (RemoteException re) {
      GenericTestUtils.assertExceptionContains(
          "Could not sync enough journals to persistent storage", re);
    }
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 74, Source file: TestNNWithQJM.java


Note: the org.apache.hadoop.hdfs.MiniDFSCluster.shutdown method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project; do not repost without permission.