

Java DFSUtil.getNsServiceRpcUris Method Code Examples

This article collects typical usage examples of the DFSUtil.getNsServiceRpcUris method from the Java class org.apache.hadoop.hdfs.DFSUtil. If you are wondering what exactly DFSUtil.getNsServiceRpcUris does, or how to use it, the curated code examples below may help. You can also explore other usage examples of org.apache.hadoop.hdfs.DFSUtil.


The following 13 code examples demonstrate DFSUtil.getNsServiceRpcUris, ordered by popularity by default.
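Before the test-suite examples, here is a minimal, self-contained sketch of the basic call pattern, assuming a Hadoop 2.x client classpath with core-site.xml/hdfs-site.xml available; the class name NsServiceRpcUrisDemo is hypothetical and only for illustration:

import java.net.URI;
import java.util.Collection;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class NsServiceRpcUrisDemo {
  public static void main(String[] args) throws Exception {
    // Load core-site.xml / hdfs-site.xml from the classpath.
    Configuration conf = new HdfsConfiguration();

    // Resolve one URI per configured name service: for an HA name service
    // this is the logical URI (e.g. hdfs://mycluster), while for a plain
    // single-namenode setup it is the namenode's (service) RPC address.
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    for (URI nn : namenodes) {
      System.out.println("name service URI: " + nn);
    }
  }
}

Tools such as the Balancer and the Mover use this call to discover every namenode they must connect to, as the examples below show.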

Example 1: testMoverCliWithHAConf

import org.apache.hadoop.hdfs.DFSUtil; // import the class the method depends on
@Test
public void testMoverCliWithHAConf() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0).build();
  HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster");
  try {
    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
        "-p", "/foo", "/bar");
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Assert.assertEquals(1, movePaths.size());
    URI nn = namenodes.iterator().next();
    Assert.assertEquals(new URI("hdfs://MyCluster"), nn);
    Assert.assertTrue(movePaths.containsKey(nn));
    checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: TestMover.java

Example 2: testMoverCliWithFederationHA

import org.apache.hadoop.hdfs.DFSUtil; // import the class the method depends on
@Test
public void testMoverCliWithFederationHA() throws Exception {
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(3))
      .numDataNodes(0).build();
  final Configuration conf = new HdfsConfiguration();
  DFSTestUtil.setFederatedHAConfiguration(cluster, conf);
  try {
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(3, namenodes.size());

    Iterator<URI> iter = namenodes.iterator();
    URI nn1 = iter.next();
    URI nn2 = iter.next();
    URI nn3 = iter.next();
    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
        "-p", nn1 + "/foo", nn1 + "/bar", nn2 + "/foo/bar", nn3 + "/foobar");
    Assert.assertEquals(3, movePaths.size());
    checkMovePaths(movePaths.get(nn1), new Path("/foo"), new Path("/bar"));
    checkMovePaths(movePaths.get(nn2), new Path("/foo/bar"));
    checkMovePaths(movePaths.get(nn3), new Path("/foobar"));
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestMover.java

Example 3: runBalancer

import org.apache.hadoop.hdfs.DFSUtil; // import the class the method depends on
private void runBalancer(Configuration conf,
   long totalUsedSpace, long totalCapacity, Balancer.Parameters p,
   int excludedNodes) throws Exception {
  waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);

  // start rebalancing
  Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
  final int r = runBalancer(namenodes, p, conf);
  if (conf.getInt(DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
      DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT) == 0) {
    assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
    return;
  } else {
    assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
  }
  waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
  LOG.info("  .");
  waitForBalancer(totalUsedSpace, totalCapacity, client, cluster, p, excludedNodes);
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: TestBalancer.java

Example 4: newMover

import org.apache.hadoop.hdfs.DFSUtil; // import the class the method depends on
static Mover newMover(Configuration conf) throws IOException {
  final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
  Assert.assertEquals(1, namenodes.size());
  Map<URI, List<Path>> nnMap = Maps.newHashMap();
  for (URI nn : namenodes) {
    nnMap.put(nn, null);
  }

  final List<NameNodeConnector> nncs = NameNodeConnector.newNameNodeConnectors(
      nnMap, Mover.class.getSimpleName(), Mover.MOVER_ID_PATH, conf,
      NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
  return new Mover(nncs.get(0), conf, new AtomicInteger(0));
}
 
Developer: naver, Project: hadoop, Lines: 14, Source: TestMover.java

Example 5: testMoverCli

import org.apache.hadoop.hdfs.DFSUtil; // import the class the method depends on
/**
 * Test Mover Cli by specifying a list of files/directories using option "-p".
 * There is only one namenode (and hence name service) specified in the conf.
 */
@Test
public void testMoverCli() throws Exception {
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration()).numDataNodes(0).build();
  try {
    final Configuration conf = cluster.getConfiguration(0);
    try {
      Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "bar");
      Assert.fail("Expected exception for illegal path bar");
    } catch (IllegalArgumentException e) {
      GenericTestUtils.assertExceptionContains("bar is not absolute", e);
    }

    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf);
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Assert.assertEquals(1, movePaths.size());
    URI nn = namenodes.iterator().next();
    Assert.assertTrue(movePaths.containsKey(nn));
    Assert.assertNull(movePaths.get(nn));

    movePaths = Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "/bar");
    namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, movePaths.size());
    nn = namenodes.iterator().next();
    Assert.assertTrue(movePaths.containsKey(nn));
    checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 36, Source: TestMover.java

Example 6: runMover

import org.apache.hadoop.hdfs.DFSUtil; // import the class the method depends on
private void runMover() throws Exception {
  Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
  Map<URI, List<Path>> nnMap = Maps.newHashMap();
  for (URI nn : namenodes) {
    nnMap.put(nn, null);
  }
  int result = Mover.run(nnMap, conf);
  Assert.assertEquals(ExitStatus.SUCCESS.getExitCode(), result);
}
 
Developer: naver, Project: hadoop, Lines: 10, Source: TestStorageMover.java

Example 7: runBalancer

import org.apache.hadoop.hdfs.DFSUtil; // import the class the method depends on
private void runBalancer(Configuration conf,
    long totalUsedSpace, long totalCapacity) throws Exception {
  waitForHeartBeat(totalUsedSpace, totalCapacity);

  // start rebalancing
  Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
  final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
  assertEquals(ExitStatus.SUCCESS.getExitCode(), r);

  waitForHeartBeat(totalUsedSpace, totalCapacity);
  LOG.info("Rebalancing with default factor.");
  waitForBalancer(totalUsedSpace, totalCapacity);
}
 
Developer: naver, Project: hadoop, Lines: 14, Source: TestBalancerWithNodeGroup.java

Example 8: runBalancerCanFinish

import org.apache.hadoop.hdfs.DFSUtil; // import the class the method depends on
private void runBalancerCanFinish(Configuration conf,
    long totalUsedSpace, long totalCapacity) throws Exception {
  waitForHeartBeat(totalUsedSpace, totalCapacity);

  // start rebalancing
  Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
  final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
  Assert.assertTrue(r == ExitStatus.SUCCESS.getExitCode() ||
      (r == ExitStatus.NO_MOVE_PROGRESS.getExitCode()));
  waitForHeartBeat(totalUsedSpace, totalCapacity);
  LOG.info("Rebalancing with default factor.");
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: TestBalancerWithNodeGroup.java

Example 9: testBalancerWithHANameNodes

import org.apache.hadoop.hdfs.DFSUtil; // import the class the method depends on
/**
 * Test a cluster with even distribution after a new empty node is added
 * to the cluster. The test starts a cluster with the specified number of
 * nodes, fills it to 30% full (with a single file replicated identically
 * to all datanodes), then adds one new empty node and starts balancing.
 */
@Test(timeout = 60000)
public void testBalancerWithHANameNodes() throws Exception {
  Configuration conf = new HdfsConfiguration();
  TestBalancer.initConf(conf);
  long newNodeCapacity = TestBalancer.CAPACITY; // new node's capacity
  String newNodeRack = TestBalancer.RACK2; // new node's rack
  // array of racks for original nodes in cluster
  String[] racks = new String[] { TestBalancer.RACK0, TestBalancer.RACK1 };
  // array of capacities of original nodes in cluster
  long[] capacities = new long[] { TestBalancer.CAPACITY,
      TestBalancer.CAPACITY };
  assertEquals(capacities.length, racks.length);
  int numOfDatanodes = capacities.length;
  NNConf nn1Conf = new MiniDFSNNTopology.NNConf("nn1");
  nn1Conf.setIpcPort(NameNode.DEFAULT_PORT);
  Configuration copiedConf = new Configuration(conf);
  cluster = new MiniDFSCluster.Builder(copiedConf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(capacities.length)
      .racks(racks)
      .simulatedCapacities(capacities)
      .build();
  HATestUtil.setFailoverConfigurations(cluster, conf);
  try {
    cluster.waitActive();
    cluster.transitionToActive(1);
    Thread.sleep(500);
    client = NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
        ClientProtocol.class).getProxy();
    long totalCapacity = TestBalancer.sum(capacities);
    // fill up the cluster to be 30% full
    long totalUsedSpace = totalCapacity * 3 / 10;
    TestBalancer.createFile(cluster, TestBalancer.filePath, totalUsedSpace
        / numOfDatanodes, (short) numOfDatanodes, 1);

    // start up an empty node with the same capacity and on the same rack
    cluster.startDataNodes(conf, 1, true, null, new String[] { newNodeRack },
        new long[] { newNodeCapacity });
    totalCapacity += newNodeCapacity;
    TestBalancer.waitForHeartBeat(totalUsedSpace, totalCapacity, client,
        cluster);
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    assertEquals(1, namenodes.size());
    assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster)));
    final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
    assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
    TestBalancer.waitForBalancer(totalUsedSpace, totalCapacity, client,
        cluster, Balancer.Parameters.DEFAULT);
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 59, Source: TestBalancerWithHANameNodes.java

Example 10: testBalancerWithPinnedBlocks

import org.apache.hadoop.hdfs.DFSUtil; // import the class the method depends on
/**
 * Make sure that the balancer can't move pinned blocks.
 * If favoredNodes are specified when a file is created, its blocks are
 * pinned via the sticky bit.
 * @throws Exception
 */
@Test(timeout=100000)
public void testBalancerWithPinnedBlocks() throws Exception {
  // This test assumes the sticky-bit based block pinning mechanism, which
  // is available only on Linux/Unix. It can be unblocked on Windows when
  // HDFS-7759 provides a different pinning mechanism for Windows.
  assumeTrue(!Path.WINDOWS);

  final Configuration conf = new HdfsConfiguration();
  initConf(conf);
  conf.setBoolean(DFS_DATANODE_BLOCK_PINNING_ENABLED, true);
  
  long[] capacities =  new long[] { CAPACITY, CAPACITY };
  String[] racks = { RACK0, RACK1 };
  int numOfDatanodes = capacities.length;

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length)
    .hosts(new String[]{"localhost", "localhost"})
    .racks(racks).simulatedCapacities(capacities).build();

  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
    
    // fill up the cluster to be 80% full
    long totalCapacity = sum(capacities);
    long totalUsedSpace = totalCapacity * 8 / 10;
    InetSocketAddress[] favoredNodes = new InetSocketAddress[numOfDatanodes];
    for (int i = 0; i < favoredNodes.length; i++) {
      favoredNodes[i] = cluster.getDataNodes().get(i).getXferAddress();
    }

    DFSTestUtil.createFile(cluster.getFileSystem(0), filePath, false, 1024,
        totalUsedSpace / numOfDatanodes, DEFAULT_BLOCK_SIZE,
        (short) numOfDatanodes, 0, false, favoredNodes);
    
    // start up an empty node with the same capacity
    cluster.startDataNodes(conf, 1, true, null, new String[] { RACK2 },
        new long[] { CAPACITY });
    
    totalCapacity += CAPACITY;
    
    // run balancer and validate results
    waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);

    // start rebalancing
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
    assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
    
  } finally {
    cluster.shutdown();
  }
  
}
 
Developer: naver, Project: hadoop, Lines: 62, Source: TestBalancer.java

Example 11: testUnknownDatanode

import org.apache.hadoop.hdfs.DFSUtil; // import the class the method depends on
@Test(timeout=100000)
public void testUnknownDatanode() throws Exception {
  Configuration conf = new HdfsConfiguration();
  initConf(conf);
  long[] distribution = new long[] {50*CAPACITY/100, 70*CAPACITY/100, 0*CAPACITY/100};
  long[] capacities = new long[] {CAPACITY, CAPACITY, CAPACITY};
  String[] racks = new String[] {RACK0, RACK1, RACK1};

  int numDatanodes = distribution.length;
  if (capacities.length != numDatanodes || racks.length != numDatanodes) {
    throw new IllegalArgumentException("Array length is not the same");
  }

  // calculate total space that need to be filled
  final long totalUsedSpace = sum(distribution);

  // fill the cluster
  ExtendedBlock[] blocks = generateBlocks(conf, totalUsedSpace,
      (short) numDatanodes);

  // redistribute blocks
  Block[][] blocksDN = distributeBlocks(
      blocks, (short)(numDatanodes-1), distribution);

  // restart the cluster: do NOT format the cluster
  conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f");
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
      .format(false)
      .racks(racks)
      .simulatedCapacities(capacities)
      .build();
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    for(int i = 0; i < 3; i++) {
      cluster.injectBlocks(i, Arrays.asList(blocksDN[i]), null);
    }

    cluster.startDataNodes(conf, 1, true, null,
        new String[]{RACK0}, null, new long[]{CAPACITY});
    cluster.triggerHeartbeats();

    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Set<String> datanodes = new HashSet<String>();
    datanodes.add(cluster.getDataNodes().get(0).getDatanodeId().getHostName());
    Balancer.Parameters p = new Balancer.Parameters(
        Balancer.Parameters.DEFAULT.policy,
        Balancer.Parameters.DEFAULT.threshold,
        Balancer.Parameters.DEFAULT.maxIdleIteration,
        datanodes, Balancer.Parameters.DEFAULT.nodesToBeIncluded);
    final int r = Balancer.run(namenodes, p, conf);
    assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 59, Source: TestBalancer.java

Example 12: testTwoReplicaShouldNotInSameDN

import org.apache.hadoop.hdfs.DFSUtil; // import the class the method depends on
/**
 * Test a special case: two replicas of the same block must not end up on
 * the same node. We have 2 nodes and a block with replicas on (DN0,SSD)
 * and (DN1,DISK). The replica on (DN0,SSD) should not be moved to
 * (DN1,SSD); otherwise DN1 would hold 2 replicas of the same block.
 */
@Test(timeout=100000)
public void testTwoReplicaShouldNotInSameDN() throws Exception {
  final Configuration conf = new HdfsConfiguration();

  int blockSize = 5 * 1024 * 1024;
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);

  int numOfDatanodes = 2;
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(2)
      .racks(new String[]{"/default/rack0", "/default/rack0"})
      .storagesPerDatanode(2)
      .storageTypes(new StorageType[][]{
          {StorageType.SSD, StorageType.DISK},
          {StorageType.SSD, StorageType.DISK}})
      .storageCapacities(new long[][]{
          {100 * blockSize, 20 * blockSize},
          {20 * blockSize, 100 * blockSize}})
      .build();

  try {
    cluster.waitActive();

    // Set the ONE_SSD storage policy on the "/bar" directory.
    DistributedFileSystem fs = cluster.getFileSystem();
    Path barDir = new Path("/bar");
    fs.mkdir(barDir, new FsPermission((short)777));
    fs.setStoragePolicy(barDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

    // Insert 30 blocks. So (DN0,SSD) and (DN1,DISK) are about half full,
    // and (DN0,DISK) and (DN1,SSD) are about 15% full.
    long fileLen = 30 * blockSize;
    // fooFile has the ONE_SSD policy, so (DN0,SSD) and (DN1,DISK) hold the
    // two replicas of one block, while (DN0,DISK) and (DN1,SSD) hold the
    // two replicas of another block.
    Path fooFile = new Path(barDir, "foo");
    createFile(cluster, fooFile, fileLen, (short) numOfDatanodes, 0);
    // update space info
    cluster.triggerHeartbeats();

    Balancer.Parameters p = Balancer.Parameters.DEFAULT;
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    final int r = Balancer.run(namenodes, p, conf);

    // The replica on (DN0,SSD) was not moved to (DN1,SSD) because (DN1,DISK)
    // already holds a replica of that block; otherwise DN1 would have 2
    // replicas. For the same reason, no replica was moved at all.
    assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);

  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 63, Source: TestBalancer.java

Example 13: runBalancer

import org.apache.hadoop.hdfs.DFSUtil; // import the class the method depends on
static void runBalancer(Suite s,
    final long totalUsed, final long totalCapacity) throws Exception {
  final double avg = totalUsed*100.0/totalCapacity;

  LOG.info("BALANCER 0: totalUsed=" + totalUsed
      + ", totalCapacity=" + totalCapacity
      + ", avg=" + avg);
  wait(s.clients, totalUsed, totalCapacity);
  LOG.info("BALANCER 1");

  // start rebalancing
  final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(s.conf);
  final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, s.conf);
  Assert.assertEquals(ExitStatus.SUCCESS.getExitCode(), r);

  LOG.info("BALANCER 2");
  wait(s.clients, totalUsed, totalCapacity);
  LOG.info("BALANCER 3");

  int i = 0;
  for(boolean balanced = false; !balanced; i++) {
    final long[] used = new long[s.cluster.getDataNodes().size()];
    final long[] cap = new long[used.length];

    for(int n = 0; n < s.clients.length; n++) {
      final DatanodeInfo[] datanodes = s.clients[n].getDatanodeReport(
          DatanodeReportType.ALL);
      Assert.assertEquals(datanodes.length, used.length);

      for(int d = 0; d < datanodes.length; d++) {
        if (n == 0) {
          used[d] = datanodes[d].getDfsUsed();
          cap[d] = datanodes[d].getCapacity();
          if (i % 100 == 0) {
            LOG.warn("datanodes[" + d
                + "]: getDfsUsed()=" + datanodes[d].getDfsUsed()
                + ", getCapacity()=" + datanodes[d].getCapacity());
          }
        } else {
          Assert.assertEquals(used[d], datanodes[d].getDfsUsed());
          Assert.assertEquals(cap[d], datanodes[d].getCapacity());
        }
      }
    }

    balanced = true;
    for(int d = 0; d < used.length; d++) {
      final double p = used[d]*100.0/cap[d];
      balanced = p <= avg + Balancer.Parameters.DEFAULT.threshold;
      if (!balanced) {
        if (i % 100 == 0) {
          LOG.warn("datanodes " + d + " is not yet balanced: "
              + "used=" + used[d] + ", cap=" + cap[d] + ", avg=" + avg);
          LOG.warn("TestBalancer.sum(used)=" + TestBalancer.sum(used)
              + ", TestBalancer.sum(cap)=" + TestBalancer.sum(cap));
        }
        sleep(100);
        break;
      }
    }
  }
  LOG.info("BALANCER 6");
}
 
Developer: naver, Project: hadoop, Lines: 64, Source: TestBalancerWithMultipleNameNodes.java


Note: The org.apache.hadoop.hdfs.DFSUtil.getNsServiceRpcUris examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by various developers; copyright remains with the original authors, and distribution or use must follow each project's license. Do not reproduce without permission.