

Java Storage.StorageDirectory Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory. If you are wondering what Storage.StorageDirectory is for and how it is used in practice, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.common.Storage.


A total of 15 code examples of Storage.StorageDirectory are shown below, sorted by popularity by default.
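Before the individual examples, here is a minimal, self-contained sketch of how a Storage.StorageDirectory is typically constructed and inspected. It is not taken from any of the projects below: the directory path is a placeholder, and it only uses the constructor and accessors that actually appear in the examples (getRoot, getCurrentDir, getVersionFile).

import java.io.File;

import org.apache.hadoop.hdfs.server.common.Storage;

public class StorageDirectoryExample {
  public static void main(String[] args) {
    // Placeholder storage root; in a real DataNode this would be one entry
    // from dfs.datanode.data.dir.
    File root = new File("/tmp/dfs/data0");

    // Wrap the root directory in a StorageDirectory.
    Storage.StorageDirectory sd = new Storage.StorageDirectory(root);

    // The accessors used throughout the examples below.
    System.out.println("root dir:     " + sd.getRoot());        // the storage root itself
    System.out.println("current dir:  " + sd.getCurrentDir());  // <root>/current
    System.out.println("VERSION file: " + sd.getVersionFile()); // <root>/current/VERSION
  }
}

Examples 10 and 11 below use exactly these accessors to verify that a DataNode data directory and a block pool slice directory have the expected on-disk layout.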

Example 1: getInitialVolumeFailureInfos

import org.apache.hadoop.hdfs.server.common.Storage; // import the package/class the example depends on
/**
 * Gets initial volume failure information for all volumes that failed
 * immediately at startup.  The method works by determining the set difference
 * between all configured storage locations and the actual storage locations in
 * use after attempting to put all of them into service.
 *
 * @return each storage location that has failed
 */
private static List<VolumeFailureInfo> getInitialVolumeFailureInfos(
    Collection<StorageLocation> dataLocations, DataStorage storage) {
  Set<String> failedLocationSet = Sets.newHashSetWithExpectedSize(
      dataLocations.size());
  for (StorageLocation sl: dataLocations) {
    failedLocationSet.add(sl.getFile().getAbsolutePath());
  }
  for (Iterator<Storage.StorageDirectory> it = storage.dirIterator();
       it.hasNext(); ) {
    Storage.StorageDirectory sd = it.next();
    failedLocationSet.remove(sd.getRoot().getAbsolutePath());
  }
  List<VolumeFailureInfo> volumeFailureInfos = Lists.newArrayListWithCapacity(
      failedLocationSet.size());
  long failureDate = Time.now();
  for (String failedStorageLocation: failedLocationSet) {
    volumeFailureInfos.add(new VolumeFailureInfo(failedStorageLocation,
        failureDate));
  }
  return volumeFailureInfos;
}
 
Developer: naver, Project: hadoop, Lines of code: 30, Source: FsDatasetImpl.java

Example 2: createStorageDirs

import org.apache.hadoop.hdfs.server.common.Storage; // import the package/class the example depends on
private static void createStorageDirs(DataStorage storage, Configuration conf,
    int numDirs) throws IOException {
  List<Storage.StorageDirectory> dirs =
      new ArrayList<Storage.StorageDirectory>();
  List<String> dirStrings = new ArrayList<String>();
  for (int i = 0; i < numDirs; i++) {
    File loc = new File(BASE_DIR + "/data" + i);
    dirStrings.add(new Path(loc.toString()).toUri().toString());
    loc.mkdirs();
    dirs.add(createStorageDirectory(loc));
    when(storage.getStorageDir(i)).thenReturn(dirs.get(i));
  }

  String dataDir = StringUtils.join(",", dirStrings);
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir);
  when(storage.dirIterator()).thenReturn(dirs.iterator());
  when(storage.getNumStorageDirs()).thenReturn(numDirs);
}
 
Developer: naver, Project: hadoop, Lines of code: 19, Source: TestFsDatasetImpl.java

Example 3: addVolume

import org.apache.hadoop.hdfs.server.common.Storage; // import the package/class the example depends on
private void addVolume(Collection<StorageLocation> dataLocations,
    Storage.StorageDirectory sd) throws IOException {
  final File dir = sd.getCurrentDir();
  final StorageType storageType =
      getStorageTypeFromLocations(dataLocations, sd.getRoot());

  // If an IOException is raised from FsVolumeImpl() or getVolumeMap(), nothing
  // needs to be rolled back to keep the various data structures, e.g.,
  // storageMap and asyncDiskService, consistent.
  FsVolumeImpl fsVolume = new FsVolumeImpl(
      this, sd.getStorageUuid(), dir, this.conf, storageType);
  FsVolumeReference ref = fsVolume.obtainReference();
  ReplicaMap tempVolumeMap = new ReplicaMap(this);
  fsVolume.getVolumeMap(tempVolumeMap, ramDiskReplicaTracker);

  synchronized (this) {
    volumeMap.addAll(tempVolumeMap);
    storageMap.put(sd.getStorageUuid(),
        new DatanodeStorage(sd.getStorageUuid(),
            DatanodeStorage.State.NORMAL,
            storageType));
    asyncDiskService.addVolume(sd.getCurrentDir());
    volumes.addVolume(ref);
  }

  LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
}
 
Developer: naver, Project: hadoop, Lines of code: 28, Source: FsDatasetImpl.java

Example 4: removeVolumes

import org.apache.hadoop.hdfs.server.common.Storage; // import the package/class the example depends on
/**
 * Removes a set of volumes from FsDataset.
 * @param volumesToRemove a set of absolute root path of each volume.
 * @param clearFailure set true to clear failure information.
 *
 * DataNode should call this function before calling
 * {@link DataStorage#removeVolumes(java.util.Collection)}.
 */
@Override
public synchronized void removeVolumes(
    Set<File> volumesToRemove, boolean clearFailure) {
  // Make sure that all volumes are absolute path.
  for (File vol : volumesToRemove) {
    Preconditions.checkArgument(vol.isAbsolute(),
        String.format("%s is not absolute path.", vol.getPath()));
  }
  for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
    Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
    final File absRoot = sd.getRoot().getAbsoluteFile();
    if (volumesToRemove.contains(absRoot)) {
      LOG.info("Removing " + absRoot + " from FsDataset.");

      // Disable the volume from the service.
      asyncDiskService.removeVolume(sd.getCurrentDir());
      volumes.removeVolume(absRoot, clearFailure);

      // Remove all replica information for the blocks on this volume. Unlike
      // updating the volumeMap in addVolume(), this operation does not scan
      // disks.
      for (String bpid : volumeMap.getBlockPoolList()) {
        for (Iterator<ReplicaInfo> it = volumeMap.replicas(bpid).iterator();
             it.hasNext(); ) {
          ReplicaInfo block = it.next();
          final File absBasePath =
              new File(block.getVolume().getBasePath()).getAbsoluteFile();
          if (absBasePath.equals(absRoot)) {
            invalidate(bpid, block);
            it.remove();
          }
        }
      }

      storageMap.remove(sd.getStorageUuid());
    }
  }
  setupAsyncLazyPersistThreads();
}
 
Developer: naver, Project: hadoop, Lines of code: 48, Source: FsDatasetImpl.java

Example 5: parseChangedVolumes

import org.apache.hadoop.hdfs.server.common.Storage; // import the package/class the example depends on
/**
 * Parse the new DFS_DATANODE_DATA_DIR value in the configuration to detect
 * changed volumes.
 * @param newVolumes a comma separated string that specifies the data volumes.
 * @return changed volumes.
 * @throws IOException if none of the directories are specified in the
 * configuration.
 */
@VisibleForTesting
ChangedVolumes parseChangedVolumes(String newVolumes) throws IOException {
  Configuration conf = new Configuration();
  conf.set(DFS_DATANODE_DATA_DIR_KEY, newVolumes);
  List<StorageLocation> locations = getStorageLocations(conf);

  if (locations.isEmpty()) {
    throw new IOException("No directory is specified.");
  }

  ChangedVolumes results = new ChangedVolumes();
  results.newLocations.addAll(locations);

  for (Iterator<Storage.StorageDirectory> it = storage.dirIterator();
       it.hasNext(); ) {
    Storage.StorageDirectory dir = it.next();
    boolean found = false;
    for (Iterator<StorageLocation> sl = results.newLocations.iterator();
         sl.hasNext(); ) {
      StorageLocation location = sl.next();
      if (location.getFile().getCanonicalPath().equals(
          dir.getRoot().getCanonicalPath())) {
        sl.remove();
        results.unchangedLocations.add(location);
        found = true;
        break;
      }
    }

    if (!found) {
      results.deactivateLocations.add(
          StorageLocation.parse(dir.getRoot().toString()));
    }
  }

  return results;
}
 
Developer: naver, Project: hadoop, Lines of code: 46, Source: DataNode.java

Example 6: testAddVolumes

import org.apache.hadoop.hdfs.server.common.Storage; // import the package/class the example depends on
@Test
public void testAddVolumes() throws IOException {
  final int numNewVolumes = 3;
  final int numExistingVolumes = dataset.getVolumes().size();
  final int totalVolumes = numNewVolumes + numExistingVolumes;
  Set<String> expectedVolumes = new HashSet<String>();
  List<NamespaceInfo> nsInfos = Lists.newArrayList();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }
  for (int i = 0; i < numNewVolumes; i++) {
    String path = BASE_DIR + "/newData" + i;
    String pathUri = new Path(path).toUri().toString();
    expectedVolumes.add(new File(pathUri).toString());
    StorageLocation loc = StorageLocation.parse(pathUri);
    Storage.StorageDirectory sd = createStorageDirectory(new File(path));
    DataStorage.VolumeBuilder builder =
        new DataStorage.VolumeBuilder(storage, sd);
    when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
        anyListOf(NamespaceInfo.class)))
        .thenReturn(builder);

    dataset.addVolume(loc, nsInfos);
  }

  assertEquals(totalVolumes, dataset.getVolumes().size());
  assertEquals(totalVolumes, dataset.storageMap.size());

  Set<String> actualVolumes = new HashSet<String>();
  for (int i = 0; i < numNewVolumes; i++) {
    actualVolumes.add(
        dataset.getVolumes().get(numExistingVolumes + i).getBasePath());
  }
  assertEquals(actualVolumes.size(), expectedVolumes.size());
  assertTrue(actualVolumes.containsAll(expectedVolumes));
}
 
Developer: naver, Project: hadoop, Lines of code: 37, Source: TestFsDatasetImpl.java

Example 7: testRemoveNewlyAddedVolume

import org.apache.hadoop.hdfs.server.common.Storage; // import the package/class the example depends on
@Test(timeout = 5000)
public void testRemoveNewlyAddedVolume() throws IOException {
  final int numExistingVolumes = dataset.getVolumes().size();
  List<NamespaceInfo> nsInfos = new ArrayList<>();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }
  String newVolumePath = BASE_DIR + "/newVolumeToRemoveLater";
  StorageLocation loc = StorageLocation.parse(newVolumePath);

  Storage.StorageDirectory sd = createStorageDirectory(new File(newVolumePath));
  DataStorage.VolumeBuilder builder =
      new DataStorage.VolumeBuilder(storage, sd);
  when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
      anyListOf(NamespaceInfo.class)))
      .thenReturn(builder);

  dataset.addVolume(loc, nsInfos);
  assertEquals(numExistingVolumes + 1, dataset.getVolumes().size());

  when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
  when(storage.getStorageDir(numExistingVolumes)).thenReturn(sd);
  Set<File> volumesToRemove = new HashSet<>();
  volumesToRemove.add(loc.getFile());
  dataset.removeVolumes(volumesToRemove, true);
  assertEquals(numExistingVolumes, dataset.getVolumes().size());
}
 
Developer: naver, Project: hadoop, Lines of code: 28, Source: TestFsDatasetImpl.java

Example 8: testAddVolumeFailureReleasesInUseLock

import org.apache.hadoop.hdfs.server.common.Storage; // import the package/class the example depends on
@Test
public void testAddVolumeFailureReleasesInUseLock() throws IOException {
  FsDatasetImpl spyDataset = spy(dataset);
  FsVolumeImpl mockVolume = mock(FsVolumeImpl.class);
  File badDir = new File(BASE_DIR, "bad");
  badDir.mkdirs();
  doReturn(mockVolume).when(spyDataset)
      .createFsVolume(anyString(), any(File.class), any(StorageType.class));
  doThrow(new IOException("Failed to getVolumeMap()"))
    .when(mockVolume).getVolumeMap(
      anyString(),
      any(ReplicaMap.class),
      any(RamDiskReplicaLruTracker.class));

  Storage.StorageDirectory sd = createStorageDirectory(badDir);
  sd.lock();
  DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
  when(storage.prepareVolume(eq(datanode), eq(badDir.getAbsoluteFile()),
      Matchers.<List<NamespaceInfo>>any()))
      .thenReturn(builder);

  StorageLocation location = StorageLocation.parse(badDir.toString());
  List<NamespaceInfo> nsInfos = Lists.newArrayList();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }

  try {
    spyDataset.addVolume(location, nsInfos);
    fail("Expect to throw MultipleIOException");
  } catch (MultipleIOException e) {
  }

  FsDatasetTestUtil.assertFileLockReleased(badDir.toString());
}
 
Developer: naver, Project: hadoop, Lines of code: 36, Source: TestFsDatasetImpl.java

Example 9: testAddBackRemovedVolume

import org.apache.hadoop.hdfs.server.common.Storage; // import the package/class the example depends on
@Test(timeout=60000)
public void testAddBackRemovedVolume()
    throws IOException, TimeoutException, InterruptedException,
    ReconfigurationException {
  startDFSCluster(1, 2);
  // Create some data on every volume.
  createFile(new Path("/test"), 32);

  DataNode dn = cluster.getDataNodes().get(0);
  Configuration conf = dn.getConf();
  String oldDataDir = conf.get(DFS_DATANODE_DATA_DIR_KEY);
  String keepDataDir = oldDataDir.split(",")[0];
  String removeDataDir = oldDataDir.split(",")[1];

  dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, keepDataDir);
  for (int i = 0; i < cluster.getNumNameNodes(); i++) {
    String bpid = cluster.getNamesystem(i).getBlockPoolId();
    BlockPoolSliceStorage bpsStorage =
        dn.getStorage().getBPStorage(bpid);
    // Make sure that there is no block pool level storage under removeDataDir.
    for (int j = 0; j < bpsStorage.getNumStorageDirs(); j++) {
      Storage.StorageDirectory sd = bpsStorage.getStorageDir(j);
      assertFalse(sd.getRoot().getAbsolutePath().startsWith(
          new File(removeDataDir).getAbsolutePath()
      ));
    }
    assertEquals(dn.getStorage().getBPStorage(bpid).getNumStorageDirs(), 1);
  }

  // Bring the removed directory back. This only succeeds if all metadata about
  // this directory was removed in the previous step.
  dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir);
}
 
Developer: naver, Project: hadoop, Lines of code: 34, Source: TestDataNodeHotSwapVolumes.java

Example 10: checkDir

import org.apache.hadoop.hdfs.server.common.Storage; // import the package/class the example depends on
/** Check whether the path is a valid DataNode data directory. */
private static void checkDir(File dataDir) {
  Storage.StorageDirectory sd = new Storage.StorageDirectory(dataDir);
  assertTrue(sd.getRoot().isDirectory());
  assertTrue(sd.getCurrentDir().isDirectory());
  assertTrue(sd.getVersionFile().isFile());
}
 
Developer: naver, Project: hadoop, Lines of code: 8, Source: TestDataStorage.java

Example 11: checkDir

import org.apache.hadoop.hdfs.server.common.Storage; // import the package/class the example depends on
/** Check whether the root is a valid BlockPoolSlice storage. */
private static void checkDir(File root, String bpid) {
  Storage.StorageDirectory sd = new Storage.StorageDirectory(root);
  File bpRoot = new File(sd.getCurrentDir(), bpid);
  Storage.StorageDirectory bpSd = new Storage.StorageDirectory(bpRoot);
  assertTrue(bpSd.getRoot().isDirectory());
  assertTrue(bpSd.getCurrentDir().isDirectory());
  assertTrue(bpSd.getVersionFile().isFile());
}
 
Developer: yncxcw, Project: big-c, Lines of code: 10, Source: TestDataStorage.java

Example 12: testAddVolumes

import org.apache.hadoop.hdfs.server.common.Storage; // import the package/class the example depends on
@Test
public void testAddVolumes() throws IOException {
  final int numNewVolumes = 3;
  final int numExistingVolumes = getNumVolumes();
  final int totalVolumes = numNewVolumes + numExistingVolumes;
  Set<String> expectedVolumes = new HashSet<String>();
  List<NamespaceInfo> nsInfos = Lists.newArrayList();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }
  for (int i = 0; i < numNewVolumes; i++) {
    String path = BASE_DIR + "/newData" + i;
    String pathUri = new Path(path).toUri().toString();
    expectedVolumes.add(new File(pathUri).toString());
    StorageLocation loc = StorageLocation.parse(pathUri);
    Storage.StorageDirectory sd = createStorageDirectory(new File(path));
    DataStorage.VolumeBuilder builder =
        new DataStorage.VolumeBuilder(storage, sd);
    when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
        anyListOf(NamespaceInfo.class)))
        .thenReturn(builder);

    dataset.addVolume(loc, nsInfos);
  }

  assertEquals(totalVolumes, getNumVolumes());
  assertEquals(totalVolumes, dataset.storageMap.size());

  Set<String> actualVolumes = new HashSet<String>();
  try (FsDatasetSpi.FsVolumeReferences volumes =
      dataset.getFsVolumeReferences()) {
    for (int i = 0; i < numNewVolumes; i++) {
      actualVolumes.add(volumes.get(numExistingVolumes + i).getBasePath());
    }
  }
  assertEquals(actualVolumes.size(), expectedVolumes.size());
  assertTrue(actualVolumes.containsAll(expectedVolumes));
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 39, Source: TestFsDatasetImpl.java

Example 13: createStorageDirectory

import org.apache.hadoop.hdfs.server.common.Storage; // import the package/class the example depends on
private static Storage.StorageDirectory createStorageDirectory(File root) {
  Storage.StorageDirectory sd = new Storage.StorageDirectory(root);
  dsForStorageUuid.createStorageID(sd, false);
  return sd;
}
 
Developer: naver, Project: hadoop, Lines of code: 6, Source: TestFsDatasetImpl.java

Example 14: testAddVolumeFailures

import org.apache.hadoop.hdfs.server.common.Storage; // import the package/class the example depends on
@Test
public void testAddVolumeFailures() throws IOException {
  startDFSCluster(1, 1);
  final String dataDir = cluster.getDataDirectory();

  DataNode dn = cluster.getDataNodes().get(0);
  List<String> newDirs = Lists.newArrayList();
  final int NUM_NEW_DIRS = 4;
  for (int i = 0; i < NUM_NEW_DIRS; i++) {
    File newVolume = new File(dataDir, "new_vol" + i);
    newDirs.add(newVolume.toString());
    if (i % 2 == 0) {
      // Make addVolume() fail.
      newVolume.createNewFile();
    }
  }

  String newValue = dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY) + "," +
      Joiner.on(",").join(newDirs);
  try {
    dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, newValue);
    fail("Expect to throw IOException.");
  } catch (ReconfigurationException e) {
    String errorMessage = e.getCause().getMessage();
    String[] messages = errorMessage.split("\\r?\\n");
    assertEquals(2, messages.length);
    assertThat(messages[0], containsString("new_vol0"));
    assertThat(messages[1], containsString("new_vol2"));
  }

  // Make sure that vol0 and vol2's metadata are not left in memory.
  FsDatasetSpi<?> dataset = dn.getFSDataset();
  for (FsVolumeSpi volume : dataset.getVolumes()) {
    assertThat(volume.getBasePath(), is(not(anyOf(
        is(newDirs.get(0)), is(newDirs.get(2))))));
  }
  DataStorage storage = dn.getStorage();
  for (int i = 0; i < storage.getNumStorageDirs(); i++) {
    Storage.StorageDirectory sd = storage.getStorageDir(i);
    assertThat(sd.getRoot().toString(),
        is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
  }

  // The newly effective conf does not have vol0 and vol2.
  String[] effectiveVolumes =
      dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY).split(",");
  assertEquals(4, effectiveVolumes.length);
  for (String ev : effectiveVolumes) {
    assertThat(StorageLocation.parse(ev).getFile().getCanonicalPath(),
        is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 53, Source: TestDataNodeHotSwapVolumes.java

Example 15: testAddVolumeFailures

import org.apache.hadoop.hdfs.server.common.Storage; // import the package/class the example depends on
@Test
public void testAddVolumeFailures() throws IOException {
  startDFSCluster(1, 1);
  final String dataDir = cluster.getDataDirectory();

  DataNode dn = cluster.getDataNodes().get(0);
  List<String> newDirs = Lists.newArrayList();
  final int NUM_NEW_DIRS = 4;
  for (int i = 0; i < NUM_NEW_DIRS; i++) {
    File newVolume = new File(dataDir, "new_vol" + i);
    newDirs.add(newVolume.toString());
    if (i % 2 == 0) {
      // Make addVolume() fail.
      newVolume.createNewFile();
    }
  }

  String newValue = dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY) + "," +
      Joiner.on(",").join(newDirs);
  try {
    dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, newValue);
    fail("Expect to throw IOException.");
  } catch (ReconfigurationException e) {
    String errorMessage = e.getCause().getMessage();
    String[] messages = errorMessage.split("\\r?\\n");
    assertEquals(2, messages.length);
    assertThat(messages[0], containsString("new_vol0"));
    assertThat(messages[1], containsString("new_vol2"));
  }

  // Make sure that vol0 and vol2's metadata are not left in memory.
  FsDatasetSpi<?> dataset = dn.getFSDataset();
  try (FsDatasetSpi.FsVolumeReferences volumes =
      dataset.getFsVolumeReferences()) {
    for (FsVolumeSpi volume : volumes) {
      assertThat(volume.getBasePath(), is(not(anyOf(
          is(newDirs.get(0)), is(newDirs.get(2))))));
    }
  }
  DataStorage storage = dn.getStorage();
  for (int i = 0; i < storage.getNumStorageDirs(); i++) {
    Storage.StorageDirectory sd = storage.getStorageDir(i);
    assertThat(sd.getRoot().toString(),
        is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
  }

  // The newly effective conf does not have vol0 and vol2.
  String[] effectiveVolumes =
      dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY).split(",");
  assertEquals(4, effectiveVolumes.length);
  for (String ev : effectiveVolumes) {
    assertThat(StorageLocation.parse(ev).getFile().getCanonicalPath(),
        is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 56, Source: TestDataNodeHotSwapVolumes.java


Note: The org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Refer to each project's license before redistributing or reusing the code; do not republish without permission.