

Java FsDatasetSpi.FsVolumeReferences Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences. If you have been wondering what FsDatasetSpi.FsVolumeReferences does, how to use it, or where to find concrete examples, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.


The sections below present 15 code examples of FsDatasetSpi.FsVolumeReferences, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
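Every example below follows the same idiom: FsDatasetSpi#getFsVolumeReferences() returns an FsVolumeReferences handle that pins the DataNode's volumes while you iterate them, and the handle must be closed (typically via try-with-resources) so the volumes can later be removed or hot-swapped. The following minimal sketch distills that shared pattern; it is an illustration assuming a started DataNode `dn`, not code taken from any single example.

import java.io.IOException;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

/** Print the storage ID of every volume on a DataNode (illustrative helper). */
static void printVolumeStorageIds(DataNode dn) throws IOException {
  // try-with-resources guarantees the references are released even if the
  // iteration throws, so the volumes are never pinned indefinitely.
  try (FsDatasetSpi.FsVolumeReferences volumes =
      dn.getFSDataset().getFsVolumeReferences()) {
    for (FsVolumeSpi vol : volumes) {
      System.out.println(vol.getStorageID());
    }
  }
}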

Example 1: startCluster

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; // import the package/class this method depends on
/**
 * Start a MiniDFSCluster for the test.
 *
 * @param blockSize the block size to configure
 * @param numDatanodes the number of DataNodes to start
 * @param perVolumeCapacity limit the capacity of each volume to the given
 *                          value. If negative, then don't limit.
 * @throws IOException
 */
private void startCluster(int blockSize, int numDatanodes, long perVolumeCapacity) throws IOException {
  initConfig(blockSize);

  cluster = new MiniDFSCluster
      .Builder(conf)
      .storagesPerDatanode(STORAGES_PER_DATANODE)
      .numDataNodes(numDatanodes)
      .build();
  fs = cluster.getFileSystem();
  client = fs.getClient();
  cluster.waitActive();

  if (perVolumeCapacity >= 0) {
    try (FsDatasetSpi.FsVolumeReferences volumes =
        cluster.getDataNodes().get(0).getFSDataset().getFsVolumeReferences()) {
      singletonVolumeRef = volumes.get(0).obtainReference();
    }
    singletonVolume = ((FsVolumeImpl) singletonVolumeRef.getVolume());
    singletonVolume.setCapacityForTesting(perVolumeCapacity);
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 29, Source: TestSpaceReservation.java
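Note that obtainReference() above takes out an additional, per-volume reference that outlives the closed FsVolumeReferences handle; it is what lets the test keep using singletonVolume safely. Since FsVolumeReference is Closeable, that reference must itself be released. A hedged sketch of the matching teardown (the @After method and its name are assumptions, not part of the original test):

@After
public void releaseVolumeAndShutdown() throws IOException {
  if (singletonVolumeRef != null) {
    // Closing the FsVolumeReference releases the hold taken by
    // obtainReference(), allowing the volume to be removed again.
    singletonVolumeRef.close();
    singletonVolumeRef = null;
  }
  if (cluster != null) {
    cluster.shutdown();
  }
}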

Example 2: testLocalDirs

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; // import the package/class this method depends on
/**
 * Check that the permissions of the local DN directories are as expected.
 */
@Test
public void testLocalDirs() throws Exception {
  Configuration conf = new Configuration();
  final String permStr = conf.get(
    DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY);
  FsPermission expected = new FsPermission(permStr);

  // Check permissions on directories in 'dfs.datanode.data.dir'
  FileSystem localFS = FileSystem.getLocal(conf);
  for (DataNode dn : cluster.getDataNodes()) {
    try (FsDatasetSpi.FsVolumeReferences volumes =
        dn.getFSDataset().getFsVolumeReferences()) {
      for (FsVolumeSpi vol : volumes) {
        String dir = vol.getBasePath();
        Path dataDir = new Path(dir);
        FsPermission actual = localFS.getFileStatus(dataDir).getPermission();
        assertEquals("Permission for dir: " + dataDir + ", is " + actual +
            ", while expected is " + expected, expected, actual);
      }
    }
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 26, Source: TestDiskError.java

Example 3: setVolumeFull

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; // import the package/class this method depends on
private void setVolumeFull(DataNode dn, StorageType type) {
  try (FsDatasetSpi.FsVolumeReferences refs = dn.getFSDataset()
      .getFsVolumeReferences()) {
    for (FsVolumeSpi fvs : refs) {
      FsVolumeImpl volume = (FsVolumeImpl) fvs;
      if (volume.getStorageType() == type) {
        LOG.info("setCapacity to 0 for [" + volume.getStorageType() + "]"
            + volume.getStorageID());
        volume.setCapacityForTesting(0);
      }
    }
  } catch (IOException e) {
    LOG.error("Unexpected exception by closing FsVolumeReference", e);
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 16, Source: TestStorageMover.java
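A typical use of this helper is to force replicas onto a different storage tier by reporting zero capacity on the current one. A minimal usage sketch follows; the surrounding flow and the triggerHeartbeat call are assumptions about how such a test could be wired, not code from TestStorageMover:

// Fill every DISK volume on the first DataNode, then push a heartbeat so
// the NameNode sees the zero capacity and places new replicas elsewhere.
DataNode dn = cluster.getDataNodes().get(0);
setVolumeFull(dn, StorageType.DISK);
DataNodeTestUtils.triggerHeartbeat(dn);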

Example 4: ensureLazyPersistBlocksAreSaved

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; // import the package/class this method depends on
/**
 * Make sure at least one non-transient volume has a saved copy of each
 * replica. The loop is unbounded so that the asynchronous lazy-persist
 * tasks can finish before verification; callers of
 * ensureLazyPersistBlocksAreSaved expect either a successful pass or a
 * timeout failure enforced by the test harness.
 */
protected final void ensureLazyPersistBlocksAreSaved(
    LocatedBlocks locatedBlocks) throws IOException, InterruptedException {
  final String bpid = cluster.getNamesystem().getBlockPoolId();

  final Set<Long> persistedBlockIds = new HashSet<Long>();

  try (FsDatasetSpi.FsVolumeReferences volumes =
      cluster.getDataNodes().get(0).getFSDataset().getFsVolumeReferences()) {
    while (persistedBlockIds.size() < locatedBlocks.getLocatedBlocks()
        .size()) {
      // Sleep 1 second before each verification iteration
      Thread.sleep(1000);

      for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
        for (FsVolumeSpi v : volumes) {
          if (v.isTransientStorage()) {
            continue;
          }

          FsVolumeImpl volume = (FsVolumeImpl) v;
          File lazyPersistDir =
              volume.getBlockPoolSlice(bpid).getLazypersistDir();

          long blockId = lb.getBlock().getBlockId();
          File targetDir =
              DatanodeUtil.idToBlockDir(lazyPersistDir, blockId);
          File blockFile = new File(targetDir, lb.getBlock().getBlockName());
          if (blockFile.exists()) {
            // Found a persisted copy of this block; record its id
            persistedBlockIds.add(blockId);
          }
        }
      }
    }
  }

  // We should have found a persisted copy for each located block.
  assertThat(persistedBlockIds.size(), is(locatedBlocks.getLocatedBlocks().size()));
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 46, Source: LazyPersistTestCase.java
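The hand-rolled sleep loop above relies on the harness timeout to bound the wait. The same check can be written with GenericTestUtils.waitFor, the helper Example 6 uses, which makes the polling interval and deadline explicit (and adds TimeoutException to the method's throws clause). A hedged sketch, where countPersistedBlocks is a hypothetical helper wrapping the volume scan above:

GenericTestUtils.waitFor(new Supplier<Boolean>() {
  @Override
  public Boolean get() {
    try {
      // Re-run the volume scan and compare against the expected count.
      return countPersistedBlocks(locatedBlocks) ==
          locatedBlocks.getLocatedBlocks().size();
    } catch (IOException e) {
      return false; // treat as "not yet"; waitFor enforces the deadline
    }
  }
}, 1000, 60000); // poll every second, fail after 60 seconds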

Example 5: verifyDeletedBlocks

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; // import the package/class this method depends on
protected final boolean verifyDeletedBlocks(LocatedBlocks locatedBlocks)
    throws IOException, InterruptedException {

  LOG.info("Verifying replica has no saved copy after deletion.");
  triggerBlockReport();

  while (cluster.getFsDatasetTestUtils(0).getPendingAsyncDeletions() > 0L) {
    Thread.sleep(1000);
  }

  final String bpid = cluster.getNamesystem().getBlockPoolId();
  final FsDatasetSpi<?> dataset =
      cluster.getDataNodes().get(0).getFSDataset();

  // Make sure the deleted replica does not have a copy on either the
  // finalized dir of a transient volume or the lazy-persist dir of a
  // non-transient volume
  try (FsDatasetSpi.FsVolumeReferences volumes =
      dataset.getFsVolumeReferences()) {
    for (FsVolumeSpi vol : volumes) {
      FsVolumeImpl volume = (FsVolumeImpl) vol;
      File targetDir = (volume.isTransientStorage()) ?
          volume.getBlockPoolSlice(bpid).getFinalizedDir() :
          volume.getBlockPoolSlice(bpid).getLazypersistDir();
      if (!verifyBlockDeletedFromDir(targetDir, locatedBlocks)) {
        return false;
      }
    }
  }
  return true;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 33, Source: LazyPersistTestCase.java

Example 6: testSpaceReleasedOnUnexpectedEof

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; // import the package/class this method depends on
/**
 * Ensure that reserved space is released when the client goes away
 * unexpectedly.
 *
 * The verification is done for each replica in the write pipeline.
 *
 * @throws IOException
 */
@Test(timeout=300000)
public void testSpaceReleasedOnUnexpectedEof()
    throws IOException, InterruptedException, TimeoutException {
  final short replication = 3;
  startCluster(BLOCK_SIZE, replication, -1);

  final String methodName = GenericTestUtils.getMethodName();
  final Path file = new Path("/" + methodName + ".01.dat");

  // Write 1 byte to the file and kill the writer.
  FSDataOutputStream os = fs.create(file, replication);
  os.write(new byte[1]);
  os.hsync();
  DFSTestUtil.abortStream((DFSOutputStream) os.getWrappedStream());

  // Ensure all space reserved for the replica was released on each
  // DataNode.
  for (DataNode dn : cluster.getDataNodes()) {
    try (FsDatasetSpi.FsVolumeReferences volumes =
        dn.getFSDataset().getFsVolumeReferences()) {
      final FsVolumeImpl volume = (FsVolumeImpl) volumes.get(0);
      GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
          return (volume.getReservedForReplicas() == 0);
        }
      }, 500, Integer.MAX_VALUE); // Wait until the test times out.
    }
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 39, Source: TestSpaceReservation.java
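Since the Supplier used here has a single abstract method, the anonymous class collapses to a lambda on Java 8+. A sketch of the equivalent call:

// Identical behavior: poll every 500 ms until the reserved space hits
// zero; the @Test(timeout=300000) annotation bounds the overall wait.
GenericTestUtils.waitFor(
    () -> volume.getReservedForReplicas() == 0,
    500, Integer.MAX_VALUE);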

Example 7: getNumVolumes

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; // import the package/class this method depends on
private int getNumVolumes() {
  try (FsDatasetSpi.FsVolumeReferences volumes =
      dataset.getFsVolumeReferences()) {
    return volumes.size();
  } catch (IOException e) {
    return 0;
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 9, Source: TestFsDatasetImpl.java

Example 8: testAddVolumes

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; // import the package/class this method depends on
@Test
public void testAddVolumes() throws IOException {
  final int numNewVolumes = 3;
  final int numExistingVolumes = getNumVolumes();
  final int totalVolumes = numNewVolumes + numExistingVolumes;
  Set<String> expectedVolumes = new HashSet<String>();
  List<NamespaceInfo> nsInfos = Lists.newArrayList();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }
  for (int i = 0; i < numNewVolumes; i++) {
    String path = BASE_DIR + "/newData" + i;
    String pathUri = new Path(path).toUri().toString();
    expectedVolumes.add(new File(pathUri).toString());
    StorageLocation loc = StorageLocation.parse(pathUri);
    Storage.StorageDirectory sd = createStorageDirectory(new File(path));
    DataStorage.VolumeBuilder builder =
        new DataStorage.VolumeBuilder(storage, sd);
    when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
        anyListOf(NamespaceInfo.class)))
        .thenReturn(builder);

    dataset.addVolume(loc, nsInfos);
  }

  assertEquals(totalVolumes, getNumVolumes());
  assertEquals(totalVolumes, dataset.storageMap.size());

  Set<String> actualVolumes = new HashSet<String>();
  try (FsDatasetSpi.FsVolumeReferences volumes =
      dataset.getFsVolumeReferences()) {
    for (int i = 0; i < numNewVolumes; i++) {
      actualVolumes.add(volumes.get(numExistingVolumes + i).getBasePath());
    }
  }
  assertEquals(expectedVolumes.size(), actualVolumes.size());
  assertTrue(actualVolumes.containsAll(expectedVolumes));
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 39, Source: TestFsDatasetImpl.java

Example 9: startCluster

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; // import the package/class this method depends on
@Before
public void startCluster() throws IOException {
  conf = new HdfsConfiguration();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DN_COUNT).build();
  fs = cluster.getFileSystem();
  singletonNn = cluster.getNameNode();
  singletonDn = cluster.getDataNodes().get(0);
  bpos = singletonDn.getAllBpOs().get(0);
  actor = bpos.getBPServiceActors().get(0);
  try (FsDatasetSpi.FsVolumeReferences volumes =
      singletonDn.getFSDataset().getFsVolumeReferences()) {
    storageUuid = volumes.get(0).getStorageID();
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 15, Source: TestIncrementalBlockReports.java

Example 10: getVolume

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; // import the package/class this method depends on
/** Get the FsVolume with the given basePath, or null if no volume matches. */
private FsVolumeImpl getVolume(DataNode dn, File basePath)
    throws IOException {
  try (FsDatasetSpi.FsVolumeReferences volumes =
    dn.getFSDataset().getFsVolumeReferences()) {
    for (FsVolumeSpi vol : volumes) {
      if (vol.getBasePath().equals(basePath.getPath())) {
        return (FsVolumeImpl) vol;
      }
    }
  }
  return null;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 14, Source: TestDataNodeHotSwapVolumes.java

Example 11: duplicateBlock

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; // import the package/class this method depends on
/**
 * Duplicate the given block on every volume other than the one that
 * already holds a copy.
 * @param blockId id of the block to duplicate
 * @throws IOException
 */
private void duplicateBlock(long blockId) throws IOException {
  synchronized (fds) {
    ReplicaInfo b = FsDatasetTestUtil.fetchReplicaInfo(fds, bpid, blockId);
    try (FsDatasetSpi.FsVolumeReferences volumes =
        fds.getFsVolumeReferences()) {
      for (FsVolumeSpi v : volumes) {
        if (v.getStorageID().equals(b.getVolume().getStorageID())) {
          continue;
        }

        // Volume without a copy of the block. Make a copy now.
        File sourceBlock = b.getBlockFile();
        File sourceMeta = b.getMetaFile();
        String sourceRoot = b.getVolume().getBasePath();
        String destRoot = v.getBasePath();

        String relativeBlockPath =
            new File(sourceRoot).toURI().relativize(sourceBlock.toURI())
                .getPath();
        String relativeMetaPath =
            new File(sourceRoot).toURI().relativize(sourceMeta.toURI())
                .getPath();

        File destBlock = new File(destRoot, relativeBlockPath);
        File destMeta = new File(destRoot, relativeMetaPath);

        destBlock.getParentFile().mkdirs();
        FileUtils.copyFile(sourceBlock, destBlock);
        FileUtils.copyFile(sourceMeta, destMeta);

        if (destBlock.exists() && destMeta.exists()) {
          LOG.info("Copied " + sourceBlock + " ==> " + destBlock);
          LOG.info("Copied " + sourceMeta + " ==> " + destMeta);
        }
      }
    }
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 44, Source: TestDirectoryScanner.java

Example 12: createBlockFile

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; // import the package/class this method depends on
/** Create a block file in a random volume. */
private long createBlockFile() throws IOException {
  long id = getFreeBlockId();
  try (FsDatasetSpi.FsVolumeReferences volumes = fds.getFsVolumeReferences()) {
    int numVolumes = volumes.size();
    int index = rand.nextInt(numVolumes); // nextInt's bound is exclusive; this covers every volume
    File finalizedDir = volumes.get(index).getFinalizedDir(bpid);
    File file = new File(finalizedDir, getBlockFile(id));
    if (file.createNewFile()) {
      LOG.info("Created block file " + file.getName());
    }
  }
  return id;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 15, Source: TestDirectoryScanner.java

Example 13: createMetaFile

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; // import the package/class this method depends on
/** Create a metafile in a random volume. */
private long createMetaFile() throws IOException {
  long id = getFreeBlockId();
  try (FsDatasetSpi.FsVolumeReferences refs = fds.getFsVolumeReferences()) {
    int numVolumes = refs.size();
    int index = rand.nextInt(numVolumes); // nextInt's bound is exclusive; this covers every volume

    File finalizedDir = refs.get(index).getFinalizedDir(bpid);
    File file = new File(finalizedDir, getMetaFile(id));
    if (file.createNewFile()) {
      LOG.info("Created metafile " + file.getName());
    }
  }
  return id;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 16, Source: TestDirectoryScanner.java

Example 14: createBlockMetaFile

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; // import the package/class this method depends on
/** Create a block file and its corresponding metafile in a random volume. */
private long createBlockMetaFile() throws IOException {
  long id = getFreeBlockId();

  try (FsDatasetSpi.FsVolumeReferences refs = fds.getFsVolumeReferences()) {
    int numVolumes = refs.size();
    int index = rand.nextInt(numVolumes); // nextInt's bound is exclusive; this covers every volume

    File finalizedDir = refs.get(index).getFinalizedDir(bpid);
    File file = new File(finalizedDir, getBlockFile(id));
    if (file.createNewFile()) {
      LOG.info("Created block file " + file.getName());

      // Create files with the same prefix as the block file but with
      // extensions that sort just before and after the meta file, to test
      // how the DirectoryScanner handles extraneous files
      String name1 = file.getAbsolutePath() + ".l";
      String name2 = file.getAbsolutePath() + ".n";
      file = new File(name1);
      if (file.createNewFile()) {
        LOG.info("Created extraneous file " + name1);
      }

      file = new File(name2);
      if (file.createNewFile()) {
        LOG.info("Created extraneous file " + name2);
      }

      file = new File(finalizedDir, getMetaFile(id));
      if (file.createNewFile()) {
        LOG.info("Created metafile " + file.getName());
      }
    }
  }
  return id;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 37, Source: TestDirectoryScanner.java

Example 15: testLocatedFileStatusStorageIdsTypes

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; // import the package/class this method depends on
@Test(timeout=120000)
public void testLocatedFileStatusStorageIdsTypes() throws Exception {
  final Configuration conf = getTestConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(3).build();
  try {
    final DistributedFileSystem fs = cluster.getFileSystem();
    final Path testFile = new Path("/testListLocatedStatus");
    final int blockSize = 4096;
    final int numBlocks = 10;
    // Create a test file
    final int repl = 2;
    DFSTestUtil.createFile(fs, testFile, blockSize, numBlocks * blockSize,
        blockSize, (short) repl, 0xADDED);
    DFSTestUtil.waitForReplication(fs, testFile, (short) repl, 30000);
    // Get the listing
    RemoteIterator<LocatedFileStatus> it = fs.listLocatedStatus(testFile);
    assertTrue("Expected file to be present", it.hasNext());
    LocatedFileStatus stat = it.next();
    BlockLocation[] locs = stat.getBlockLocations();
    assertEquals("Unexpected number of locations", numBlocks, locs.length);

    Set<String> dnStorageIds = new HashSet<>();
    for (DataNode d : cluster.getDataNodes()) {
      try (FsDatasetSpi.FsVolumeReferences volumes = d.getFSDataset()
          .getFsVolumeReferences()) {
        for (FsVolumeSpi vol : volumes) {
          dnStorageIds.add(vol.getStorageID());
        }
      }
    }

    for (BlockLocation loc : locs) {
      String[] ids = loc.getStorageIds();
      // Run it through a set to deduplicate, since there should be no dupes
      Set<String> storageIds = new HashSet<>();
      Collections.addAll(storageIds, ids);
      assertEquals("Unexpected num storage ids", repl, storageIds.size());
      // Make sure these are all valid storage IDs
      assertTrue("Unknown storage IDs found!", dnStorageIds.containsAll
          (storageIds));
      // Check storage types are the default, since we didn't set any
      StorageType[] types = loc.getStorageTypes();
      assertEquals("Unexpected num storage types", repl, types.length);
      for (StorageType t: types) {
        assertEquals("Unexpected storage type", StorageType.DEFAULT, t);
      }
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 55, Source: TestDistributedFileSystem.java


Note: The FsDatasetSpi.FsVolumeReferences examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with its original authors. When distributing or using the code, refer to the corresponding project's license. Do not repost without permission.