当前位置: 首页>>代码示例>>Java>>正文


Java FsVolumeSpi类代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi的典型用法代码示例。如果您正苦于以下问题:Java FsVolumeSpi类的具体用法?Java FsVolumeSpi怎么用?Java FsVolumeSpi使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


FsVolumeSpi类属于org.apache.hadoop.hdfs.server.datanode.fsdataset包,在下文中一共展示了FsVolumeSpi类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: ScanInfo

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; //导入依赖的package包/类
/**
 * Create a ScanInfo object for a block, recording the block and meta
 * file paths relative to the volume's (condensed) base path.
 *
 * @param blockId   the block ID
 * @param blockFile the path to the block data file (may be null)
 * @param metaFile  the path to the block meta-data file (may be null)
 * @param vol       the volume that contains the block (may be null)
 */
ScanInfo(long blockId, File blockFile, File metaFile, FsVolumeSpi vol) {
  this.blockId = blockId;
  // Keep only path suffixes relative to the condensed volume base path.
  String condensedVolPath = vol == null ? null :
    getCondensedPath(vol.getBasePath());
  this.blockSuffix = blockFile == null ? null :
    getSuffix(blockFile, condensedVolPath);
  this.blockFileLength = (blockFile != null) ? blockFile.length() : 0; 
  if (metaFile == null) {
    this.metaSuffix = null;
  } else if (blockFile == null) {
    // No block file: meta suffix is relative to the volume path only.
    this.metaSuffix = getSuffix(metaFile, condensedVolPath);
  } else {
    // Block file present: store the meta path relative to the full
    // block-file path, so only the differing tail is kept.
    this.metaSuffix = getSuffix(metaFile,
        condensedVolPath + blockSuffix);
  }
  this.volume = vol;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:18,代码来源:DirectoryScanner.java

示例2: removeVolumeScanner

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; //导入依赖的package包/类
/**
 * Stops and removes a volume scanner.<p/>
 *
 * This function will block until the volume scanner has stopped.
 *
 * @param volume           The volume to remove.
 */
/**
 * Stops and removes a volume scanner.<p/>
 *
 * This function will block until the volume scanner has stopped.
 *
 * @param volume           The volume to remove.
 */
public synchronized void removeVolumeScanner(FsVolumeSpi volume) {
  if (!isEnabled()) {
    LOG.debug("Not removing volume scanner for {}, because the block " +
        "scanner is disabled.", volume.getStorageID());
    return;
  }
  // Scanners are keyed by the volume's storage ID.
  String storageId = volume.getStorageID();
  VolumeScanner target = scanners.get(storageId);
  if (target == null) {
    LOG.warn("No scanner found to remove for volumeId {}", storageId);
    return;
  }
  LOG.info("Removing scanner for volume {} (StorageID {})",
      volume.getBasePath(), storageId);
  // Ask the scanner thread to stop, drop it from the map, then wait
  // (uninterruptibly, with a bound) for the thread to finish.
  target.shutdown();
  scanners.remove(storageId);
  Uninterruptibles.joinUninterruptibly(target, 5, TimeUnit.MINUTES);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:26,代码来源:BlockScanner.java

示例3: testLocalDirs

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; //导入依赖的package包/类
/**
 * Check that the permissions of the local DN directories are as expected.
 */
/**
 * Check that the permissions of the local DN directories are as expected.
 */
@Test
public void testLocalDirs() throws Exception {
  Configuration conf = new Configuration();
  String permStr =
      conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY);
  final FsPermission expected = new FsPermission(permStr);
  final FileSystem localFS = FileSystem.getLocal(conf);

  // Check permissions on directories in 'dfs.datanode.data.dir'
  for (DataNode datanode : cluster.getDataNodes()) {
    for (FsVolumeSpi volume : datanode.getFSDataset().getVolumes()) {
      Path dataDir = new Path(volume.getBasePath());
      FsPermission actual = localFS.getFileStatus(dataDir).getPermission();
      assertEquals("Permission for dir: " + dataDir + ", is " + actual +
          ", while expected is " + expected, expected, actual);
    }
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:23,代码来源:TestDiskError.java

示例4: ScanInfo

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; //导入依赖的package包/类
/**
 * Create a ScanInfo object for a block. This constructor will examine
 * the block data and meta-data files.
 *
 * @param blockId the block ID
 * @param blockFile the path to the block data file
 * @param metaFile the path to the block meta-data file
 * @param vol the volume that contains the block
 */
/**
 * Create a ScanInfo object for a block. This constructor will examine
 * the block data and meta-data files.
 *
 * @param blockId the block ID
 * @param blockFile the path to the block data file
 * @param metaFile the path to the block meta-data file
 * @param vol the volume that contains the block
 */
ScanInfo(long blockId, File blockFile, File metaFile, FsVolumeSpi vol) {
  this.blockId = blockId;

  // Condense the volume base path once; suffixes are relative to it.
  String condensedVolPath;
  if (vol == null) {
    condensedVolPath = null;
  } else {
    condensedVolPath = getCondensedPath(vol.getBasePath());
  }

  if (blockFile == null) {
    this.blockSuffix = null;
    this.blockFileLength = 0;
  } else {
    this.blockSuffix = getSuffix(blockFile, condensedVolPath);
    this.blockFileLength = blockFile.length();
  }

  if (metaFile == null) {
    this.metaSuffix = null;
  } else if (blockFile == null) {
    this.metaSuffix = getSuffix(metaFile, condensedVolPath);
  } else {
    // With a block file present, store the meta path relative to the
    // full block-file path so only the differing tail is kept.
    this.metaSuffix = getSuffix(metaFile, condensedVolPath + blockSuffix);
  }

  this.volume = vol;
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:27,代码来源:DirectoryScanner.java

示例5: getStoredReplicas

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; //导入依赖的package包/类
/**
 * Reload the replicas for the given block pool from disk and return
 * them as an iterator.
 */
@Override
public Iterator<Replica> getStoredReplicas(String bpid) throws IOException {
  // Rebuild the replica map from what is actually on disk.
  ReplicaMap diskReplicas = new ReplicaMap(dataset);
  try (FsVolumeReferences references = dataset.getFsVolumeReferences()) {
    for (FsVolumeSpi v : references) {
      ((FsVolumeImpl) v).getVolumeMap(bpid, diskReplicas,
          dataset.ramDiskReplicaTracker);
    }
  }

  // Cast ReplicaInfo to Replica, because ReplicaInfo assumes a file-based
  // FsVolumeSpi implementation.
  List<Replica> result = new ArrayList<>();
  if (diskReplicas.replicas(bpid) != null) {
    result.addAll(diskReplicas.replicas(bpid));
  }
  return result.iterator();
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:20,代码来源:FsDatasetImplTestUtils.java

示例6: createReplicas

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; //导入依赖的package包/类
/**
 * Create one replica of each state (finalized, RBW, waiting-to-be-
 * recovered, in-pipeline) on every volume of every listed block pool.
 */
private void createReplicas(List<String> bpList, List<FsVolumeSpi> volumes,
                            FsDatasetTestUtils testUtils) throws IOException {
  long nextId = 1; // Used as both the blockId and the genStamp.
  for (String bpId : bpList) {
    for (FsVolumeSpi volume : volumes) {
      testUtils.createFinalizedReplica(volume,
          new ExtendedBlock(bpId, nextId, 1, nextId));
      nextId++;

      testUtils.createRBW(volume,
          new ExtendedBlock(bpId, nextId, 1, nextId));
      nextId++;

      testUtils.createReplicaWaitingToBeRecovered(volume,
          new ExtendedBlock(bpId, nextId, 1, nextId));
      nextId++;

      testUtils.createReplicaInPipeline(volume,
          new ExtendedBlock(bpId, nextId, 1, nextId));
      nextId++;
    }
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:27,代码来源:TestWriteToReplica.java

示例7: testLocalDirs

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; //导入依赖的package包/类
/**
 * Check that the permissions of the local DN directories are as expected.
 */
/**
 * Check that the permissions of the local DN directories are as expected.
 */
@Test
public void testLocalDirs() throws Exception {
  Configuration conf = new Configuration();
  String permStr =
      conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY);
  final FsPermission expected = new FsPermission(permStr);
  final FileSystem localFS = FileSystem.getLocal(conf);

  // Check permissions on directories in 'dfs.datanode.data.dir'
  for (DataNode datanode : cluster.getDataNodes()) {
    try (FsDatasetSpi.FsVolumeReferences refs =
        datanode.getFSDataset().getFsVolumeReferences()) {
      for (FsVolumeSpi volume : refs) {
        Path dataDir = new Path(volume.getBasePath());
        FsPermission actual = localFS.getFileStatus(dataDir).getPermission();
        assertEquals("Permission for dir: " + dataDir + ", is " + actual +
            ", while expected is " + expected, expected, actual);
      }
    }
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:26,代码来源:TestDiskError.java

示例8: getRemaining

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; //导入依赖的package包/类
/**
 * Sum the available space over all volumes. A volume whose channel is
 * already closed contributes nothing.
 */
long getRemaining() throws IOException {
  long total = 0L;
  for (FsVolumeSpi volume : volumes.get()) {
    // Obtain (and auto-release) a reference before querying the volume.
    try (FsVolumeReference ref = volume.obtainReference()) {
      total += volume.getAvailable();
    } catch (ClosedChannelException e) {
      // ignore: skip closed volumes.
    }
  }
  return total;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:12,代码来源:FsVolumeList.java

示例9: addDifference

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; //导入依赖的package包/类
/** Block is not found on the disk */
private void addDifference(LinkedList<ScanInfo> diffRecord,
                           Stats statsRecord, long blockId,
                           FsVolumeSpi vol) {
  statsRecord.missingBlockFile++;
  statsRecord.missingMetaFile++;
  diffRecord.add(new ScanInfo(blockId, null, null, vol));
}
 
开发者ID:naver,项目名称:hadoop,代码行数:9,代码来源:DirectoryScanner.java

示例10: isValid

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; //导入依赖的package包/类
/** Is the given volume still valid in the dataset? */
private static boolean isValid(final FsDatasetSpi<?> dataset,
    final FsVolumeSpi volume) {
  for (FsVolumeSpi vol : dataset.getVolumes()) {
    if (vol == volume) {
      return true;
    }
  }
  return false;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:11,代码来源:DirectoryScanner.java

示例11: reportBadBlocks

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; //导入依赖的package包/类
/**
 * Report a bad block which is hosted on the local DN.
 */
/**
 * Report a bad block which is hosted on the local DN.
 */
public void reportBadBlocks(ExtendedBlock block) throws IOException{
  BPOfferService bpos = getBPOSForBlock(block);
  FsVolumeSpi hostVolume = getFSDataset().getVolume(block);
  bpos.reportBadBlocks(block, hostVolume.getStorageID(),
      hostVolume.getStorageType());
}
 
开发者ID:naver,项目名称:hadoop,代码行数:10,代码来源:DataNode.java

示例12: reportBadBlock

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; //导入依赖的package包/类
/**
 * Report a single bad block through the given block-pool offer service
 * and log the supplied message.
 */
private void reportBadBlock(final BPOfferService bpos,
    final ExtendedBlock block, final String msg) {
  // Resolve which local volume hosts the block before reporting.
  FsVolumeSpi hostVolume = getFSDataset().getVolume(block);
  bpos.reportBadBlocks(block, hostVolume.getStorageID(),
      hostVolume.getStorageType());
  LOG.warn(msg);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:8,代码来源:DataNode.java

示例13: addVolumeScanner

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; //导入依赖的package包/类
/**
* Set up a scanner for the given block pool and volume.
*
* @param ref              A reference to the volume.
*/
/**
 * Set up a scanner for the given block pool and volume.
 *
 * On success, ownership of {@code ref} passes to the new VolumeScanner;
 * on any early return or failure, {@code ref} is released in the
 * finally block below.
 *
 * @param ref              A reference to the volume.
 */
public synchronized void addVolumeScanner(FsVolumeReference ref) {
  boolean success = false;
  try {
    FsVolumeSpi volume = ref.getVolume();
    if (!isEnabled()) {
      LOG.debug("Not adding volume scanner for {}, because the block " +
          "scanner is disabled.", volume.getBasePath());
      return;
    }
    // Scanners are keyed by storage ID; never start two for one volume.
    VolumeScanner scanner = scanners.get(volume.getStorageID());
    if (scanner != null) {
      LOG.error("Already have a scanner for volume {}.",
          volume.getBasePath());
      return;
    }
    LOG.debug("Adding scanner for volume {} (StorageID {})",
        volume.getBasePath(), volume.getStorageID());
    scanner = new VolumeScanner(conf, datanode, ref);
    scanner.start();
    scanners.put(volume.getStorageID(), scanner);
    success = true;
  } finally {
    if (!success) {
      // If we didn't create a new VolumeScanner object, we don't
      // need this reference to the volume.
      IOUtils.cleanup(null, ref);
    }
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:35,代码来源:BlockScanner.java

示例14: handle

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; //导入依赖的package包/类
/**
 * Handle the result of scanning one block: log success, ignore expected
 * races, and report genuinely bad blocks to the NameNode.
 *
 * @param block the block that was scanned
 * @param e     the scan failure, or null if the scan succeeded
 */
public void handle(ExtendedBlock block, IOException e) {
  FsVolumeSpi volume = scanner.volume;
  if (e == null) {
    LOG.trace("Successfully scanned {} on {}", block, volume.getBasePath());
    return;
  }
  // If the block does not exist anymore, then it's not an error.
  if (!volume.getDataset().contains(block)) {
    LOG.debug("Volume {}: block {} is no longer in the dataset.",
        volume.getBasePath(), block);
    return;
  }
  // If the block exists, the exception may be due to a race with write:
  // The BlockSender got an old block path in rbw. BlockReceiver removed
  // the rbw block from rbw to finalized but BlockSender tried to open the
  // file before BlockReceiver updated the VolumeMap. The state of the
  // block can be changed again now, so ignore this error here. If there
  // is a block really deleted by mistake, DirectoryScan should catch it.
  if (e instanceof FileNotFoundException) {
    LOG.info("Volume {}: verification failed for {} because of " +
            "FileNotFoundException.  This may be due to a race with write.",
        volume.getBasePath(), block);
    return;
  }
  LOG.warn("Reporting bad {} on {}", block, volume.getBasePath());
  try {
    scanner.datanode.reportBadBlocks(block);
  } catch (IOException ie) {
    // This is bad, but not bad enough to shut down the scanner.
    // FIX: log the reporting failure (ie) rather than the original scan
    // error (e), which would otherwise silently swallow 'ie'.
    LOG.warn("Cannot report bad " + block.getBlockId(), ie);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:33,代码来源:VolumeScanner.java

示例15: setVolumeFull

import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; //导入依赖的package包/类
/**
 * Zero out the capacity of every volume on this datanode whose storage
 * type matches, simulating a full disk.
 */
private void setVolumeFull(DataNode dn, StorageType type) {
  for (FsVolumeSpi v : dn.getFSDataset().getVolumes()) {
    FsVolumeImpl impl = (FsVolumeImpl) v;
    if (impl.getStorageType() != type) {
      continue;
    }
    LOG.info("setCapacity to 0 for [" + impl.getStorageType() + "]"
        + impl.getStorageID());
    impl.setCapacityForTesting(0);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:12,代码来源:TestStorageMover.java


注:本文中的org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。