Java FSVolume Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume. If you have been wondering what the FSVolume class is for, how to use it, or what real-world usage looks like, the curated examples below may help.


The FSVolume class belongs to the org.apache.hadoop.hdfs.server.datanode.FSDataset package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.

Example 1: createInlineBlockFile

import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; // import the required package/class
/** Create block file and corresponding metafile in a random volume */
private long createInlineBlockFile(int checksumType) throws IOException {
  FSVolume[] volumes = data.volumes.getVolumes();
  int index = rand.nextInt(volumes.length - 1);
  long id = getFreeBlockId();
  File finalizedDir = volumes[index].getNamespaceSlice(nsid).getCurrentDir();
  int checksumSize = DataChecksum.getChecksumSizeByType(checksumType);
  String inlineFileName = getInlineBlockFileName(id, checksumType, checksumSize);
  File file = new File(finalizedDir, inlineFileName);
  assertTrue(file.createNewFile());
  PrintWriter pw = new PrintWriter(file);
  int desiredLength = (int)BlockInlineChecksumReader.getFileLengthFromBlockSize(1, 1, checksumSize);
  for(int i = 0; i < desiredLength; i++) {
    pw.write(Character.getNumericValue('0'));
  }
  pw.close();
  LOG.info("Created block file " + file.getName());
  return id;
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 20, Source: TestDirectoryScannerInlineFiles.java
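
For context on the desiredLength computation above: in the inline-checksum layout, each data chunk is stored in the same file followed by its checksum, so the on-disk file is longer than the block itself. Below is a minimal sketch of that length relation, assuming the conventional interleaved chunk layout; it is what BlockInlineChecksumReader.getFileLengthFromBlockSize presumably computes, not the actual implementation.

// Hypothetical sketch, NOT the real BlockInlineChecksumReader code:
// expected on-disk length of a block stored with inline checksums,
// assuming each bytesPerChecksum-sized chunk is followed by its checksum.
static long inlineFileLength(long blockSize, int bytesPerChecksum, int checksumSize) {
  long chunks = (blockSize + bytesPerChecksum - 1) / bytesPerChecksum; // ceil
  return blockSize + chunks * checksumSize;
}

Under this assumption, the call above with blockSize = 1 and bytesPerChecksum = 1 yields 1 + checksumSize bytes, which matches the loop writing desiredLength characters.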

Example 2: getSeparateFilesLayoutScanInfo

import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; // import the required package/class
static DiskScanInfo getSeparateFilesLayoutScanInfo(long blockId,
    FileInfo blockFileInfo, FileInfo metaFileInfo, FSVolume vol) {
  File metaFile = null;
  long genStamp = Block.GRANDFATHER_GENERATION_STAMP;
  if (metaFileInfo != null) {
    metaFile = metaFileInfo.file;
    genStamp = metaFileInfo.getStamp;
  }
  File blockFile = null;
  long fileLength = 0;
  if (blockFileInfo != null) {
    blockFile = blockFileInfo.file;
    fileLength = blockFile.length();
  }
  return new DiskScanInfo(SEPARATE_FILES_LAYOUT, blockId, blockFile,
      metaFile, vol, fileLength, genStamp);
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 19, Source: DirectoryScanner.java

Example 3: getInlineFilesLayoutScanInfo

import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; // import the required package/class
static DiskScanInfo getInlineFilesLayoutScanInfo(long blockId,
    FileInfo singleFileInfo, FSVolume vol) {
  String[] groundSeparated = StringUtils
      .split(singleFileInfo.fileName, '_');
  if (groundSeparated.length != 6) {
    throw new IllegalStateException("FileName \"" + singleFileInfo.fileName
        + "\" doesn't " + "reflect new layout format!");
  }
  int checksumType = Integer.parseInt(groundSeparated[4]);
  int bytesPerChecksum = Integer.parseInt(groundSeparated[5]);
  long fileLength = BlockInlineChecksumReader.getBlockSizeFromFileLength(
      singleFileInfo.file.length(), checksumType, bytesPerChecksum);
  return new DiskScanInfo(INLINE_CHECKSUM_LAYOUT, blockId,
      singleFileInfo.file, singleFileInfo.file, vol, fileLength,
      singleFileInfo.getStamp);
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 17, Source: DirectoryScanner.java
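
To make the parsing above concrete, here is a hedged illustration using plain String.split. The file name is invented; only the meaning of tokens 4 and 5 is taken from the parser above, and the other token positions are an assumption.

// Hypothetical inline-checksum file name with six '_'-separated tokens.
String name = "blk_4567_1001_3_1_512";
String[] tokens = name.split("_");                  // {"blk","4567","1001","3","1","512"}
int checksumType = Integer.parseInt(tokens[4]);     // 1
int bytesPerChecksum = Integer.parseInt(tokens[5]); // 512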

Example 4: getVolumeInfo

import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; // import the required package/class
/**
 * Returned information is a JSON representation of a map whose
 * key is the volume name and whose value is a map of volume
 * attribute keys to their values.
 */
@Override // DataNodeMXBean
public String getVolumeInfo() {
  final Map<String, Object> info = new HashMap<String, Object>();
  try {
    FSVolume[] volumes = ((FSDataset)this.data).volumes.getVolumes();
    for (FSVolume v : volumes) {
      final Map<String, Object> innerInfo = new HashMap<String, Object>();
      innerInfo.put("usedSpace", v.getDfsUsed());
      innerInfo.put("freeSpace", v.getAvailable());
      innerInfo.put("reservedSpace", v.getReserved());
      info.put(v.getDir().toString(), innerInfo);
    }
    return JSON.toString(info);
  } catch (IOException e) {
    LOG.info("Cannot get volume info.", e);
    return "ERROR";
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 24, Source: DataNode.java
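
Below is a self-contained sketch of the map shape that getVolumeInfo() serializes. The volume path and the numbers are invented; the exact output formatting is whatever the mortbay JSON helper produces.

import java.util.HashMap;
import java.util.Map;
import org.mortbay.util.ajax.JSON;

// Standalone illustration of the structure above; all values are made up.
public class VolumeInfoShapeDemo {
  public static void main(String[] args) {
    Map<String, Object> info = new HashMap<String, Object>();
    Map<String, Object> innerInfo = new HashMap<String, Object>();
    innerInfo.put("usedSpace", 1024L);
    innerInfo.put("freeSpace", 909312L);
    innerInfo.put("reservedSpace", 0L);
    info.put("/data/1/dfs/dn", innerInfo);
    // prints something like:
    // {"/data/1/dfs/dn":{"usedSpace":1024,"freeSpace":909312,"reservedSpace":0}}
    System.out.println(JSON.toString(info));
  }
}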

Example 5: removeUnhealthyVolumes

import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; // import the required package/class
synchronized int removeUnhealthyVolumes(Collection<FSVolume> failed_vols,
    FSDatasetDeltaInterface datasetDelta) {
  int removed_blocks = 0;

  Iterator<Entry<Block, DatanodeBlockInfo>> dbi = blockInfoMap.entrySet()
      .iterator();
  while (dbi.hasNext()) {
    Entry<Block, DatanodeBlockInfo> entry = dbi.next();
    for (FSVolume v : failed_vols) {
      if (entry.getValue().getBlockDataFile().getVolume() == v) {
        DataNode.LOG.warn("removing block " + entry.getKey().getBlockId()
            + " from vol " + v.toString() + ", form namespace: "
            + namespaceId);
        dbi.remove();
        if (datasetDelta != null) {
          datasetDelta.removeBlock(namespaceId, entry.getKey());
        }
        removed_blocks++;
        break;
      }
    }
  }
  return removed_blocks;
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 25, Source: NamespaceMap.java

Example 6: getBlockCrcPerVolume

import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; // import the required package/class
/**
 * Get a list of block info with CRC information per FS volume.
 * 
 * @param volumes
 *          the volumes whose block lists are requested
 * @return a map from FSVolume to a list of (Block -> DatanodeBlockInfo)
 *         maps for blocks in that volume that carry CRC information.
 *         Each element of the list corresponds to one bucket, and the
 *         list is ordered by bucket ID. The innermost map goes from a
 *         block to its datanode block info.
 */
Map<FSVolume, List<Map<Block, DatanodeBlockInfo>>> getBlockCrcPerVolume(
    List<FSVolume> volumes) {
  Map<FSVolume, List<Map<Block, DatanodeBlockInfo>>> retMap =
      new HashMap<FSVolume, List<Map<Block, DatanodeBlockInfo>>>();
  for (FSVolume volume : volumes) {
    List<Map<Block, DatanodeBlockInfo>> newSubMap = new ArrayList<Map<Block, DatanodeBlockInfo>>(
        numBucket);
    for (int i = 0; i < numBucket; i++) {
      newSubMap.add(new HashMap<Block, DatanodeBlockInfo>());
    }
    retMap.put(volume, newSubMap);
  }
  for (BlockBucket bb : blockBuckets) {
    bb.getBlockCrcPerVolume(retMap);
  }
  return retMap;
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 29, Source: NamespaceMap.java
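
A minimal sketch of how a caller might walk the returned structure, relying only on the shape documented in the javadoc above (the list index is the bucket ID). The surrounding variables namespaceMap and volumes are assumed to exist in scope.

// Hypothetical consumer of getBlockCrcPerVolume(); names are illustrative.
Map<FSVolume, List<Map<Block, DatanodeBlockInfo>>> crcMap =
    namespaceMap.getBlockCrcPerVolume(volumes);
for (Map.Entry<FSVolume, List<Map<Block, DatanodeBlockInfo>>> e : crcMap.entrySet()) {
  List<Map<Block, DatanodeBlockInfo>> buckets = e.getValue();
  for (int bucketId = 0; bucketId < buckets.size(); bucketId++) {
    for (Map.Entry<Block, DatanodeBlockInfo> b : buckets.get(bucketId).entrySet()) {
      // b.getKey() is the Block; b.getValue() carries its CRC information
    }
  }
}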

Example 7: setup

import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; // import the required package/class
private void setup(FSDataset dataSet) throws IOException {
  // setup replicas map
  ReplicasMap replicasMap = dataSet.volumeMap;
  FSVolume vol = dataSet.volumes.getNextVolume(0);
  ReplicaInfo replicaInfo = new FinalizedReplica(
      blocks[FINALIZED], vol, vol.getDir());
  replicasMap.add(replicaInfo);
  replicaInfo.getBlockFile().createNewFile();
  replicaInfo.getMetaFile().createNewFile();
  
  replicasMap.add(new ReplicaInPipeline(
      blocks[TEMPORARY].getBlockId(),
      blocks[TEMPORARY].getGenerationStamp(), vol, 
      vol.createTmpFile(blocks[TEMPORARY]).getParentFile()));
  
  replicaInfo = new ReplicaBeingWritten(blocks[RBW], vol, 
      vol.createRbwFile(blocks[RBW]).getParentFile(), null);
  replicasMap.add(replicaInfo);
  replicaInfo.getBlockFile().createNewFile();
  replicaInfo.getMetaFile().createNewFile();
  
  replicasMap.add(new ReplicaWaitingToBeRecovered(blocks[RWR], vol, 
      vol.createRbwFile(blocks[RWR]).getParentFile()));
  replicasMap.add(new ReplicaUnderRecovery(
      new FinalizedReplica(blocks[RUR], vol, vol.getDir()), 2007));    
}
 
Developer ID: cumulusyebl, Project: cumulus, Lines: 27, Source: TestWriteToReplica.java
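
The indices used above (blocks[FINALIZED], blocks[TEMPORARY], and so on) refer to the standard HDFS replica lifecycle states. As a hedged reference, the test presumably defines constants along these lines elsewhere; the exact values are an assumption here:

// Hypothetical constants matching the test's blocks[] indices; the real
// test defines these elsewhere. Names follow standard HDFS replica states.
static final int FINALIZED = 0; // replica is complete and immutable
static final int TEMPORARY = 1; // transient replica, e.g. for block transfer
static final int RBW = 2;       // Replica Being Written
static final int RWR = 3;       // Replica Waiting to be Recovered
static final int RUR = 4;       // Replica Under Recovery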

Example 8: removeUnhealthyVolumes

import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; // import the required package/class
synchronized int removeUnhealthyVolumes(Collection<FSVolume> failed_vols) {
  int removed_blocks = 0;

  for (Integer namespaceId : namespaceMap.keySet()) {
    Map<Block, DatanodeBlockInfo> m = namespaceMap.get(namespaceId);
    Iterator<Entry<Block, DatanodeBlockInfo>> dbi = m.entrySet().iterator();
    while (dbi.hasNext()) {
      Entry<Block, DatanodeBlockInfo> entry = dbi.next();
      for (FSVolume v : failed_vols) {
        if (entry.getValue().getVolume() == v) {
          DataNode.LOG.warn("removing block " + entry.getKey().getBlockId()
              + " from vol " + v.toString() + ", form namespace: "
              + namespaceId);
          dbi.remove();
          removed_blocks++;
          break;
        }
      }
    }
  }
  return removed_blocks;
}
 
Developer ID: iVCE, Project: RDFS, Lines: 23, Source: VolumeMap.java
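
Both versions of removeUnhealthyVolumes delete entries through the Iterator rather than through the map itself; calling remove on the map while iterating its entry set would throw ConcurrentModificationException. A standalone sketch of the idiom, with invented keys and values:

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

// Remove-while-iterating demo; the data is made up.
public class RemoveWhileIterating {
  public static void main(String[] args) {
    Map<Long, String> volumeByBlock = new HashMap<Long, String>();
    volumeByBlock.put(1L, "healthy-vol");
    volumeByBlock.put(2L, "failed-vol");
    Iterator<Map.Entry<Long, String>> it = volumeByBlock.entrySet().iterator();
    while (it.hasNext()) {
      Map.Entry<Long, String> entry = it.next();
      if ("failed-vol".equals(entry.getValue())) {
        it.remove(); // safe; volumeByBlock.remove(...) here could throw CME
      }
    }
    System.out.println(volumeByBlock); // {1=healthy-vol}
  }
}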

Example 9: setUpCluster

import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; // import the required package/class
@BeforeClass
public static void setUpCluster() {
  LOG.info("setting up!");
  Configuration CONF = new Configuration();
  CONF.setLong("dfs.block.size", 100);
  CONF.setInt("io.bytes.per.checksum", 1);
  CONF.setLong("dfs.heartbeat.interval", 1L);
  CONF.setInt("dfs.datanode.directoryscan.interval", 1000);
  
  try{
    cluster = new MiniDFSCluster(CONF, 1, true, null);
    cluster.waitActive();

    dn = cluster.getDataNodes().get(0);
    nsid = dn.getAllNamespaces()[0];
    scanner = dn.directoryScanner;
    data = (FSDataset)dn.data;
    Field f = DirectoryScanner.class.getDeclaredField("delta");
    f.setAccessible(true);
    delta = (FSDatasetDelta)f.get(scanner);

    fs = cluster.getFileSystem();
    
    List<File> volumes = new ArrayList<File>();
    for(FSVolume vol : data.volumes.getVolumes()) {
      volumes.add(vol.getDir());
    }
    data.asyncDiskService = new TestDirectoryScanner.FSDatasetAsyncDiscServiceMock(
        volumes.toArray(new File[volumes.size()]), CONF);
    
  } catch (Exception e) {
    e.printStackTrace();
    fail("setup failed");
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 36, Source: TestDirectoryScannerInlineFiles.java

Example 10: createBlockFile

import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; // import the required package/class
/** Create a block file in a random volume */
private long createBlockFile() throws IOException {
  FSVolume[] volumes = fds.volumes.getVolumes();
  int index = rand.nextInt(volumes.length - 1);
  long id = getFreeBlockId();
  File finalizedDir = volumes[index].getNamespaceSlice(nsid).getCurrentDir();
  File file = new File(finalizedDir, getBlockFile(id));
  if (file.createNewFile()) {
    LOG.info("Created block file " + file.getName());
  }
  return id;
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 13, Source: TestDirectoryScanner.java

Example 11: createMetaFile

import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; // import the required package/class
/** Create a metafile in a random volume */
private long createMetaFile() throws IOException {
  FSVolume[] volumes = fds.volumes.getVolumes();
  int index = rand.nextInt(volumes.length - 1);
  long id = getFreeBlockId();
  File finalizedDir = volumes[index].getNamespaceSlice(nsid).getCurrentDir();
  File file = new File(finalizedDir, getMetaFile(id));
  if (file.createNewFile()) {
    LOG.info("Created metafile " + file.getName());
  }
  return id;
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 13, Source: TestDirectoryScanner.java

Example 12: createBlockMetaFile

import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; // import the required package/class
/** Create block file and corresponding metafile in a random volume */
private long createBlockMetaFile() throws IOException {
  FSVolume[] volumes = fds.volumes.getVolumes();
  int index = rand.nextInt(volumes.length - 1);
  long id = getFreeBlockId();
  File finalizedDir = volumes[index].getNamespaceSlice(nsid).getCurrentDir();
  File file = new File(finalizedDir, getBlockFile(id));
  if (file.createNewFile()) {
    LOG.info("Created block file " + file.getName());

    // Create files with same prefix as block file but extension names
    // such that during sorting, these files appear around meta file
    // to test how DirectoryScanner handles extraneous files
    String name1 = file.getAbsolutePath() + ".l";
    String name2 = file.getAbsolutePath() + ".n";
    file = new File(name1);
    if (file.createNewFile()) {
      LOG.info("Created extraneous file " + name1);
    }

    file = new File(name2);
    if (file.createNewFile()) {
      LOG.info("Created extraneous file " + name2);
    }

    file = new File(finalizedDir, getMetaFile(id));
    if (file.createNewFile()) {
      LOG.info("Created metafile " + file.getName());
    }
  }
  return id;
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 33, Source: TestDirectoryScanner.java
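
Why the extensions ".l" and ".n": for file names sharing the block file's name as a prefix, they sort lexicographically just before and after ".meta", so the extraneous files bracket the metafile in a sorted directory listing. A standalone demonstration with invented names (the real metafile name may also carry a generation stamp):

import java.util.Arrays;

// Sorting demo; the file names are made up.
public class ExtraneousFileSortDemo {
  public static void main(String[] args) {
    String[] names = {"blk_42.n", "blk_42.meta", "blk_42.l"};
    Arrays.sort(names);
    System.out.println(Arrays.toString(names));
    // [blk_42.l, blk_42.meta, blk_42.n]
  }
}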

Example 13: DatanodeBlockInfo

import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; // import the required package/class
DatanodeBlockInfo(FSVolume vol, File file, long finalizedSize,
    boolean visible, boolean inlineChecksum, int checksumType,
    int bytesPerChecksum, boolean blockCrcValid, int blockCrc) {
  this.finalizedSize = finalizedSize;
  detached = false;
  this.visible = visible;
  this.inlineChecksum = inlineChecksum;
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
  this.blockCrcValid = blockCrcValid;
  this.blockCrc = blockCrc;
  this.block = null;
  this.blockDataFile = new BlockDataFile(file, vol);
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 15, Source: DatanodeBlockInfo.java

Example 14: DiskScanInfo

import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; // import the required package/class
DiskScanInfo(int layout, long blockId, File blockFile, File metaFile,
    FSVolume vol, long fileLength, long genStamp) {
  this.blockId = blockId;
  this.metaFile = metaFile;
  this.blockFile = blockFile;
  this.volume = vol;
  this.fileLength = fileLength;
  this.genStamp = genStamp;
  this.layout = layout;
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 11, Source: DirectoryScanner.java

Example 15: removeUnhealthyVolumes

import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume; // import the required package/class
synchronized int removeUnhealthyVolumes(Collection<FSVolume> failed_vols) {
  int removed_blocks = 0;

  for (Integer namespaceId : nsMap.keySet()) {
    removed_blocks += nsMap.get(namespaceId).removeUnhealthyVolumes(
        failed_vols, datasetDelta);
  }
  return removed_blocks;
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 10, Source: VolumeMap.java


Note: The org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.