

Java DataStorage Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.datanode.DataStorage. If you have been wondering what DataStorage is for and how to use it, the curated class examples below should help.


The DataStorage class belongs to the org.apache.hadoop.hdfs.server.datanode package. Fifteen code examples of the class are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
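Several of the examples below build block-pool paths out of DataStorage's directory-layout constants. As a quick orientation, here is a minimal, self-contained sketch of that layout. The literal values in the comments ("current", "finalized", "rbw") match what these constants hold in the Hadoop 2.x source, but the volume root and block pool id are invented for illustration.

import java.io.File;

public class BlockPoolLayoutSketch {
  public static void main(String[] args) {
    File volumeCurrentDir = new File("/data/dfs/dn/current"); // hypothetical volume root
    String bpid = "BP-1-127.0.0.1-1400000000000";             // hypothetical block pool id

    File bpDir = new File(volumeCurrentDir, bpid);
    File bpCurrentDir = new File(bpDir, "current");           // DataStorage.STORAGE_DIR_CURRENT
    File finalizedDir = new File(bpCurrentDir, "finalized");  // DataStorage.STORAGE_DIR_FINALIZED
    File rbwDir = new File(bpCurrentDir, "rbw");              // DataStorage.STORAGE_DIR_RBW

    System.out.println(finalizedDir); // .../current/BP-.../current/finalized
    System.out.println(rbwDir);       // .../current/BP-.../current/rbw
  }
}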

Example 1: getInitialVolumeFailureInfos

import org.apache.hadoop.hdfs.server.datanode.DataStorage; // import the required package/class
/**
 * Gets initial volume failure information for all volumes that failed
 * immediately at startup.  The method works by determining the set difference
 * between all configured storage locations and the actual storage locations in
 * use after attempting to put all of them into service.
 *
 * @return each storage location that has failed
 */
private static List<VolumeFailureInfo> getInitialVolumeFailureInfos(
    Collection<StorageLocation> dataLocations, DataStorage storage) {
  Set<String> failedLocationSet = Sets.newHashSetWithExpectedSize(
      dataLocations.size());
  for (StorageLocation sl: dataLocations) {
    failedLocationSet.add(sl.getFile().getAbsolutePath());
  }
  for (Iterator<Storage.StorageDirectory> it = storage.dirIterator();
       it.hasNext(); ) {
    Storage.StorageDirectory sd = it.next();
    failedLocationSet.remove(sd.getRoot().getAbsolutePath());
  }
  List<VolumeFailureInfo> volumeFailureInfos = Lists.newArrayListWithCapacity(
      failedLocationSet.size());
  long failureDate = Time.now();
  for (String failedStorageLocation: failedLocationSet) {
    volumeFailureInfos.add(new VolumeFailureInfo(failedStorageLocation,
        failureDate));
  }
  return volumeFailureInfos;
}
 
Developer ID: naver; Project: hadoop; Lines: 30; Source: FsDatasetImpl.java
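The set-difference technique described in the Javadoc above is easy to exercise in isolation. The following self-contained sketch (plain JDK, hypothetical paths, not the Hadoop code itself) mirrors it: seed a set with every configured location, remove every location that actually came into service, and whatever remains has failed.

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class SetDifferenceSketch {
  public static void main(String[] args) {
    // Hypothetical configured locations and the subset that started successfully.
    Set<String> failed = new HashSet<>(Arrays.asList("/data1", "/data2", "/data3"));
    List<String> inService = Arrays.asList("/data1", "/data3");

    failed.removeAll(inService); // set difference: configured \ in-service
    System.out.println(failed);  // prints [/data2]
  }
}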

Example 2: isBPDirEmpty

import org.apache.hadoop.hdfs.server.datanode.DataStorage; // import the required package/class
boolean isBPDirEmpty(String bpid) throws IOException {
  File volumeCurrentDir = this.getCurrentDir();
  File bpDir = new File(volumeCurrentDir, bpid);
  File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
  File finalizedDir = new File(bpCurrentDir,
      DataStorage.STORAGE_DIR_FINALIZED);
  File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
  if (finalizedDir.exists() && !DatanodeUtil.dirNoFilesRecursive(
      finalizedDir)) {
    return false;
  }
  if (rbwDir.exists() && FileUtil.list(rbwDir).length != 0) {
    return false;
  }
  return true;
}
 
Developer ID: naver; Project: hadoop; Lines: 17; Source: FsVolumeImpl.java
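As a standalone analogue of the two emptiness checks above, this sketch (plain JDK, hypothetical path; not the Hadoop helper itself) tests whether a directory tree contains any regular file, which is the property DatanodeUtil.dirNoFilesRecursive verifies for the finalized directory.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.stream.Stream;

public class DirEmptinessSketch {
  // Returns true if no regular file exists anywhere under dir.
  static boolean hasNoFilesRecursive(Path dir) throws IOException {
    try (Stream<Path> walk = Files.walk(dir)) {
      return walk.noneMatch(Files::isRegularFile);
    }
  }

  public static void main(String[] args) throws IOException {
    System.out.println(hasNoFilesRecursive(Paths.get("/tmp/bp-finalized"))); // hypothetical path
  }
}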

Example 3: register

import org.apache.hadoop.hdfs.server.datanode.DataStorage; // import the required package/class
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
 
Developer ID: naver; Project: hadoop; Lines: 24; Source: NNThroughputBenchmark.java

Example 4: createStorageDirs

import org.apache.hadoop.hdfs.server.datanode.DataStorage; // import the required package/class
private static void createStorageDirs(DataStorage storage, Configuration conf,
    int numDirs) throws IOException {
  List<Storage.StorageDirectory> dirs =
      new ArrayList<Storage.StorageDirectory>();
  List<String> dirStrings = new ArrayList<String>();
  for (int i = 0; i < numDirs; i++) {
    File loc = new File(BASE_DIR + "/data" + i);
    dirStrings.add(new Path(loc.toString()).toUri().toString());
    loc.mkdirs();
    dirs.add(createStorageDirectory(loc));
    when(storage.getStorageDir(i)).thenReturn(dirs.get(i));
  }

  String dataDir = StringUtils.join(",", dirStrings);
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir);
  when(storage.dirIterator()).thenReturn(dirs.iterator());
  when(storage.getNumStorageDirs()).thenReturn(numDirs);
}
 
Developer ID: naver; Project: hadoop; Lines: 19; Source: TestFsDatasetImpl.java

Example 5: setUp

import org.apache.hadoop.hdfs.server.datanode.DataStorage; // import the required package/class
@Before
public void setUp() throws IOException {
  datanode = mock(DataNode.class);
  storage = mock(DataStorage.class);
  this.conf = new Configuration();
  this.conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 0);
  final DNConf dnConf = new DNConf(conf);

  when(datanode.getConf()).thenReturn(conf);
  when(datanode.getDnConf()).thenReturn(dnConf);
  final BlockScanner disabledBlockScanner = new BlockScanner(datanode, conf);
  when(datanode.getBlockScanner()).thenReturn(disabledBlockScanner);

  createStorageDirs(storage, conf, NUM_INIT_VOLUMES);
  dataset = new FsDatasetImpl(datanode, storage, conf);
  for (String bpid : BLOCK_POOL_IDS) {
    dataset.addBlockPool(bpid, conf);
  }

  assertEquals(NUM_INIT_VOLUMES, dataset.getVolumes().size());
  assertEquals(0, dataset.getNumFailedVolumes());
}
 
Developer ID: naver; Project: hadoop; Lines: 23; Source: TestFsDatasetImpl.java

Example 6: createDataNodeVersionFile

import org.apache.hadoop.hdfs.server.datanode.DataStorage; // import the required package/class
/**
 * Create a <code>version</code> file for datanode inside the specified parent
 * directory.  If such a file already exists, it will be overwritten.
 * The given version string will be written to the file as the layout
 * version. None of the parameters may be null.
 *
 * @param parent directories in which to create the datanode VERSION file
 * @param version StorageInfo to create VERSION file from
 * @param bpid Block pool Id
 * @param bpidToWrite Block pool Id to write into the version file
 */
public static void createDataNodeVersionFile(File[] parent,
    StorageInfo version, String bpid, String bpidToWrite) throws IOException {
  DataStorage storage = new DataStorage(version);
  storage.setDatanodeUuid("FixedDatanodeUuid");

  File[] versionFiles = new File[parent.length];
  for (int i = 0; i < parent.length; i++) {
    File versionFile = new File(parent[i], "VERSION");
    StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
    storage.createStorageID(sd, false);
    storage.writeProperties(versionFile, sd);
    versionFiles[i] = versionFile;
    File bpDir = BlockPoolSliceStorage.getBpRoot(bpid, parent[i]);
    createBlockPoolVersionFile(bpDir, version, bpidToWrite);
  }
}
 
Developer ID: naver; Project: hadoop; Lines: 28; Source: UpgradeUtilities.java
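For context, a datanode VERSION file is an ordinary java.util.Properties file, which is why the example can simply call storage.writeProperties. The sketch below writes a comparable file by hand; the field names follow Hadoop 2.x datanode VERSION files, but the values are invented, and the real writeProperties additionally takes a storage lock and writes atomically.

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Properties;

public class VersionFileSketch {
  public static void main(String[] args) throws IOException {
    Properties props = new Properties();
    // Field names follow Hadoop 2.x datanode VERSION files; values are invented.
    props.setProperty("storageType", "DATA_NODE");
    props.setProperty("layoutVersion", "-56");
    props.setProperty("clusterID", "CID-example");
    props.setProperty("datanodeUuid", "FixedDatanodeUuid");
    props.setProperty("cTime", "0");

    try (FileOutputStream out = new FileOutputStream(new File("VERSION"))) {
      props.store(out, null);
    }
  }
}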

Example 7: register

import org.apache.hadoop.hdfs.server.datanode.DataStorage; // import the required package/class
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = dataNodeProto.registerDatanode(dnRegistration);
  dnRegistration.setNamespaceInfo(nsInfo);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  dataNodeProto.blockReport(dnRegistration, bpid, reports,
          new BlockReportContext(1, 0, System.nanoTime(), 0L));
}
 
Developer ID: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines: 24; Source: NNThroughputBenchmark.java

Example 8: verifyBlockPoolExists

import org.apache.hadoop.hdfs.server.datanode.DataStorage; // import the required package/class
@Override
public void verifyBlockPoolExists(String bpid) throws IOException {
  FsVolumeImpl volume;
  try (FsVolumeReferences references = dataset.getFsVolumeReferences()) {
    volume = (FsVolumeImpl) references.get(0);
  }
  File bpDir = new File(volume.getCurrentDir(), bpid);
  File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
  File finalizedDir = new File(bpCurrentDir,
      DataStorage.STORAGE_DIR_FINALIZED);
  File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
  File versionFile = new File(bpCurrentDir, "VERSION");

  if (!finalizedDir.isDirectory()) {
    throw new IOException(finalizedDir.getPath() + " is not a directory.");
  }
  if (!rbwDir.isDirectory()) {
    throw new IOException(rbwDir.getPath() + " is not a directory.");
  }
  if (!versionFile.exists()) {
    throw new IOException(
        "Version file: " + versionFile.getPath() + " does not exist.");
  }
}
 
Developer ID: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines: 25; Source: FsDatasetImplTestUtils.java

Example 9: setUp

import org.apache.hadoop.hdfs.server.datanode.DataStorage; // import the required package/class
@Before
public void setUp() throws IOException {
  datanode = mock(DataNode.class);
  storage = mock(DataStorage.class);
  this.conf = new Configuration();
  this.conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 0);
  final DNConf dnConf = new DNConf(conf);

  when(datanode.getConf()).thenReturn(conf);
  when(datanode.getDnConf()).thenReturn(dnConf);
  final BlockScanner disabledBlockScanner = new BlockScanner(datanode, conf);
  when(datanode.getBlockScanner()).thenReturn(disabledBlockScanner);
  final ShortCircuitRegistry shortCircuitRegistry =
      new ShortCircuitRegistry(conf);
  when(datanode.getShortCircuitRegistry()).thenReturn(shortCircuitRegistry);

  createStorageDirs(storage, conf, NUM_INIT_VOLUMES);
  dataset = new FsDatasetImpl(datanode, storage, conf);
  for (String bpid : BLOCK_POOL_IDS) {
    dataset.addBlockPool(bpid, conf);
  }

  assertEquals(NUM_INIT_VOLUMES, getNumVolumes());
  assertEquals(0, dataset.getNumFailedVolumes());
}
 
Developer ID: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines: 26; Source: TestFsDatasetImpl.java

Example 10: register

import org.apache.hadoop.hdfs.server.datanode.DataStorage; // import the required package/class
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage,
          new BlockListAsLongs(null, null).getBlockListAsLongs())
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
 
Developer ID: Nextzero; Project: hadoop-2.6.0-cdh5.4.3; Lines: 25; Source: NNThroughputBenchmark.java

Example 11: transferBlocks

import org.apache.hadoop.hdfs.server.datanode.DataStorage; // import the required package/class
/**
 * Transfer blocks to another data-node.
 * Just report on behalf of the other data-node
 * that the blocks have been received.
 */
private int transferBlocks( Block blocks[], 
                            DatanodeInfo xferTargets[][],
                            String targetStorageIDs[][]
                          ) throws IOException {
  for(int i = 0; i < blocks.length; i++) {
    DatanodeInfo blockTargets[] = xferTargets[i];
    for(int t = 0; t < blockTargets.length; t++) {
      DatanodeInfo dnInfo = blockTargets[t];
      String targetStorageID = targetStorageIDs[i][t];
      DatanodeRegistration receivedDNReg;
      receivedDNReg = new DatanodeRegistration(dnInfo,
        new DataStorage(nsInfo),
        new ExportedBlockKeys(), VersionInfo.getVersion());
      ReceivedDeletedBlockInfo[] rdBlocks = {
        new ReceivedDeletedBlockInfo(
              blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
              null) };
      StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
          targetStorageID, rdBlocks) };
      nameNodeProto.blockReceivedAndDeleted(receivedDNReg, nameNode
          .getNamesystem().getBlockPoolId(), report);
    }
  }
  return blocks.length;
}
 
Developer ID: Nextzero; Project: hadoop-2.6.0-cdh5.4.3; Lines: 31; Source: NNThroughputBenchmark.java

Example 12: createStorageDirs

import org.apache.hadoop.hdfs.server.datanode.DataStorage; // import the required package/class
private static void createStorageDirs(DataStorage storage, Configuration conf,
    int numDirs) throws IOException {
  List<Storage.StorageDirectory> dirs =
      new ArrayList<Storage.StorageDirectory>();
  List<String> dirStrings = new ArrayList<String>();
  for (int i = 0; i < numDirs; i++) {
    File loc = new File(BASE_DIR + "/data" + i);
    dirStrings.add(loc.toString());
    loc.mkdirs();
    dirs.add(createStorageDirectory(loc));
    when(storage.getStorageDir(i)).thenReturn(dirs.get(i));
  }

  String dataDir = StringUtils.join(",", dirStrings);
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir);
  when(storage.dirIterator()).thenReturn(dirs.iterator());
  when(storage.getNumStorageDirs()).thenReturn(numDirs);
}
 
Developer ID: Nextzero; Project: hadoop-2.6.0-cdh5.4.3; Lines: 19; Source: TestFsDatasetImpl.java

Example 13: transferBlocks

import org.apache.hadoop.hdfs.server.datanode.DataStorage; // import the required package/class
/**
 * Transfer blocks to another data-node. Just report on behalf of the
 * other data-node that the blocks have been received.
 */
private int transferBlocks(Block blocks[], DatanodeInfo xferTargets[][])
    throws IOException {
  for (int i = 0; i < blocks.length; i++) {
    DatanodeInfo blockTargets[] = xferTargets[i];
    for (int t = 0; t < blockTargets.length; t++) {
      DatanodeInfo dnInfo = blockTargets[t];
      DatanodeRegistration receivedDNReg;
      receivedDNReg = new DatanodeRegistration(dnInfo.getName());
      receivedDNReg.setStorageInfo(new DataStorage(nsInfo,
          dnInfo.getStorageID(), null), dnInfo.getStorageID());
      receivedDNReg.setInfoPort(dnInfo.getInfoPort());
      Block[] bi = new Block[] { blocks[i] };
      nameNode.blockReceivedAndDeleted(receivedDNReg, bi);
    }
  }
  // simulate block deletion from src
  Block[] blocksDeleted = new Block[blocks.length];
  for (int i = 0; i < blocksDeleted.length; i++) {
    blocksDeleted[i] = new Block(blocks[i]);
    DFSUtil.markAsDeleted(blocksDeleted[i]);
  }
  nameNode.blockReceivedAndDeleted(dnRegistration, blocksDeleted);
  return blocks.length;
}
 
Developer ID: rhli; Project: hadoop-EAR; Lines: 29; Source: NNThroughputBenchmark.java

Example 14: register

import org.apache.hadoop.hdfs.server.datanode.DataStorage; // import the required package/class
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          "", getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo, ""),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  DataNode.setNewStorageID(dnRegistration);
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(dnRegistration.getStorageID());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage,
          new BlockListAsLongs(null, null).getBlockListAsLongs())
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports);
}
 
Developer ID: ict-carch; Project: hadoop-plus; Lines: 24; Source: NNThroughputBenchmark.java

Example 15: transferBlocks

import org.apache.hadoop.hdfs.server.datanode.DataStorage; // import the required package/class
/**
 * Transfer blocks to another data-node.
 * Just report on behalf of the other data-node
 * that the blocks have been received.
 */
private int transferBlocks( Block blocks[], 
                            DatanodeInfo xferTargets[][] 
                          ) throws IOException {
  for(int i = 0; i < blocks.length; i++) {
    DatanodeInfo blockTargets[] = xferTargets[i];
    for(int t = 0; t < blockTargets.length; t++) {
      DatanodeInfo dnInfo = blockTargets[t];
      DatanodeRegistration receivedDNReg;
      receivedDNReg = new DatanodeRegistration(dnInfo,
        new DataStorage(nsInfo, dnInfo.getStorageID()),
        new ExportedBlockKeys(), VersionInfo.getVersion());
      ReceivedDeletedBlockInfo[] rdBlocks = {
        new ReceivedDeletedBlockInfo(
              blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
              null) };
      StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
          receivedDNReg.getStorageID(), rdBlocks) };
      nameNodeProto.blockReceivedAndDeleted(receivedDNReg, nameNode
          .getNamesystem().getBlockPoolId(), report);
    }
  }
  return blocks.length;
}
 
Developer ID: ict-carch; Project: hadoop-plus; Lines: 29; Source: NNThroughputBenchmark.java


Note: the org.apache.hadoop.hdfs.server.datanode.DataStorage class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. For distribution and use, follow the license of the corresponding project. Do not reproduce without permission.