Java DataStorage.STORAGE_DIR_FINALIZED Field Code Examples

This article compiles typical usage examples of the Java field org.apache.hadoop.hdfs.server.datanode.DataStorage.STORAGE_DIR_FINALIZED. If you are wondering what DataStorage.STORAGE_DIR_FINALIZED is for and how to use it, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.datanode.DataStorage.


Thirteen code examples of DataStorage.STORAGE_DIR_FINALIZED are shown below, sorted by popularity by default.
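
Before working through the examples, it helps to see the on-disk layout these constants describe. Below is a minimal, self-contained sketch of how the paths compose; the volume root and block pool id are made-up placeholders, and the string literals mirror the values of the Hadoop constants (Storage.STORAGE_DIR_CURRENT is "current", DataStorage.STORAGE_DIR_FINALIZED is "finalized", DataStorage.STORAGE_DIR_RBW is "rbw").

import java.io.File;

public class BlockPoolLayoutSketch {
  public static void main(String[] args) {
    // Hypothetical volume root and block pool id, for illustration only.
    File volumeRoot = new File("/data/dfs/dn");
    String bpid = "BP-1234567890-127.0.0.1-1000000000000";

    File volumeCurrentDir = new File(volumeRoot, "current");   // Storage.STORAGE_DIR_CURRENT
    File bpDir = new File(volumeCurrentDir, bpid);
    File bpCurrentDir = new File(bpDir, "current");
    File finalizedDir = new File(bpCurrentDir, "finalized");   // DataStorage.STORAGE_DIR_FINALIZED
    File rbwDir = new File(bpCurrentDir, "rbw");               // DataStorage.STORAGE_DIR_RBW

    System.out.println(finalizedDir); // /data/dfs/dn/current/BP-.../current/finalized
    System.out.println(rbwDir);       // /data/dfs/dn/current/BP-.../current/rbw
  }
}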

Example 1: isBPDirEmpty

boolean isBPDirEmpty(String bpid) throws IOException {
  File volumeCurrentDir = this.getCurrentDir();
  File bpDir = new File(volumeCurrentDir, bpid);
  File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
  File finalizedDir = new File(bpCurrentDir,
      DataStorage.STORAGE_DIR_FINALIZED);
  File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
  if (finalizedDir.exists() && !DatanodeUtil.dirNoFilesRecursive(
      finalizedDir)) {
    return false;
  }
  if (rbwDir.exists() && FileUtil.list(rbwDir).length != 0) {
    return false;
  }
  return true;
}
 
Developer: naver, Project: hadoop, Lines: 16, Source: FsVolumeImpl.java
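
Example 1 considers the finalized tree empty as long as it contains no regular files, even if empty subdirectories remain. A minimal sketch of such a recursive check follows; note this is an illustrative re-implementation written for this article, not the actual DatanodeUtil.dirNoFilesRecursive source.

import java.io.File;
import java.io.IOException;

public class DirEmptinessSketch {
  /** Returns true if the tree under dir contains no regular files. */
  static boolean noFilesRecursive(File dir) throws IOException {
    File[] contents = dir.listFiles();
    if (contents == null) {
      throw new IOException("Could not list contents of " + dir);
    }
    for (File f : contents) {
      if (f.isFile()) {
        return false;                  // a file (e.g. a replica) exists
      }
      if (f.isDirectory() && !noFilesRecursive(f)) {
        return false;                  // a subdirectory holds files
      }
    }
    return true;                       // only an empty directory skeleton is left
  }
}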

Example 2: verifyBlockPoolExists

@Override
public void verifyBlockPoolExists(String bpid) throws IOException {
  FsVolumeImpl volume;
  try (FsVolumeReferences references = dataset.getFsVolumeReferences()) {
    volume = (FsVolumeImpl) references.get(0);
  }
  File bpDir = new File(volume.getCurrentDir(), bpid);
  File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
  File finalizedDir = new File(bpCurrentDir,
      DataStorage.STORAGE_DIR_FINALIZED);
  File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
  File versionFile = new File(bpCurrentDir, "VERSION");

  if (!finalizedDir.isDirectory()) {
    throw new IOException(finalizedDir.getPath() + " is not a directory.");
  }
  if (!rbwDir.isDirectory()) {
    throw new IOException(rbwDir.getPath() + " is not a directory.");
  }
  if (!versionFile.exists()) {
    throw new IOException(
        "Version file: " + versionFile.getPath() + " does not exist.");
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 24, Source: FsDatasetImplTestUtils.java
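
Note the try-with-resources block around dataset.getFsVolumeReferences(): holding the references pins the volume list while the first volume is picked, and closing it releases those references promptly. The checks that follow operate only on java.io.File paths derived from the volume, so they do not require the volume to stay referenced.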

Example 3: checkResult

/**
 * Verify that the current directory exists and that the previous directory
 * does not exist. Verify that current has not been modified by comparing
 * the checksums of all its contained files against their original checksums.
 */
static void checkResult(String[] nameNodeDirs, String[] dataNodeDirs,
  String bpid) throws Exception {
  List<File> dirs = Lists.newArrayList();
  for (int i = 0; i < nameNodeDirs.length; i++) {
    File curDir = new File(nameNodeDirs[i], "current");
    dirs.add(curDir);
    FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
  }
  
  FSImageTestUtil.assertParallelFilesAreIdentical(
      dirs, Collections.<String>emptySet());
  
  File dnCurDirs[] = new File[dataNodeDirs.length];
  for (int i = 0; i < dataNodeDirs.length; i++) {
    dnCurDirs[i] = new File(dataNodeDirs[i],"current");
    assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, dnCurDirs[i],
            false), UpgradeUtilities.checksumMasterDataNodeContents());
  }
  for (int i = 0; i < nameNodeDirs.length; i++) {
    assertFalse(new File(nameNodeDirs[i],"previous").isDirectory());
  }

  if (bpid == null) {
    for (int i = 0; i < dataNodeDirs.length; i++) {
      assertFalse(new File(dataNodeDirs[i],"previous").isDirectory());
    }
  } else {
    for (int i = 0; i < dataNodeDirs.length; i++) {
      File bpRoot = BlockPoolSliceStorage.getBpRoot(bpid, dnCurDirs[i]);
      assertFalse(new File(bpRoot,"previous").isDirectory());
      
      File bpCurFinalizeDir = new File(bpRoot,"current/"+DataStorage.STORAGE_DIR_FINALIZED);
      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE,
              bpCurFinalizeDir, true),
              UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 43, Source: TestDFSFinalize.java
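
In TestDFSFinalize this helper runs after an upgraded cluster has been finalized: once finalized there must be no previous directories left to roll back to, while every current directory must still match the checksums recorded before the upgrade.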

Example 4: isBPDirEmpty

boolean isBPDirEmpty(String bpid) throws IOException {
  File volumeCurrentDir = this.getCurrentDir();
  File bpDir = new File(volumeCurrentDir, bpid);
  File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
  File finalizedDir = new File(bpCurrentDir,
      DataStorage.STORAGE_DIR_FINALIZED);
  File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
  if (finalizedDir.exists() && FileUtil.list(finalizedDir).length != 0) {
    return false;
  }
  if (rbwDir.exists() && FileUtil.list(rbwDir).length != 0) {
    return false;
  }
  return true;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 15, Source: FsVolumeImpl.java
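
Unlike Example 1, this older variant decides emptiness with FileUtil.list(finalizedDir).length != 0, so a finalized tree that still contains (empty) subdirectories is reported as non-empty; Example 1's recursive check ignores directories and looks only for files.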

Example 5: isBPDirEmpty

boolean isBPDirEmpty(String bpid) throws IOException {
  File volumeCurrentDir = this.getCurrentDir();
  File bpDir = new File(volumeCurrentDir, bpid);
  File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
  File finalizedDir =
      new File(bpCurrentDir, DataStorage.STORAGE_DIR_FINALIZED);
  File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
  if (finalizedDir.exists() && FileUtil.list(finalizedDir).length != 0) {
    return false;
  }
  if (rbwDir.exists() && FileUtil.list(rbwDir).length != 0) {
    return false;
  }
  return true;
}
 
Developer: hopshadoop, Project: hops, Lines: 15, Source: FsVolumeImpl.java

Example 6: BlockPoolSlice

/**
 * Create a block pool slice.
 * @param bpid Block pool Id
 * @param volume {@link FsVolumeImpl} to which this BlockPool belongs
 * @param bpDir directory corresponding to the BlockPool
 * @param conf configuration
 * @throws IOException
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
    Configuration conf) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); 
  this.finalizedDir = new File(
      currentDir, DataStorage.STORAGE_DIR_FINALIZED);
  this.lazypersistDir = new File(currentDir, DataStorage.STORAGE_DIR_LAZY_PERSIST);
  if (!this.finalizedDir.exists()) {
    if (!this.finalizedDir.mkdirs()) {
      throw new IOException("Failed to mkdirs " + this.finalizedDir);
    }
  }

  this.deleteDuplicateReplicas = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION,
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION_DEFAULT);

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
  final boolean supportAppends = conf.getBoolean(
      DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
      DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
  if (rbwDir.exists() && !supportAppends) {
    FileUtil.fullyDelete(rbwDir);
  }
  if (!rbwDir.mkdirs()) {  // create the rbw directory if it does not exist
    if (!rbwDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + rbwDir.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  // Use the cached value initially if available; otherwise the following
  // call will block until the initial du command completes.
  this.dfsUsage = new DU(bpDir, conf, loadDfsUsed());
  this.dfsUsage.start();

  // Ensure the dfs usage figure is saved during shutdown.
  ShutdownHookManager.get().addShutdownHook(
    new Runnable() {
      @Override
      public void run() {
        if (!dfsUsedSaved) {
          saveDfsUsed();
        }
      }
    }, SHUTDOWN_HOOK_PRIORITY);
}
 
Developer: naver, Project: hadoop, Lines: 68, Source: BlockPoolSlice.java
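
Two details are worth noting in this constructor: the DU monitor is seeded with loadDfsUsed(), so startup does not block on a fresh du scan when a cached value is available, and the registered shutdown hook persists the usage figure via saveDfsUsed() so the next startup can reuse it.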

Example 7: BlockPoolSlice

/**
 * Create a block pool slice.
 * @param bpid Block pool Id
 * @param volume {@link FsVolumeImpl} to which this BlockPool belongs
 * @param bpDir directory corresponding to the BlockPool
 * @param conf configuration
 * @throws IOException
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
    Configuration conf) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); 
  this.finalizedDir = new File(
      currentDir, DataStorage.STORAGE_DIR_FINALIZED);
  this.lazypersistDir = new File(currentDir, DataStorage.STORAGE_DIR_LAZY_PERSIST);
  if (!this.finalizedDir.exists()) {
    if (!this.finalizedDir.mkdirs()) {
      throw new IOException("Failed to mkdirs " + this.finalizedDir);
    }
  }

  this.ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(conf);

  this.deleteDuplicateReplicas = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION,
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION_DEFAULT);

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
  if (!rbwDir.mkdirs()) {  // create the rbw directory if it does not exist
    if (!rbwDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + rbwDir.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  // Use the cached value initially if available; otherwise the following
  // call will block until the initial du command completes.
  this.dfsUsage = new DU(bpDir, conf, loadDfsUsed());
  this.dfsUsage.start();

  // Ensure the dfs usage figure is saved during shutdown.
  ShutdownHookManager.get().addShutdownHook(
    new Runnable() {
      @Override
      public void run() {
        if (!dfsUsedSaved) {
          saveDfsUsed();
        }
      }
    }, SHUTDOWN_HOOK_PRIORITY);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 64, Source: BlockPoolSlice.java

Example 8: BlockPoolSlice

/**
 * Create a block pool slice.
 * @param bpid Block pool Id
 * @param volume {@link FsVolumeImpl} to which this BlockPool belongs
 * @param bpDir directory corresponding to the BlockPool
 * @param conf configuration
 * @throws IOException
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
    Configuration conf) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); 
  final File finalizedDir = new File(
      currentDir, DataStorage.STORAGE_DIR_FINALIZED);

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
  final boolean supportAppends = conf.getBoolean(
      DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
      DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
  if (rbwDir.exists() && !supportAppends) {
    FileUtil.fullyDelete(rbwDir);
  }
  final int maxBlocksPerDir = conf.getInt(
      DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_KEY,
      DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_DEFAULT);
  this.finalizedDir = new LDir(finalizedDir, maxBlocksPerDir);
  if (!rbwDir.mkdirs()) {  // create the rbw directory if it does not exist
    if (!rbwDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + rbwDir.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  this.dfsUsage = new DU(bpDir, conf);
  this.dfsUsage.start();
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 49, Source: BlockPoolSlice.java

Example 9: BlockPoolSlice

/**
 * Create a block pool slice.
 *
 * @param bpid
 *     Block pool Id
 * @param volume
 *     {@link FsVolumeImpl} to which this BlockPool belongs
 * @param bpDir
 *     directory corresponding to the BlockPool
 * @param conf
 *     configuration
 * @throws IOException
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
    Configuration conf) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
  final File finalizedDir =
      new File(currentDir, DataStorage.STORAGE_DIR_FINALIZED);

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
  final boolean supportAppends =
      conf.getBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
          DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
  if (rbwDir.exists() && !supportAppends) {
    FileUtil.fullyDelete(rbwDir);
  }
  final int maxBlocksPerDir =
      conf.getInt(DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_KEY,
          DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_DEFAULT);
  this.finalizedDir = new LDir(finalizedDir, maxBlocksPerDir);
  if (!rbwDir.mkdirs()) {  // create the rbw directory if it does not exist
    if (!rbwDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + rbwDir.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  this.dfsUsage = new DU(bpDir, conf);
  this.dfsUsage.start();
}
 
Developer: hopshadoop, Project: hops, Lines: 53, Source: BlockPoolSlice.java

Example 10: initialize

/**
 * Initialize the data structures used by this class.
 * IMPORTANT NOTE: This method must be called once before calling
 * any other public method on this class.
 * <p/>
 * Creates a singleton master populated storage
 * directory for a Namenode (containing edits, fsimage,
 * version, and time files) and a Datanode (containing version and
 * block files).  This can be a lengthy operation.
 */
public static void initialize() throws Exception {
  createEmptyDirs(new String[]{TEST_ROOT_DIR.toString()});
  Configuration config = new HdfsConfiguration();
  config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
      datanodeStorage.toString());
  MiniDFSCluster cluster = null;
  try {
    // format data-node
    createEmptyDirs(new String[]{datanodeStorage.toString()});
    
    // format and start NameNode and start DataNode
    DFSTestUtil.formatNameNode(config);
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1)
        .startupOption(StartupOption.REGULAR).format(false)
        .manageDataDfsDirs(false).manageNameDfsDirs(false).build();

    NamenodeProtocols namenode = cluster.getNameNodeRpc();
    namenodeStorageNamespaceID = namenode.versionRequest().getNamespaceID();
    namenodeStorageFsscTime = namenode.versionRequest().getCTime();
    namenodeStorageClusterID = namenode.versionRequest().getClusterID();
    namenodeStorageBlockPoolID = namenode.versionRequest().getBlockPoolID();
    
    FileSystem fs = FileSystem.get(config);
    Path baseDir = new Path("/TestUpgrade");
    fs.mkdirs(baseDir);
    
    // write some files
    int bufferSize = 4096;
    byte[] buffer = new byte[bufferSize];
    for (int i = 0; i < bufferSize; i++) {
      buffer[i] = (byte) ('0' + i % 50);
    }
    writeFile(fs, new Path(baseDir, "file1"), buffer, bufferSize);
    writeFile(fs, new Path(baseDir, "file2"), buffer, bufferSize);

    
    // write more files
    writeFile(fs, new Path(baseDir, "file3"), buffer, bufferSize);
    writeFile(fs, new Path(baseDir, "file4"), buffer, bufferSize);
  } finally {
    // shutdown
    if (cluster != null) {
      cluster.shutdown();
    }
    FileUtil.fullyDelete(new File(namenodeStorage, "in_use.lock"));
    FileUtil.fullyDelete(new File(datanodeStorage, "in_use.lock"));
  }
  File dnCurDir = new File(datanodeStorage, "current");
  datanodeStorageChecksum = checksumContents(DATA_NODE, dnCurDir);
  
  String bpid = cluster.getNamesystem(0).getBlockPoolId();
  File bpCurDir =
      new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir), "current");
  blockPoolStorageChecksum = checksumContents(DATA_NODE, bpCurDir);
  
  File bpCurFinalizeDir =
      new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
          "current/" + DataStorage.STORAGE_DIR_FINALIZED);
  blockPoolFinalizedStorageChecksum =
      checksumContents(DATA_NODE, bpCurFinalizeDir);
  
  File bpCurRbwDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
      "current/" + DataStorage.STORAGE_DIR_RBW);
  blockPoolRbwStorageChecksum = checksumContents(DATA_NODE, bpCurRbwDir);
}
 
Developer: hopshadoop, Project: hops, Lines: 75, Source: UpgradeUtilities.java
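
Note that the master checksums are computed only after the cluster has shut down and the in_use.lock files have been deleted, so they capture a stable on-disk image that the upgrade and finalize tests (for example, Example 3 above) can later compare against.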

Example 11: BlockPoolSlice

/**
 * Create a block pool slice.
 * @param bpid Block pool Id
 * @param volume {@link FsVolumeImpl} to which this BlockPool belongs
 * @param bpDir directory corresponding to the BlockPool
 * @param conf configuration
 * @throws IOException
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
    Configuration conf) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); 
  final File finalizedDir = new File(
      currentDir, DataStorage.STORAGE_DIR_FINALIZED);

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
  final boolean supportAppends = conf.getBoolean(
      DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
      DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
  if (rbwDir.exists() && !supportAppends) {
    FileUtil.fullyDelete(rbwDir);
  }
  final int maxBlocksPerDir = conf.getInt(
      DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_KEY,
      DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_DEFAULT);
  this.finalizedDir = new LDir(finalizedDir, maxBlocksPerDir);
  if (!rbwDir.mkdirs()) {  // create the rbw directory if it does not exist
    if (!rbwDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + rbwDir.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  // Use the cached value initially if available; otherwise the following
  // call will block until the initial du command completes.
  this.dfsUsage = new DU(bpDir, conf, loadDfsUsed());
  this.dfsUsage.start();

  // Ensure the dfs usage figure is saved during shutdown.
  ShutdownHookManager.get().addShutdownHook(
    new Runnable() {
      @Override
      public void run() {
        if (!dfsUsedSaved) {
          saveDfsUsed();
        }
      }
    }, SHUTDOWN_HOOK_PRIORITY);
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 62, Source: BlockPoolSlice.java

Example 12: getFinalizedDir

/**
 * Get finalized directory for a block pool
 * @param storageDir storage directory
 * @param bpid Block pool Id
 * @return finalized directory for a block pool
 */
public static File getFinalizedDir(File storageDir, String bpid) {
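  // Note: getBPDir(...) is expected to return a path string that already
  // ends with a file separator, so the plain concatenation below is safe.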
  return new File(getBPDir(storageDir, bpid, Storage.STORAGE_DIR_CURRENT)
      + DataStorage.STORAGE_DIR_FINALIZED);
}
 
Developer: naver, Project: hadoop, Lines: 10, Source: MiniDFSCluster.java
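
A hedged usage sketch: in a MiniDFSCluster-based test, the finalized directory of a specific volume might be resolved as follows. The getInstanceStorageDir and getBlockPoolId accessors are the ones such tests typically use, and the cluster setup is assumed to happen elsewhere.

import java.io.File;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class FinalizedDirLookup {
  /** Resolve the finalized dir of datanode 0, volume 0 (illustrative). */
  static File finalizedDirOfFirstVolume(MiniDFSCluster cluster) {
    File storageDir = cluster.getInstanceStorageDir(0, 0);
    String bpid = cluster.getNamesystem().getBlockPoolId();
    return MiniDFSCluster.getFinalizedDir(storageDir, bpid);
  }
}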

Example 13: getFinalizedDir

/**
 * Get finalized directory for a block pool
 *
 * @param storageDir
 *     storage directory
 * @param bpid
 *     Block pool Id
 * @return finalized directory for a block pool
 */
public static File getFinalizedDir(File storageDir, String bpid) {
  return new File(getBPDir(storageDir, bpid, Storage.STORAGE_DIR_CURRENT) +
      DataStorage.STORAGE_DIR_FINALIZED);
}
 
Developer: hopshadoop, Project: hops, Lines: 13, Source: MiniDFSCluster.java


Note: the org.apache.hadoop.hdfs.server.datanode.DataStorage.STORAGE_DIR_FINALIZED examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms; the snippets were selected from community-contributed open-source projects. Copyright of the source code remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.