

Java DU Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.fs.DU. If you have been wondering how the DU class works, how to use it, or what real code that uses it looks like, the curated examples below should help.


The DU class belongs to the org.apache.hadoop.fs package. A total of 9 code examples of the DU class are shown below, sorted by popularity by default.
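Before the individual examples, here is a minimal sketch of how the DU class itself is typically driven. It assumes the classic Hadoop API, in which DU(File, Configuration) wraps the platform du command, start() launches a background refresh thread, getUsed() returns the most recently sampled value, and shutdown() stops the thread; the /data/dfs path is purely illustrative.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DU;

public class DuUsageSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // DU shells out to `du -sk <dir>` and caches the result; the refresh
    // interval is taken from the configuration.
    DU dfsUsage = new DU(new File("/data/dfs"), conf);  // illustrative path
    dfsUsage.start();  // begin periodic background refreshes
    try {
      System.out.println("used bytes: " + dfsUsage.getUsed());
    } finally {
      dfsUsage.shutdown();  // stop the refresh thread
    }
  }
}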

Example 1: FSVolume

import org.apache.hadoop.fs.DU; // import the required package/class
FSVolume(FSDataset dataset, File currentDir, Configuration conf) throws IOException {
  this.currentDir = currentDir; 
  File parent = currentDir.getParentFile();
  this.usage = new DF(parent, conf);
  this.reserved = usage.getReserved();
  this.dataset = dataset;
  this.namespaceMap = new NamespaceMap();
  this.dfsUsage = new DU(currentDir, conf);
  this.dfsUsage.start();
  this.nativeIOExecutor = Executors.newSingleThreadExecutor();
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 12, Source: FSDataset.java

Example 2: FSVolume

import org.apache.hadoop.fs.DU; // import the required package/class
FSVolume(File currentDir, Configuration conf) throws IOException {
  this.reserved = conf.getLong("dfs.datanode.du.reserved", 0);
  this.currentDir = currentDir; 
  File parent = currentDir.getParentFile();
  final File finalizedDir = new File(
      currentDir, DataStorage.STORAGE_DIR_FINALIZED);

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(parent, "tmp");
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
  // supportAppends is not defined in this excerpt (it is presumably a field
  // of the enclosing class); reading it from the configuration, as the other
  // examples do, is assumed here to keep the snippet self-contained.
  final boolean supportAppends = conf.getBoolean("dfs.support.append", false);
  if (rbwDir.exists() && !supportAppends) {
    FileUtil.fullyDelete(rbwDir);
  }
  this.dataDir = new FSDir(finalizedDir);
  if (!rbwDir.mkdirs()) {  // create rbw directory if not exist
    if (!rbwDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + rbwDir.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  this.usage = new DF(parent, conf);
  this.dfsUsage = new DU(parent, conf);
  this.dfsUsage.start();
}
 
Developer ID: cumulusyebl, Project: cumulus, Lines of code: 36, Source: FSDataset.java
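Examples 1 and 2 pair DF (which wraps the platform df command and reports filesystem-level capacity) with DU (which reports the space consumed under one directory). Below is a minimal sketch of how a volume might combine the two to compute available space, loosely modeled on the FSVolume pattern above; the reserved key mirrors "dfs.datanode.du.reserved" from example 2, and only getAvailable()/getUsed() from the stock DF/DU APIs are used.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DF;
import org.apache.hadoop.fs.DU;

class VolumeSpaceSketch {
  private final DF partitionUsage;  // filesystem-level view (df)
  private final DU dfsUsage;        // directory-level view (du)
  private final long reserved;      // bytes kept free for non-DFS use

  VolumeSpaceSketch(File dataDir, Configuration conf) throws IOException {
    File parent = dataDir.getParentFile();
    this.partitionUsage = new DF(parent, conf);
    this.dfsUsage = new DU(dataDir, conf);
    this.dfsUsage.start();
    this.reserved = conf.getLong("dfs.datanode.du.reserved", 0);
  }

  /** Space still usable by DFS: partition free space minus the reserve. */
  long getAvailable() throws IOException {
    long remaining = partitionUsage.getAvailable() - reserved;
    return remaining > 0 ? remaining : 0;
  }

  long getDfsUsed() throws IOException {
    return dfsUsage.getUsed();
  }
}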

Example 3: FSVolume

import org.apache.hadoop.fs.DU; // import the required package/class
FSVolume(FSDataset dataset, File currentDir, Configuration conf) throws IOException {
  this.currentDir = currentDir; 
  File parent = currentDir.getParentFile();
  this.usage = new DF(parent, conf);
  this.reserved = usage.getReserved();
  this.dataset = dataset;
  this.namespaceMap = new NamespaceMap();
  this.dfsUsage = new DU(currentDir, conf);
  this.dfsUsage.start();
}
 
Developer ID: iVCE, Project: RDFS, Lines of code: 11, Source: FSDataset.java

Example 4: BlockPoolSlice

import org.apache.hadoop.fs.DU; // import the required package/class
/**
 * Create a block pool slice
 * @param bpid Block pool Id
 * @param volume {@link FsVolumeImpl} to which this BlockPool belongs
 * @param bpDir directory corresponding to the BlockPool
 * @param conf configuration
 * @throws IOException
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
    Configuration conf) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); 
  this.finalizedDir = new File(
      currentDir, DataStorage.STORAGE_DIR_FINALIZED);
  this.lazypersistDir = new File(currentDir, DataStorage.STORAGE_DIR_LAZY_PERSIST);
  if (!this.finalizedDir.exists()) {
    if (!this.finalizedDir.mkdirs()) {
      throw new IOException("Failed to mkdirs " + this.finalizedDir);
    }
  }

  this.deleteDuplicateReplicas = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION,
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION_DEFAULT);

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
  final boolean supportAppends = conf.getBoolean(
      DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
      DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
  if (rbwDir.exists() && !supportAppends) {
    FileUtil.fullyDelete(rbwDir);
  }
  if (!rbwDir.mkdirs()) {  // create rbw directory if not exist
    if (!rbwDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + rbwDir.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  // Use cached value initially if available. Or the following call will
  // block until the initial du command completes.
  this.dfsUsage = new DU(bpDir, conf, loadDfsUsed());
  this.dfsUsage.start();

  // Make the dfs usage to be saved during shutdown.
  ShutdownHookManager.get().addShutdownHook(
    new Runnable() {
      @Override
      public void run() {
        if (!dfsUsedSaved) {
          saveDfsUsed();
        }
      }
    }, SHUTDOWN_HOOK_PRIORITY);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 69, Source: BlockPoolSlice.java
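Example 4 seeds DU with a cached value via loadDfsUsed() so startup does not block on a full du scan, and a shutdown hook calls saveDfsUsed() to persist the latest figure. Those helpers are outside the excerpt; the sketch below is a plausible reconstruction, assuming a small "dfsUsed" cache file holding the byte count plus a write timestamp, with stale or unreadable entries treated as missing (DU interprets a negative initial value as "run du now").

import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Scanner;

class DfsUsedCacheSketch {
  // Assumed cache file name and staleness window; both are illustrative.
  private static final String DU_CACHE_FILE = "dfsUsed";
  private static final long MAX_AGE_MS = 10 * 60 * 1000L;

  private final File currentDir;

  DfsUsedCacheSketch(File currentDir) {
    this.currentDir = currentDir;
  }

  /** Returns the cached usage, or -1 so DU falls back to a fresh du scan. */
  long loadDfsUsed() {
    File cacheFile = new File(currentDir, DU_CACHE_FILE);
    try (Scanner in = new Scanner(cacheFile, "UTF-8")) {
      long usedBytes = Long.parseLong(in.next());
      long writtenAt = Long.parseLong(in.next());
      if (usedBytes < 0 || System.currentTimeMillis() - writtenAt > MAX_AGE_MS) {
        return -1L;  // absent or too old to trust
      }
      return usedBytes;
    } catch (IOException | NumberFormatException
        | java.util.NoSuchElementException e) {
      return -1L;    // no usable cache: the initial du will block once
    }
  }

  /** Persists the current usage with a timestamp for the next startup. */
  void saveDfsUsed(long usedBytes) {
    try (FileWriter out = new FileWriter(new File(currentDir, DU_CACHE_FILE))) {
      out.write(usedBytes + " " + System.currentTimeMillis());
    } catch (IOException e) {
      // best effort: losing the cache only costs one du scan at startup
    }
  }
}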

Example 5: BlockPoolSlice

import org.apache.hadoop.fs.DU; // import the required package/class
/**
 * Create a block pool slice
 * @param bpid Block pool Id
 * @param volume {@link FsVolumeImpl} to which this BlockPool belongs
 * @param bpDir directory corresponding to the BlockPool
 * @param conf configuration
 * @throws IOException
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
    Configuration conf) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); 
  this.finalizedDir = new File(
      currentDir, DataStorage.STORAGE_DIR_FINALIZED);
  this.lazypersistDir = new File(currentDir, DataStorage.STORAGE_DIR_LAZY_PERSIST);
  if (!this.finalizedDir.exists()) {
    if (!this.finalizedDir.mkdirs()) {
      throw new IOException("Failed to mkdirs " + this.finalizedDir);
    }
  }

  this.ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(conf);

  this.deleteDuplicateReplicas = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION,
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION_DEFAULT);

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
  if (!rbwDir.mkdirs()) {  // create rbw directory if not exist
    if (!rbwDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + rbwDir.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  // Use cached value initially if available. Or the following call will
  // block until the initial du command completes.
  this.dfsUsage = new DU(bpDir, conf, loadDfsUsed());
  this.dfsUsage.start();

  // Make the dfs usage to be saved during shutdown.
  ShutdownHookManager.get().addShutdownHook(
    new Runnable() {
      @Override
      public void run() {
        if (!dfsUsedSaved) {
          saveDfsUsed();
        }
      }
    }, SHUTDOWN_HOOK_PRIORITY);
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 65, Source: BlockPoolSlice.java

Example 6: BlockPoolSlice

import org.apache.hadoop.fs.DU; // import the required package/class
/**
 * Create a block pool slice
 * @param bpid Block pool Id
 * @param volume {@link FsVolumeImpl} to which this BlockPool belongs
 * @param bpDir directory corresponding to the BlockPool
 * @param conf configuration
 * @throws IOException
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
    Configuration conf) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); 
  final File finalizedDir = new File(
      currentDir, DataStorage.STORAGE_DIR_FINALIZED);

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
  final boolean supportAppends = conf.getBoolean(
      DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
      DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
  if (rbwDir.exists() && !supportAppends) {
    FileUtil.fullyDelete(rbwDir);
  }
  final int maxBlocksPerDir = conf.getInt(
      DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_KEY,
      DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_DEFAULT);
  this.finalizedDir = new LDir(finalizedDir, maxBlocksPerDir);
  if (!rbwDir.mkdirs()) {  // create rbw directory if not exist
    if (!rbwDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + rbwDir.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  this.dfsUsage = new DU(bpDir, conf);
  this.dfsUsage.start();
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 50, Source: BlockPoolSlice.java

Example 7: BlockPoolSlice

import org.apache.hadoop.fs.DU; // import the required package/class
/**
 * Create a block pool slice
 *
 * @param bpid
 *     Block pool Id
 * @param volume
 *     {@link FsVolumeImpl} to which this BlockPool belongs
 * @param bpDir
 *     directory corresponding to the BlockPool
 * @param conf
 *     configuration
 * @throws IOException
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
    Configuration conf) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
  final File finalizedDir =
      new File(currentDir, DataStorage.STORAGE_DIR_FINALIZED);

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
  final boolean supportAppends =
      conf.getBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
          DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
  if (rbwDir.exists() && !supportAppends) {
    FileUtil.fullyDelete(rbwDir);
  }
  final int maxBlocksPerDir =
      conf.getInt(DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_KEY,
          DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_DEFAULT);
  this.finalizedDir = new LDir(finalizedDir, maxBlocksPerDir);
  if (!rbwDir.mkdirs()) {  // create rbw directory if not exist
    if (!rbwDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + rbwDir.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  this.dfsUsage = new DU(bpDir, conf);
  this.dfsUsage.start();
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 54, Source: BlockPoolSlice.java

Example 8: FSVolume

import org.apache.hadoop.fs.DU; // import the required package/class
FSVolume(File currentDir, Configuration conf) throws IOException {
  this.reserved = conf.getLong("dfs.datanode.du.reserved", 0);
  this.dataDir = new FSDir(currentDir);
  this.currentDir = currentDir;
  boolean supportAppends = conf.getBoolean("dfs.support.append", false);
  File parent = currentDir.getParentFile();

  this.detachDir = new File(parent, "detach");
  if (detachDir.exists()) {
    recoverDetachedBlocks(currentDir, detachDir);
  }

  // remove all blocks from "tmp" directory. These were either created
  // by pre-append clients (0.18.x) or are part of replication request.
  // They can be safely removed.
  this.tmpDir = new File(parent, "tmp");
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  
  // Files that were being written when the datanode was last shutdown
  // should not be deleted.
  blocksBeingWritten = new File(parent, "blocksBeingWritten");
  if (blocksBeingWritten.exists()) {
    if (supportAppends) {  
      recoverBlocksBeingWritten(blocksBeingWritten);
    } else {
      FileUtil.fullyDelete(blocksBeingWritten);
    }
  }
  
  if (!blocksBeingWritten.mkdirs()) {
    if (!blocksBeingWritten.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + blocksBeingWritten.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  if (!detachDir.mkdirs()) {
    if (!detachDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + detachDir.toString());
    }
  }
  this.usage = new DF(parent, conf);
  this.dfsUsage = new DU(parent, conf);
  this.dfsUsage.start();
}
 
Developer ID: Seagate, Project: hadoop-on-lustre, Lines of code: 51, Source: FSDataset.java

Example 9: BlockPoolSlice

import org.apache.hadoop.fs.DU; // import the required package/class
/**
 * Create a block pool slice
 * @param bpid Block pool Id
 * @param volume {@link FsVolumeImpl} to which this BlockPool belongs
 * @param bpDir directory corresponding to the BlockPool
 * @param conf configuration
 * @throws IOException
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
    Configuration conf) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); 
  final File finalizedDir = new File(
      currentDir, DataStorage.STORAGE_DIR_FINALIZED);

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
  final boolean supportAppends = conf.getBoolean(
      DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
      DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
  if (rbwDir.exists() && !supportAppends) {
    FileUtil.fullyDelete(rbwDir);
  }
  final int maxBlocksPerDir = conf.getInt(
      DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_KEY,
      DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_DEFAULT);
  this.finalizedDir = new LDir(finalizedDir, maxBlocksPerDir);
  if (!rbwDir.mkdirs()) {  // create rbw directory if not exist
    if (!rbwDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + rbwDir.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  // Use cached value initially if available. Or the following call will
  // block until the initial du command completes.
  this.dfsUsage = new DU(bpDir, conf, loadDfsUsed());
  this.dfsUsage.start();

  // Make the dfs usage to be saved during shutdown.
  ShutdownHookManager.get().addShutdownHook(
    new Runnable() {
      @Override
      public void run() {
        if (!dfsUsedSaved) {
          saveDfsUsed();
        }
      }
    }, SHUTDOWN_HOOK_PRIORITY);
}
 
Developer ID: Seagate, Project: hadoop-on-lustre2, Lines of code: 63, Source: BlockPoolSlice.java
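All nine constructors start the DU refresh thread, but the matching teardown lives outside these excerpts. Below is a sketch of what a volume shutdown might look like, assuming the Hadoop 2.x DU API (getUsed() and shutdown()) and a hypothetical persistUsage callback standing in for the saveDfsUsed() logic of examples 4, 5 and 9.

import java.io.IOException;
import java.util.function.LongConsumer;

import org.apache.hadoop.fs.DU;

class VolumeTeardownSketch {
  private final DU dfsUsage;
  private final LongConsumer persistUsage;  // hypothetical stand-in for saveDfsUsed()
  private volatile boolean dfsUsedSaved;

  VolumeTeardownSketch(DU dfsUsage, LongConsumer persistUsage) {
    this.dfsUsage = dfsUsage;
    this.persistUsage = persistUsage;
  }

  /** Invoked from a shutdown hook or an explicit volume teardown. */
  void shutdown() {
    if (!dfsUsedSaved) {  // mirrors the dfsUsedSaved guard in the examples
      try {
        persistUsage.accept(dfsUsage.getUsed());  // cache usage for next start
      } catch (IOException e) {
        // best effort: worst case is one blocking du at the next startup
      }
      dfsUsedSaved = true;
    }
    dfsUsage.shutdown();  // stop the background du refresh thread
  }
}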


Note: The org.apache.hadoop.fs.DU class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their developers; copyright in the source code remains with the original authors, and distribution and use must follow the corresponding project's License. Do not republish without permission.