

Java DistributedFileSystem.delete Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.DistributedFileSystem.delete. If you are wondering what DistributedFileSystem.delete does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also explore further usage examples of the containing class, org.apache.hadoop.hdfs.DistributedFileSystem.


The following shows 5 code examples of the DistributedFileSystem.delete method, sorted by popularity by default.
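Before the examples, a minimal, self-contained sketch of calling the method directly may help. This sketch is illustrative and not taken from the projects below; the hdfs://localhost:8020 address and the /tmp/example path are assumptions you would replace with your own values:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class DeleteExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Illustrative NameNode address; adjust to your cluster
    conf.set("fs.defaultFS", "hdfs://localhost:8020");
    FileSystem fs = FileSystem.get(conf);
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // delete(path, recursive): recursive=true removes a non-empty
      // directory together with its contents; the return value reports
      // whether the delete succeeded
      boolean deleted = dfs.delete(new Path("/tmp/example"), true);
      System.out.println("deleted: " + deleted);
    }
    fs.close();
  }
}

As the examples below show, the common pattern is delete(path, true) for recursive directory removal and delete(path, false) when deleting a single file.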

Example 1: testEditsLogOldRename

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
/**
 * Perform operations such as setting a quota, deleting files, and renaming,
 * and ensure the system can apply the edits log during startup.
 */
@Test
public void testEditsLogOldRename() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  Path src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
  Path dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
  createFile(src1);
  fs.mkdirs(dst1.getParent());
  createFile(dst1);
  
  // Set a namespace quota so that dst1's parent cannot accept new files/directories under it
  fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  // Free up quota for a subsequent rename
  fs.delete(dst1, true);
  oldRename(src1, dst1, true, false);
  
  // Restart the cluster and ensure the above operations can be
  // loaded from the edits log
  restartCluster();
  fs = cluster.getFileSystem();
  src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
  dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
  Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
  Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
}
 
Developer ID: naver, Project: hadoop, Lines of code: 29, Source file: TestHDFSFileContextMainOperations.java

Example 2: testEditsLogRename

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
/**
 * Perform operations such as setting a quota, deleting files, and renaming,
 * and ensure the system can apply the edits log during startup.
 */
@Test
public void testEditsLogRename() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  Path src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
  Path dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
  createFile(src1);
  fs.mkdirs(dst1.getParent());
  createFile(dst1);
  
  // Set a namespace quota so that dst1's parent cannot accept new files/directories under it
  fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  // Free up quota for a subsequent rename
  fs.delete(dst1, true);
  rename(src1, dst1, true, true, false, Rename.OVERWRITE);
  
  // Restart the cluster and ensure the above operations can be
  // loaded from the edits log
  restartCluster();
  fs = cluster.getFileSystem();
  src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
  dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
  Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
  Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
}
 
Developer ID: naver, Project: hadoop, Lines of code: 29, Source file: TestHDFSFileContextMainOperations.java

Example 3: deleteTargetTmpDir

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
private static void deleteTargetTmpDir(DistributedFileSystem targetFs,
    Path tmpDir) {
  try {
    if (tmpDir != null) {
      // Best-effort cleanup: recursively delete the temporary directory
      targetFs.delete(tmpDir, true);
    }
  } catch (IOException e) {
    DistCp.LOG.error("Unable to cleanup tmp dir: " + tmpDir, e);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 11, Source file: DistCpSync.java

Example 4: testFilesInGetListingOps

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
@Test
public void testFilesInGetListingOps() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem hdfs = cluster.getFileSystem();
    final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();

    hdfs.mkdirs(new Path("/tmp"));
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f2"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f3"), 0, (short) 1, 0);

    DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp",
        HdfsFileStatus.EMPTY_NAME, false);
    assertTrue(dl.getPartialListing().length == 3);

    String f2 = "f2";
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2.getBytes(), false);
    assertTrue(dl.getPartialListing().length == 1);

    INode f2INode = fsdir.getINode("/tmp/f2");
    String f2InodePath = "/.reserved/.inodes/" + f2INode.getId();
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2InodePath.getBytes(),
        false);
    assertTrue(dl.getPartialListing().length == 1);

    // Test the deleted startAfter file
    hdfs.delete(new Path("/tmp/f2"), false);
    try {
      dl = cluster.getNameNodeRpc().getListing("/tmp",
          f2InodePath.getBytes(), false);
      fail("Didn't get exception for the deleted startAfter token.");
    } catch (IOException e) {
      assertTrue(e instanceof DirectoryListingStartAfterNotFoundException);
    }

  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 46, Source file: TestINodeFile.java

Example 5: testPendingAndInvalidate

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
/**
 * Test that BlockManager correctly removes the corresponding pending
 * replication records when a file is deleted
 * 
 * @throws Exception
 */
@Test
public void testPendingAndInvalidate() throws Exception {
  final Configuration CONF = new HdfsConfiguration();
  CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
      DFS_REPLICATION_INTERVAL);
  CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 
      DFS_REPLICATION_INTERVAL);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(
      DATANODE_COUNT).build();
  cluster.waitActive();
  
  FSNamesystem namesystem = cluster.getNamesystem();
  BlockManager bm = namesystem.getBlockManager();
  DistributedFileSystem fs = cluster.getFileSystem();
  try {
    // 1. create a file
    Path filePath = new Path("/tmp.txt");
    DFSTestUtil.createFile(fs, filePath, 1024, (short) 3, 0L);
    
    // 2. disable the heartbeats
    for (DataNode dn : cluster.getDataNodes()) {
      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
    }
    
    // 3. mark a couple of blocks as corrupt
    LocatedBlock block = NameNodeAdapter.getBlockLocations(
        cluster.getNameNode(), filePath.toString(), 0, 1).get(0);
    cluster.getNamesystem().writeLock();
    try {
      bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
          "STORAGE_ID", "TEST");
      bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[1],
          "STORAGE_ID", "TEST");
    } finally {
      cluster.getNamesystem().writeUnlock();
    }
    BlockManagerTestUtil.computeAllPendingWork(bm);
    BlockManagerTestUtil.updateState(bm);
    assertEquals(bm.getPendingReplicationBlocksCount(), 1L);
    assertEquals(bm.pendingReplications.getNumReplicas(block.getBlock()
        .getLocalBlock()), 2);
    
    // 4. delete the file
    fs.delete(filePath, true);
    // Retry at most 10 times, sleeping 1s between attempts. Note that 10s is
    // much less than the default pending-record timeout (5-10 min)
    int retries = 10; 
    long pendingNum = bm.getPendingReplicationBlocksCount();
    while (pendingNum != 0 && retries-- > 0) {
      Thread.sleep(1000);  // let NN do the deletion
      BlockManagerTestUtil.updateState(bm);
      pendingNum = bm.getPendingReplicationBlocksCount();
    }
    assertEquals(pendingNum, 0L);
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 66, Source file: TestPendingReplication.java


Note: the org.apache.hadoop.hdfs.DistributedFileSystem.delete examples in this article were compiled from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Consult the corresponding project's License before distributing or using the code. Do not reproduce without permission.