

Java DistributedFileSystem.delete Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.DistributedFileSystem.delete. If you are unsure what DistributedFileSystem.delete does, how to call it, or what it looks like in practice, the curated code examples below should help. You can also explore further usage examples of org.apache.hadoop.hdfs.DistributedFileSystem, the class this method belongs to.


The following presents 5 code examples of the DistributedFileSystem.delete method, ordered by popularity by default.
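Before diving into the collected examples, here is a minimal, self-contained sketch of a direct delete call, assuming a locally reachable cluster; the hdfs://localhost:9000 URI and the /tmp/example path are illustrative placeholders, not taken from any of the examples below.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class DeleteExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // FileSystem.get returns a DistributedFileSystem for an hdfs:// URI.
    DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(
        URI.create("hdfs://localhost:9000"), conf);
    // The second argument requests recursive deletion; it must be true
    // to delete a non-empty directory. Returns true on success.
    boolean deleted = fs.delete(new Path("/tmp/example"), true);
    System.out.println("deleted: " + deleted);
    fs.close();
  }
}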

Example 1: testEditsLogOldRename

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the class the method depends on
/**
 * Perform operations such as setting a quota, deleting files, and renaming,
 * and ensure the system can apply the edits log during startup.
 */
@Test
public void testEditsLogOldRename() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  Path src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
  Path dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
  createFile(src1);
  fs.mkdirs(dst1.getParent());
  createFile(dst1);
  
  // Set a quota so that the parent of dst1 cannot accept new files/directories
  fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  // Free up quota for a subsequent rename
  fs.delete(dst1, true);
  oldRename(src1, dst1, true, false);
  
  // Restart the cluster and ensure the above operations can be
  // loaded from the edits log
  restartCluster();
  fs = cluster.getFileSystem();
  src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
  dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
  Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
  Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
}
 
Developer ID: naver, Project: hadoop, Lines of code: 29, Source file: TestHDFSFileContextMainOperations.java

Example 2: testEditsLogRename

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the class the method depends on
/**
 * Perform operations such as setting a quota, deleting files, and renaming,
 * and ensure the system can apply the edits log during startup.
 */
@Test
public void testEditsLogRename() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  Path src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
  Path dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
  createFile(src1);
  fs.mkdirs(dst1.getParent());
  createFile(dst1);
  
  // Set a quota so that the parent of dst1 cannot accept new files/directories
  fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  // Free up quota for a subsequent rename
  fs.delete(dst1, true);
  rename(src1, dst1, true, true, false, Rename.OVERWRITE);
  
  // Restart the cluster and ensure the above operations can be
  // loaded from the edits log
  restartCluster();
  fs = cluster.getFileSystem();
  src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
  dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
  Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
  Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
}
 
Developer ID: naver, Project: hadoop, Lines of code: 29, Source file: TestHDFSFileContextMainOperations.java

Example 3: deleteTargetTmpDir

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the class the method depends on
private static void deleteTargetTmpDir(DistributedFileSystem targetFs,
    Path tmpDir) {
  try {
    if (tmpDir != null) {
      targetFs.delete(tmpDir, true);
    }
  } catch (IOException e) {
    DistCp.LOG.error("Unable to cleanup tmp dir: " + tmpDir, e);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 11, Source file: DistCpSync.java
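A helper like deleteTargetTmpDir above is typically invoked from a finally block, so the temporary directory is cleaned up whether or not the main work succeeds. The following is a minimal, hypothetical sketch of that pattern; the URI, the /target/.tmp-copy path, and the class name are placeholders, not part of DistCpSync.

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class TmpDirCleanupExample {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(
        URI.create("hdfs://localhost:9000"), new Configuration());
    Path tmpDir = new Path("/target/.tmp-copy"); // placeholder temp dir
    try {
      fs.mkdirs(tmpDir);
      // ... stage intermediate files under tmpDir ...
    } finally {
      // Best-effort cleanup, mirroring deleteTargetTmpDir: catch the
      // IOException so a cleanup failure does not mask an exception
      // thrown by the main work.
      try {
        fs.delete(tmpDir, true);
      } catch (IOException e) {
        System.err.println("Unable to cleanup tmp dir: " + tmpDir);
      }
    }
    fs.close();
  }
}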

Example 4: testFilesInGetListingOps

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the class the method depends on
@Test
public void testFilesInGetListingOps() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem hdfs = cluster.getFileSystem();
    final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();

    hdfs.mkdirs(new Path("/tmp"));
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f2"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f3"), 0, (short) 1, 0);

    DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp",
        HdfsFileStatus.EMPTY_NAME, false);
    assertTrue(dl.getPartialListing().length == 3);

    String f2 = "f2";
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2.getBytes(), false);
    assertTrue(dl.getPartialListing().length == 1);

    INode f2INode = fsdir.getINode("/tmp/f2");
    String f2InodePath = "/.reserved/.inodes/" + f2INode.getId();
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2InodePath.getBytes(),
        false);
    assertTrue(dl.getPartialListing().length == 1);

    // Test the deleted startAfter file
    hdfs.delete(new Path("/tmp/f2"), false);
    try {
      dl = cluster.getNameNodeRpc().getListing("/tmp",
          f2InodePath.getBytes(), false);
      fail("Didn't get exception for the deleted startAfter token.");
    } catch (IOException e) {
      assertTrue(e instanceof DirectoryListingStartAfterNotFoundException);
    }

  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 46, Source file: TestINodeFile.java

Example 5: testPendingAndInvalidate

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the class the method depends on
/**
 * Test that the BlockManager correctly removes the corresponding pending
 * replication records when a file is deleted.
 *
 * @throws Exception
 */
@Test
public void testPendingAndInvalidate() throws Exception {
  final Configuration CONF = new HdfsConfiguration();
  CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
      DFS_REPLICATION_INTERVAL);
  CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 
      DFS_REPLICATION_INTERVAL);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(
      DATANODE_COUNT).build();
  cluster.waitActive();
  
  FSNamesystem namesystem = cluster.getNamesystem();
  BlockManager bm = namesystem.getBlockManager();
  DistributedFileSystem fs = cluster.getFileSystem();
  try {
    // 1. create a file
    Path filePath = new Path("/tmp.txt");
    DFSTestUtil.createFile(fs, filePath, 1024, (short) 3, 0L);
    
    // 2. disable the heartbeats
    for (DataNode dn : cluster.getDataNodes()) {
      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
    }
    
    // 3. mark a couple of blocks as corrupt
    LocatedBlock block = NameNodeAdapter.getBlockLocations(
        cluster.getNameNode(), filePath.toString(), 0, 1).get(0);
    cluster.getNamesystem().writeLock();
    try {
      bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
          "STORAGE_ID", "TEST");
      bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[1],
          "STORAGE_ID", "TEST");
    } finally {
      cluster.getNamesystem().writeUnlock();
    }
    BlockManagerTestUtil.computeAllPendingWork(bm);
    BlockManagerTestUtil.updateState(bm);
    assertEquals(bm.getPendingReplicationBlocksCount(), 1L);
    assertEquals(bm.pendingReplications.getNumReplicas(block.getBlock()
        .getLocalBlock()), 2);
    
    // 4. delete the file
    fs.delete(filePath, true);
    // Retry at most 10 times, sleeping 1s each time. Note that 10s is much
    // less than the default pending record timeout (5-10 min).
    int retries = 10; 
    long pendingNum = bm.getPendingReplicationBlocksCount();
    while (pendingNum != 0 && retries-- > 0) {
      Thread.sleep(1000);  // let NN do the deletion
      BlockManagerTestUtil.updateState(bm);
      pendingNum = bm.getPendingReplicationBlocksCount();
    }
    assertEquals(pendingNum, 0L);
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 66, Source file: TestPendingReplication.java


Note: the org.apache.hadoop.hdfs.DistributedFileSystem.delete examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by the community, and copyright remains with the original authors. Consult each project's License before distributing or using the code; do not repost without permission.