

Java DFSTestUtil Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.DFSTestUtil. If you are looking for concrete guidance on what DFSTestUtil does and how to use it, the curated class examples below should help.


The DFSTestUtil class belongs to the org.apache.hadoop.hdfs package. Fifteen code examples of the class are shown below, sorted by popularity by default.
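
Most of the examples rely on fixtures defined elsewhere in their test classes: a running MiniDFSCluster plus fields such as cluster, fs/hdfs/dfs, BLOCKSIZE, REPLICATION, and seed. Those fixtures are not shown in the snippets. The following is a minimal sketch of such a harness; the class name and values are illustrative assumptions, not code from any of the source tests:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;
import org.junit.Before;

public abstract class DFSTestUtilExampleBase {
  protected static final long BLOCKSIZE = 1024;   // illustrative block size
  protected static final short REPLICATION = 3;   // illustrative replication
  protected static final long seed = 0L;          // fixed seed -> deterministic file contents

  protected MiniDFSCluster cluster;
  protected DistributedFileSystem hdfs;           // called fs or dfs in some examples

  @Before
  public void setUp() throws Exception {
    Configuration conf = new Configuration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
    // Spin up an in-process HDFS cluster for the test
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
    cluster.waitActive();
    hdfs = cluster.getFileSystem();
  }

  @After
  public void tearDown() throws Exception {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}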

Example 1: testConcat

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
private void testConcat() throws Exception {
  Configuration config = getProxiedFSConf();
  config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  if (!isLocalFS()) {
    FileSystem fs = FileSystem.get(config);
    fs.mkdirs(getProxiedFSTestDir());
    Path path1 = new Path("/test/foo.txt");
    Path path2 = new Path("/test/bar.txt");
    Path path3 = new Path("/test/derp.txt");
    // Create three 1024-byte files with replication 3 and seed 0
    DFSTestUtil.createFile(fs, path1, 1024, (short) 3, 0);
    DFSTestUtil.createFile(fs, path2, 1024, (short) 3, 0);
    DFSTestUtil.createFile(fs, path3, 1024, (short) 3, 0);
    fs.close();
    // Concatenate bar.txt and derp.txt onto foo.txt through HttpFS
    fs = getHttpFSFileSystem();
    fs.concat(path1, new Path[]{path2, path3});
    fs.close();
    // Only the concat target should remain; the sources are removed
    fs = FileSystem.get(config);
    Assert.assertTrue(fs.exists(path1));
    Assert.assertFalse(fs.exists(path2));
    Assert.assertFalse(fs.exists(path3));
    fs.close();
  }
}
 
Developer: naver | Project: hadoop | Lines: 24 | Source: BaseTestHttpFSWith.java
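
A natural extra assertion, not present in the source test, is that the concat target now holds the bytes of all three 1024-byte files; a minimal sketch:

// Hypothetical follow-up check: concat moved the source blocks into path1
Assert.assertEquals(3 * 1024, fs.getFileStatus(path1).getLen());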

Example 2: testWithCheckpoint

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
@Test
public void testWithCheckpoint() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);
  fs.delete(new Path("/test/test"), true);
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
  
  // read snapshot file after restart
  String test2snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test2");
  DFSTestUtil.readFile(fs, new Path(test2snapshotPath));
  String test3snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test3");
  DFSTestUtil.readFile(fs, new Path(test3snapshotPath));
}
 
Developer: naver | Project: hadoop | Lines: 20 | Source: TestOpenFilesWithSnapshot.java
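
doWriteAndAbort is a helper defined elsewhere in TestOpenFilesWithSnapshot. Judging from the snapshot paths read back afterwards (s1/test/test2 and s1/test/test3), it writes files under the snapshottable directory and aborts the output streams so the files are still open when snapshot s1 is taken. A rough sketch of such a helper, offered as an assumption rather than the actual source:

// Assumed shape of the helper; file names inferred from the snapshot paths above
private void doWriteAndAbort(DistributedFileSystem fs, Path path) throws IOException {
  fs.mkdirs(path);
  fs.allowSnapshot(path);
  // Leave /test/test/test2 and /test/test/test3 open (under construction)
  FSDataOutputStream out2 = fs.create(new Path(path, "test/test2"));
  out2.write(new byte[1024]);
  out2.hflush();  // data reaches the datanodes, file stays open
  DFSTestUtil.abortStream((DFSOutputStream) out2.getWrappedStream());
  FSDataOutputStream out3 = fs.create(new Path(path, "test/test3"));
  out3.write(new byte[1024]);
  out3.hflush();
  DFSTestUtil.abortStream((DFSOutputStream) out3.getWrappedStream());
  // Snapshot s1 captures both open files
  fs.createSnapshot(path, "s1");
}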

Example 3: runOperations

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
/**
 * Run file operations to create edits for all op codes
 * to be tested.
 *
 * the following op codes are deprecated and therefore not tested:
 *
 * OP_DATANODE_ADD    ( 5)
 * OP_DATANODE_REMOVE ( 6)
 * OP_SET_NS_QUOTA    (11)
 * OP_CLEAR_NS_QUOTA  (12)
 */
private CheckpointSignature runOperations() throws IOException {
  LOG.info("Creating edits by performing fs operations");
  // no explicit check needed: this throws if the cluster's filesystem is not
  // a DistributedFileSystem, which is what we want
  DistributedFileSystem dfs = cluster.getFileSystem();
  DFSTestUtil.runOperations(cluster, dfs, cluster.getConfiguration(0),
      dfs.getDefaultBlockSize(), 0);

  // OP_ROLLING_UPGRADE_START
  cluster.getNamesystem().getEditLog().logStartRollingUpgrade(Time.now());
  // OP_ROLLING_UPGRADE_FINALIZE
  cluster.getNamesystem().getEditLog().logFinalizeRollingUpgrade(Time.now());

  // Force a roll so we get an OP_END_LOG_SEGMENT txn
  return cluster.getNameNodeRpc().rollEditLog();
}
 
Developer: naver | Project: hadoop | Lines: 27 | Source: OfflineEditsViewerHelper.java
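
As a usage note: in OfflineEditsViewerHelper the CheckpointSignature returned here is used to locate the finalized edit-log segment on disk, and that file's path is then handed to the offline edits viewer under test. The details of that lookup live in the rest of the helper class and are not reproduced here.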

Example 4: testReplicatingAfterRemoveVolume

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
@Test(timeout=60000)
public void testReplicatingAfterRemoveVolume()
    throws InterruptedException, TimeoutException, IOException,
    ReconfigurationException {
  startDFSCluster(1, 2);

  final FileSystem fs = cluster.getFileSystem();
  final short replFactor = 2;
  Path testFile = new Path("/test");
  createFile(testFile, 4, replFactor);

  DataNode dn = cluster.getDataNodes().get(0);
  Collection<String> oldDirs = getDataDirs(dn);
  String newDirs = oldDirs.iterator().next();  // Keep the first volume.
  dn.reconfigurePropertyImpl(
      DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs);
  assertFileLocksReleased(
      new ArrayList<String>(oldDirs).subList(1, oldDirs.size()));

  triggerDeleteReport(dn);

  waitReplication(fs, testFile, 1, 1);
  DFSTestUtil.waitReplication(fs, testFile, replFactor);
}
 
Developer: naver | Project: hadoop | Lines: 25 | Source: TestDataNodeHotSwapVolumes.java
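
getDataDirs is another local helper of this test. A plausible one-liner, assuming the DataNode keeps its volume list as a comma-separated value of dfs.datanode.data.dir in its effective configuration:

// Sketch under the stated assumption; not necessarily the test's exact code
private static Collection<String> getDataDirs(DataNode datanode) {
  return new ArrayList<String>(datanode.getConf()
      .getTrimmedStringCollection(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
}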

Example 5: testPageRounder

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
@Test(timeout=60000)
public void testPageRounder() throws Exception {
  // Write a small file
  Path fileName = new Path("/testPageRounder");
  final int smallBlocks = 512; // This should be smaller than the page size
  assertTrue("Page size should be greater than smallBlocks!",
      PAGE_SIZE > smallBlocks);
  final int numBlocks = 5;
  final int fileLen = smallBlocks * numBlocks;
  FSDataOutputStream out =
      fs.create(fileName, false, 4096, (short)1, smallBlocks);
  out.write(new byte[fileLen]);
  out.close();
  HdfsBlockLocation[] locs = (HdfsBlockLocation[])fs.getFileBlockLocations(
      fileName, 0, fileLen);
  // Cache the file and check the sizes match the page size
  setHeartbeatResponse(cacheBlocks(locs));
  DFSTestUtil.verifyExpectedCacheUsage(PAGE_SIZE * numBlocks, numBlocks, fsd);
  // Uncache and check that it decrements by the page size too
  setHeartbeatResponse(uncacheBlocks(locs));
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
}
 
Developer: naver | Project: hadoop | Lines: 23 | Source: TestFsDatasetCache.java
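
PAGE_SIZE is a fixture of TestFsDatasetCache. A reasonable way to define it, and the assumption this sketch makes, is the operating system page size as reported by Hadoop's cache manipulator:

// Sketch (assumption): the granularity the cache rounds mlock'ed blocks up to
private static final long PAGE_SIZE =
    NativeIO.POSIX.getCacheManipulator().getOperatingSystemPageSize();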

Example 6: testCorrectNumberOfBlocksAfterRestart

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
@Test
public void testCorrectNumberOfBlocksAfterRestart() throws IOException {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  final Path file = new Path(foo, "file");
  final String snapshotName = "ss0";

  // Populate a snapshottable, quota-enabled directory
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
  hdfs.mkdirs(bar);
  hdfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
  hdfs.setQuota(bar, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
  hdfs.allowSnapshot(foo);

  // Snapshot, then persist the namespace from safe mode
  hdfs.createSnapshot(foo, snapshotName);
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();

  // Drop the snapshot and delete everything under /foo
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  hdfs.deleteSnapshot(foo, snapshotName);
  hdfs.delete(bar, true);
  hdfs.delete(foo, true);

  // The total block count must survive a namenode restart
  long numberOfBlocks = cluster.getNamesystem().getBlocksTotal();
  cluster.restartNameNode(0);
  assertEquals(numberOfBlocks, cluster.getNamesystem().getBlocksTotal());
}
 
Developer: naver | Project: hadoop | Lines: 27 | Source: TestSnapshotDeletion.java

Example 7: testQuotaUpdateWithFileCreate

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
/**
 * Test if the quota can be correctly updated for create file
 */
@Test (timeout=60000)
public void testQuotaUpdateWithFileCreate() throws Exception  {
  final Path foo = new Path(dir, "foo");
  Path createdFile = new Path(foo, "created_file.data");
  dfs.mkdirs(foo);
  dfs.setQuota(foo, Long.MAX_VALUE-1, Long.MAX_VALUE-1);
  long fileLen = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16,
      fileLen, BLOCKSIZE, REPLICATION, seed);
  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());
  QuotaCounts cnt = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed();
  assertEquals(2, cnt.getNameSpace());
  assertEquals(fileLen * REPLICATION, cnt.getStorageSpace());
}
 
Developer: naver | Project: hadoop | Lines: 21 | Source: TestDiskspaceQuotaUpdate.java
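
To make the arithmetic concrete: with illustrative values BLOCKSIZE = 1024 and REPLICATION = 3, fileLen is 2 * 1024 + 512 = 2560 bytes; the namespace count of 2 covers the directory foo plus the created file, and the storage space consumed is 2560 * 3 = 7680 bytes, matching the fileLen * REPLICATION assertion.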

Example 8: testReadSnapshotFileWithCheckpoint

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
@Test(timeout = 30000)
public void testReadSnapshotFileWithCheckpoint() throws Exception {
  Path foo = new Path("/foo");
  hdfs.mkdirs(foo);
  hdfs.allowSnapshot(foo);
  Path bar = new Path("/foo/bar");
  DFSTestUtil.createFile(hdfs, bar, 100, (short) 2, 100024L);
  hdfs.createSnapshot(foo, "s1");
  assertTrue(hdfs.delete(bar, true));

  // checkpoint
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);

  // restart namenode to load snapshot files from fsimage
  cluster.restartNameNode(true);
  String snapshotPath = Snapshot.getSnapshotPath(foo.toString(), "s1/bar");
  DFSTestUtil.readFile(hdfs, new Path(snapshotPath));
}
 
Developer: naver | Project: hadoop | Lines: 22 | Source: TestSnapshotBlocksMap.java
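
Snapshot.getSnapshotPath splices the reserved .snapshot component into the path, so the read above targets the snapshot copy of bar rather than the deleted live file. Spelled out by hand:

// Equivalent literal path: snapshot contents live under the ".snapshot" dir
DFSTestUtil.readFile(hdfs, new Path("/foo/.snapshot/s1/bar"));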

Example 9: testSetQuota

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
@Test (timeout=60000)
public void testSetQuota() throws Exception {
  final Path dir = new Path("/TestSnapshot");
  hdfs.mkdirs(dir);
  // allow snapshot on dir and create snapshot s1
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
  
  Path sub = new Path(dir, "sub");
  hdfs.mkdirs(sub);
  Path fileInSub = new Path(sub, "file");
  DFSTestUtil.createFile(hdfs, fileInSub, BLOCKSIZE, REPLICATION, seed);
  INodeDirectory subNode = INodeDirectory.valueOf(
      fsdir.getINode(sub.toString()), sub);
  // subNode should be an INodeDirectory, but not an INodeDirectoryWithSnapshot
  assertFalse(subNode.isWithSnapshot());
  
  hdfs.setQuota(sub, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
  subNode = INodeDirectory.valueOf(fsdir.getINode(sub.toString()), sub);
  assertTrue(subNode.isQuotaSet());
  assertFalse(subNode.isWithSnapshot());
}
 
Developer: naver | Project: hadoop | Lines: 22 | Source: TestSetQuotaWithSnapshot.java

Example 10: createLocatedBlockNoStorageMedia

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
private LocatedBlock createLocatedBlockNoStorageMedia() {
  // Three local datanodes in different admin states
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
                                       AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
                                       AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
                                       AdminStates.NORMAL)
  };
  // Block in pool "bp12": id 12345, 10 bytes, generation stamp 53;
  // start offset 5, not corrupt; no storage IDs or media supplied
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
  // Attach a dummy block access token
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
 
Developer: naver | Project: hadoop | Lines: 17 | Source: TestPBHelper.java
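
In TestPBHelper a block built this way is typically round-tripped through the protobuf converters and compared with the original. A sketch of that pattern, assuming the Hadoop 2.x PBHelper convert overloads and a compare helper defined elsewhere in the test:

LocatedBlock lb = createLocatedBlockNoStorageMedia();
LocatedBlockProto lbProto = PBHelper.convert(lb);  // domain object -> protobuf
LocatedBlock lb2 = PBHelper.convert(lbProto);      // protobuf -> domain object
compare(lb, lb2);                                  // field-by-field equality check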

Example 11: testQuotaByStorageTypeParentOffChildOn

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
@Test(timeout = 60000)
public void testQuotaByStorageTypeParentOffChildOn() throws Exception {
  final Path parent = new Path(dir, "parent");
  final Path child = new Path(parent, "child");
  dfs.mkdirs(parent);
  dfs.mkdirs(child);

  dfs.setStoragePolicy(parent, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  dfs.setQuotaByStorageType(child, StorageType.SSD, 2 * BLOCKSIZE);

  // Create a file of size 2.5 * BLOCKSIZE under the child directory.
  // Since the child directory has an SSD quota of 2 * BLOCKSIZE,
  // expect an exception when creating files under it.
  Path createdFile1 = new Path(child, "created_file1.data");
  long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  int bufLen = BLOCKSIZE / 16;
  try {
    DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
        REPLICATION, seed);
    fail("Should have failed with QuotaByStorageTypeExceededException ");
  } catch (Throwable t) {
    LOG.info("Got expected exception ", t);
  }
}
 
Developer: naver | Project: hadoop | Lines: 25 | Source: TestQuotaByStorageType.java
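
Why this must fail: the ONESSD policy stores one replica of every block on SSD, so the 2.5-block file needs about 2.5 * BLOCKSIZE of SSD space, while the child directory's SSD quota is only 2 * BLOCKSIZE; the namenode therefore rejects the allocation with QuotaByStorageTypeExceededException.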

Example 12: testUncacheUnknownBlock

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
@Test(timeout=60000)
public void testUncacheUnknownBlock() throws Exception {
  // Create a file
  Path fileName = new Path("/testUncacheUnknownBlock");
  int fileLen = 4096;
  DFSTestUtil.createFile(fs, fileName, fileLen, (short)1, 0xFDFD);
  HdfsBlockLocation[] locs = (HdfsBlockLocation[])fs.getFileBlockLocations(
      fileName, 0, fileLen);

  // Try to uncache it without caching it first
  setHeartbeatResponse(uncacheBlocks(locs));

  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return fsd.getNumBlocksFailedToUncache() > 0;
    }
  }, 100, 10000);
}
 
Developer: naver | Project: hadoop | Lines: 20 | Source: TestFsDatasetCache.java

Example 13: testDeleteDirectoryWithSnapshot2

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
/**
 * Deleting directory with snapshottable descendant with snapshots must fail.
 */
@Test (timeout=300000)
public void testDeleteDirectoryWithSnapshot2() throws Exception {
  Path file0 = new Path(sub, "file0");
  Path file1 = new Path(sub, "file1");
  DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
  
  Path subfile1 = new Path(subsub, "file0");
  Path subfile2 = new Path(subsub, "file1");
  DFSTestUtil.createFile(hdfs, subfile1, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, subfile2, BLOCKSIZE, REPLICATION, seed);

  // Allow snapshot for subsub1, and create snapshot for it
  hdfs.allowSnapshot(subsub);
  hdfs.createSnapshot(subsub, "s1");

  // Deleting dir while its descendant subsub still has snapshots should fail
  exception.expect(RemoteException.class);
  String error = subsub.toString()
      + " is snapshottable and already has snapshots";
  exception.expectMessage(error);
  hdfs.delete(dir, true);
}
 
Developer: naver | Project: hadoop | Lines: 27 | Source: TestSnapshotDeletion.java

Example 14: changeData

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
/**
 * Make some changes under the given directory (created earlier in the test):
 * 1. rename dir/foo/d1 to dir/bar/d1
 * 2. delete dir/bar/d1/f3
 * 3. rename dir/foo to /dir/bar/d1/foo
 * 4. delete dir/bar/d1/foo/f1
 * 5. create file dir/bar/d1/foo/f1 whose size is 2*BLOCK_SIZE
 * 6. append one BLOCK to file dir/bar/f2
 * 7. rename dir/bar to dir/foo
 *
 * Thus after all these ops the subtree looks like this:
 *                       dir/
 *                       foo/
 *                 d1/    f2(A)    d2/
 *                foo/             f4
 *                f1(new)
 */
private void changeData(Path dir) throws Exception {
  final Path foo = new Path(dir, "foo");
  final Path bar = new Path(dir, "bar");
  final Path d1 = new Path(foo, "d1");
  final Path f2 = new Path(bar, "f2");

  final Path bar_d1 = new Path(bar, "d1");
  dfs.rename(d1, bar_d1);
  final Path f3 = new Path(bar_d1, "f3");
  dfs.delete(f3, true);
  final Path newfoo = new Path(bar_d1, "foo");
  dfs.rename(foo, newfoo);
  final Path f1 = new Path(newfoo, "f1");
  dfs.delete(f1, true);
  DFSTestUtil.createFile(dfs, f1, 2 * BLOCK_SIZE, DATA_NUM, 0);
  DFSTestUtil.appendFile(dfs, f2, (int) BLOCK_SIZE);
  dfs.rename(bar, new Path(dir, "foo"));
}
 
Developer: naver | Project: hadoop | Lines: 36 | Source: TestDistCpSync.java

Example 15: createLocatedBlock

import org.apache.hadoop.hdfs.DFSTestUtil; // import the required package/class
private LocatedBlock createLocatedBlock() {
  // Four local datanodes covering the interesting admin states
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
          AdminStates.NORMAL),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h4",
          AdminStates.NORMAL),
  };
  // One storage ID and one storage medium per replica location
  String[] storageIDs = {"s1", "s2", "s3", "s4"};
  StorageType[] media = {
      StorageType.DISK,
      StorageType.SSD,
      StorageType.DISK,
      StorageType.RAM_DISK
  };
  // Block in pool "bp12": id 12345, 10 bytes, generation stamp 53;
  // start offset 5, not corrupt, no cached locations
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53),
      dnInfos, storageIDs, media, 5, false, new DatanodeInfo[]{});
  // Attach a dummy block access token
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
 
Developer: naver | Project: hadoop | Lines: 27 | Source: TestPBHelper.java


Note: the org.apache.hadoop.hdfs.DFSTestUtil class examples above were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by the community; copyright in the source code remains with the original authors, and distribution and use are subject to each project's License. Do not reproduce without permission.