

Java DFSTestUtil.createFile Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.DFSTestUtil.createFile. If you are looking for concrete answers to questions such as "How is DFSTestUtil.createFile used?" or "What does a real call to DFSTestUtil.createFile look like?", the curated code samples below may help. You can also explore further usage examples of the containing class, org.apache.hadoop.hdfs.DFSTestUtil.


The sections below show 15 code examples of the DFSTestUtil.createFile method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
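Before the individual examples, here is a minimal, self-contained sketch of the two createFile overloads that appear most often below. It is only an illustrative sketch: the class name, paths, and sizes are assumptions made for this article rather than code from any of the examples, while the overload signatures (fileLen/replication/seed, and bufferLen/fileLen/blockSize/replication/seed) match the calls used in the examples that follow. It assumes the hadoop-hdfs test artifact (which provides MiniDFSCluster and DFSTestUtil) is on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DFSTestUtilCreateFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Spin up an in-process, single-DataNode HDFS cluster for testing.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // Overload 1: createFile(fs, path, fileLen, replFactor, seed)
      // Writes 1 KB of seeded pseudo-random data with replication factor 1.
      DFSTestUtil.createFile(fs, new Path("/sketch/simple.dat"), 1024L, (short) 1, 0L);

      // Overload 2: createFile(fs, path, bufferLen, fileLen, blockSize, replFactor, seed)
      // Additionally controls the write buffer size and the file's block size;
      // here a 2 MB file is written with a 1 MB block size, producing two blocks.
      DFSTestUtil.createFile(fs, new Path("/sketch/multi-block.dat"),
          4096, 2L * 1024 * 1024, 1024 * 1024, (short) 1, 0L);

      System.out.println("simple.dat length = "
          + fs.getFileStatus(new Path("/sketch/simple.dat")).getLen());
    } finally {
      cluster.shutdown();
    }
  }
}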

Example 1: testEnterSafeModeInANNShouldNotThrowNPE

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * Test case for entering safemode on the active namenode when it is already in startup safemode.
 * It is a regression test for HDFS-2747.
 */
@Test
public void testEnterSafeModeInANNShouldNotThrowNPE() throws Exception {
  banner("Restarting active");
  DFSTestUtil
    .createFile(fs, new Path("/test"), 3 * BLOCK_SIZE, (short) 3, 1L);
  restartActive();
  nn0.getRpcServer().transitionToActive(
      new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));

  FSNamesystem namesystem = nn0.getNamesystem();
  String status = namesystem.getSafemode();
  assertTrue("Bad safemode status: '" + status + "'", status
      .startsWith("Safe mode is ON."));
  NameNodeAdapter.enterSafeMode(nn0, false);
  assertTrue("Failed to enter into safemode in active", namesystem
      .isInSafeMode());
  NameNodeAdapter.enterSafeMode(nn0, false);
  assertTrue("Failed to enter into safemode in active", namesystem
      .isInSafeMode());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 25, Source: TestHASafeMode.java

Example 2: testDatanodeRollingUpgradeWithFinalize

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@Test (timeout=600000)
public void testDatanodeRollingUpgradeWithFinalize() throws Exception {
  try {
    startCluster();

    // Create files in DFS.
    Path testFile1 = new Path("/" + GenericTestUtils.getMethodName() + ".01.dat");
    Path testFile2 = new Path("/" + GenericTestUtils.getMethodName() + ".02.dat");
    DFSTestUtil.createFile(fs, testFile1, FILE_SIZE, REPL_FACTOR, SEED);
    DFSTestUtil.createFile(fs, testFile2, FILE_SIZE, REPL_FACTOR, SEED);

    startRollingUpgrade();
    File blockFile = getBlockForFile(testFile2, true);
    File trashFile = getTrashFileForBlock(blockFile, false);
    deleteAndEnsureInTrash(testFile2, blockFile, trashFile);
    finalizeRollingUpgrade();

    // Ensure that the deleted file testFile2 stays deleted after finalize
    assertFalse(isTrashRootPresent());
    assert(!fs.exists(testFile2));
    assert(fs.exists(testFile1));

  } finally {
    shutdownCluster();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 27, Source: TestDataNodeRollingUpgrade.java

Example 3: testQuotaByStorageTypeWithTraditionalQuota

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * Both traditional space quota and the storage type quota for SSD are set and
 * not exceeded.
 */
@Test(timeout = 60000)
public void testQuotaByStorageTypeWithTraditionalQuota() throws Exception {
  final Path foo = new Path(dir, "foo");
  dfs.mkdirs(foo);

  dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 10);
  dfs.setQuota(foo, Long.MAX_VALUE - 1, REPLICATION * BLOCKSIZE * 10);

  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());

  Path createdFile = new Path(foo, "created_file.data");
  long fileLen = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16,
      fileLen, BLOCKSIZE, REPLICATION, seed);

  QuotaCounts cnt = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed();
  assertEquals(2, cnt.getNameSpace());
  assertEquals(fileLen * REPLICATION, cnt.getStorageSpace());

  dfs.delete(createdFile, true);

  QuotaCounts cntAfterDelete = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed();
  assertEquals(1, cntAfterDelete.getNameSpace());
  assertEquals(0, cntAfterDelete.getStorageSpace());

  // Validate the computeQuotaUsage()
  QuotaCounts counts = new QuotaCounts.Builder().build();
  fnode.computeQuotaUsage(fsn.getBlockManager().getStoragePolicySuite(), counts, true);
  assertEquals(fnode.dumpTreeRecursively().toString(), 1,
      counts.getNameSpace());
  assertEquals(fnode.dumpTreeRecursively().toString(), 0,
      counts.getStorageSpace());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 43, Source: TestQuotaByStorageType.java

Example 4: testConcatWithQuotaDecrease

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * make sure we update the quota correctly after concat
 */
@Test
public void testConcatWithQuotaDecrease() throws IOException {
  final short srcRepl = 3; // note this is different from REPL_FACTOR
  final int srcNum = 10;
  final Path foo = new Path("/foo");
  final Path[] srcs = new Path[srcNum];
  final Path target = new Path(foo, "target");
  DFSTestUtil.createFile(dfs, target, blockSize, REPL_FACTOR, 0L);

  dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);

  for (int i = 0; i < srcNum; i++) {
    srcs[i] = new Path(foo, "src" + i);
    DFSTestUtil.createFile(dfs, srcs[i], blockSize * 2, srcRepl, 0L);
  }

  ContentSummary summary = dfs.getContentSummary(foo);
  Assert.assertEquals(11, summary.getFileCount());
  Assert.assertEquals(blockSize * REPL_FACTOR +
          blockSize * 2 * srcRepl * srcNum, summary.getSpaceConsumed());

  dfs.concat(target, srcs);
  summary = dfs.getContentSummary(foo);
  Assert.assertEquals(1, summary.getFileCount());
  Assert.assertEquals(
      blockSize * REPL_FACTOR + blockSize * 2 * REPL_FACTOR * srcNum,
      summary.getSpaceConsumed());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 32, Source: TestHDFSConcat.java

Example 5: testReservedFileNames

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * Check /.reserved path is reserved and cannot be created.
 */
@Test
public void testReservedFileNames() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    // First start a cluster with reserved file names check turned off
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    
    // Creation of directory or file with reserved path names is disallowed
    ensureReservedFileNamesCannotBeCreated(fs, "/.reserved", false);
    ensureReservedFileNamesCannotBeCreated(fs, "/.reserved", false);
    Path reservedPath = new Path("/.reserved");
    
    // Loading of fsimage or editlog with /.reserved directory should fail
    // Mkdir "/.reserved reserved path with reserved path check turned off
    FSDirectory.CHECK_RESERVED_FILE_NAMES = false;
    fs.mkdirs(reservedPath);
    assertTrue(fs.isDirectory(reservedPath));
    ensureReservedFileNamesCannotBeLoaded(cluster);

    // Loading of fsimage or editlog with /.reserved file should fail
    // Create file "/.reserved reserved path with reserved path check turned off
    FSDirectory.CHECK_RESERVED_FILE_NAMES = false;
    ensureClusterRestartSucceeds(cluster);
    fs.delete(reservedPath, true);
    DFSTestUtil.createFile(fs, reservedPath, 10, (short)1, 0L);
    assertTrue(!fs.isDirectory(reservedPath));
    ensureReservedFileNamesCannotBeLoaded(cluster);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 40, Source: TestINodeFile.java

Example 6: testFcResolveAfs

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * Tests resolution of an hdfs symlink to the local file system.
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testFcResolveAfs() throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  FileContext fcLocal = FileContext.getLocalFSFileContext();
  FileContext fcHdfs = FileContext.getFileContext(cluster.getFileSystem()
      .getUri());

  final String localTestRoot = helper.getAbsoluteTestRootDir(fcLocal);
  Path alphaLocalPath = new Path(fcLocal.getDefaultFileSystem().getUri()
      .toString(), new File(localTestRoot, "alpha").getAbsolutePath());
  DFSTestUtil.createFile(FileSystem.getLocal(conf), alphaLocalPath, 16,
      (short) 1, 2);

  Path linkTarget = new Path(fcLocal.getDefaultFileSystem().getUri()
      .toString(), localTestRoot);
  Path hdfsLink = new Path(fcHdfs.getDefaultFileSystem().getUri().toString(),
      "/tmp/link");
  fcHdfs.createSymlink(linkTarget, hdfsLink, true);

  Path alphaHdfsPathViaLink = new Path(fcHdfs.getDefaultFileSystem().getUri()
      .toString()
      + "/tmp/link/alpha");

  Set<AbstractFileSystem> afsList = fcHdfs
      .resolveAbstractFileSystems(alphaHdfsPathViaLink);
  Assert.assertEquals(2, afsList.size());
  for (AbstractFileSystem afs : afsList) {
    if ((!afs.equals(fcHdfs.getDefaultFileSystem()))
        && (!afs.equals(fcLocal.getDefaultFileSystem()))) {
      Assert.fail("Failed to resolve AFS correctly");
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 39, Source: TestResolveHdfsSymlink.java

Example 7: testQuotaByStorageTypeParentOnChildOff

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@Test(timeout = 60000)
public void testQuotaByStorageTypeParentOnChildOff() throws Exception {
  short replication = 1;
  final Path parent = new Path(dir, "parent");
  final Path child = new Path(parent, "child");
  dfs.mkdirs(parent);
  dfs.mkdirs(child);

  dfs.setStoragePolicy(parent, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  dfs.setQuotaByStorageType(parent, StorageType.SSD, 3 * BLOCKSIZE);

  // Create file of size 2.5 * BLOCKSIZE under child directory
  // Verify parent Quota applies
  Path createdFile1 = new Path(child, "created_file1.data");
  long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
      replication, seed);

  INode fnode = fsdir.getINode4Write(parent.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());
  long currentSSDConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, currentSSDConsumed);

  // Create the 2nd file of size BLOCKSIZE under child directory and expect quota exceeded exception
  Path createdFile2 = new Path(child, "created_file2.data");
  long file2Len = BLOCKSIZE;

  try {
    DFSTestUtil.createFile(dfs, createdFile2, bufLen, file2Len, BLOCKSIZE, replication, seed);
    fail("Should have failed with QuotaByStorageTypeExceededException ");
  } catch (Throwable t) {
    LOG.info("Got expected exception ", t);
    currentSSDConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
        .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
    assertEquals(file1Len, currentSSDConsumed);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 41, Source: TestQuotaByStorageType.java

Example 8: prepare

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@Override
void prepare() throws Exception {
  final Path targetPath = new Path(target);
  DFSTestUtil.createFile(dfs, targetPath, BlockSize, DataNodes, 0);
  for (int i = 0; i < srcPaths.length; i++) {
    DFSTestUtil.createFile(dfs, srcPaths[i], BlockSize, DataNodes, 0);
  }
  assertEquals(BlockSize, dfs.getFileStatus(targetPath).getLen());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 10, Source: TestRetryCacheWithHA.java

Example 9: initData3

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
private void initData3(Path dir) throws Exception {
  final Path test = new Path(dir, "test");
  final Path foo = new Path(dir, "foo");
  final Path bar = new Path(dir, "bar");
  final Path f1 = new Path(test, "file");
  final Path f2 = new Path(foo, "file");
  final Path f3 = new Path(bar, "file");

  DFSTestUtil.createFile(dfs, f1, BLOCK_SIZE, DATA_NUM, 0L);
  DFSTestUtil.createFile(dfs, f2, BLOCK_SIZE * 2, DATA_NUM, 1L);
  DFSTestUtil.createFile(dfs, f3, BLOCK_SIZE * 3, DATA_NUM, 2L);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 13, Source: TestDistCpSync.java

Example 10: testLoadImageWithAppending

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * Test the fsimage loading while there is file under construction.
 */
@Test (timeout=60000)
public void testLoadImageWithAppending() throws Exception {
  Path sub1 = new Path(dir, "sub1");
  Path sub1file1 = new Path(sub1, "sub1file1");
  Path sub1file2 = new Path(sub1, "sub1file2");
  DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, REPLICATION, seed);
  
  hdfs.allowSnapshot(dir);
  hdfs.createSnapshot(dir, "s0");
  
  HdfsDataOutputStream out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));      
  
  // save namespace and restart cluster
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 30, Source: TestFSImageWithSnapshot.java

Example 11: testAppend

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * Test for append
 */
@Test
public void testAppend() throws Exception {
  String src = "/testNamenodeRetryCache/testAppend/src";
  resetCall();
  // Create a file with partial block
  DFSTestUtil.createFile(filesystem, new Path(src), 128, (short)1, 0L);
  
  // Retried append requests succeed
  newCall();
  LastBlockWithStatus b = nnRpc.append(src, "holder",
      new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
  Assert.assertEquals(b, nnRpc.append(src, "holder",
      new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND))));
  Assert.assertEquals(b, nnRpc.append(src, "holder",
      new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND))));
  
  // non-retried call fails
  newCall();
  try {
    nnRpc.append(src, "holder",
        new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
    Assert.fail("testAppend - expected exception is not thrown");
  } catch (Exception e) {
    // Expected
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 30, Source: TestNamenodeRetryCache.java

Example 12: createFile

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/** create a file with a length of <code>fileLen</code> */
private void createFile(String fileName, long fileLen, short replicas) throws IOException {
  Path filePath = new Path(fileName);
  DFSTestUtil.createFile(fs, filePath, fileLen, replicas, rand.nextLong());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 6, Source: TestNNMetricFilesInGetListingOps.java

Example 13: testPendingAndInvalidate

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * Test if BlockManager can correctly remove corresponding pending records
 * when a file is deleted
 * 
 * @throws Exception
 */
@Test
public void testPendingAndInvalidate() throws Exception {
  final Configuration CONF = new HdfsConfiguration();
  CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
      DFS_REPLICATION_INTERVAL);
  CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 
      DFS_REPLICATION_INTERVAL);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(
      DATANODE_COUNT).build();
  cluster.waitActive();
  
  FSNamesystem namesystem = cluster.getNamesystem();
  BlockManager bm = namesystem.getBlockManager();
  DistributedFileSystem fs = cluster.getFileSystem();
  try {
    // 1. create a file
    Path filePath = new Path("/tmp.txt");
    DFSTestUtil.createFile(fs, filePath, 1024, (short) 3, 0L);
    
    // 2. disable the heartbeats
    for (DataNode dn : cluster.getDataNodes()) {
      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
    }
    
    // 3. mark a couple of blocks as corrupt
    LocatedBlock block = NameNodeAdapter.getBlockLocations(
        cluster.getNameNode(), filePath.toString(), 0, 1).get(0);
    cluster.getNamesystem().writeLock();
    try {
      bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
          "STORAGE_ID", "TEST");
      bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[1],
          "STORAGE_ID", "TEST");
    } finally {
      cluster.getNamesystem().writeUnlock();
    }
    BlockManagerTestUtil.computeAllPendingWork(bm);
    BlockManagerTestUtil.updateState(bm);
    assertEquals(bm.getPendingReplicationBlocksCount(), 1L);
    assertEquals(bm.pendingReplications.getNumReplicas(block.getBlock()
        .getLocalBlock()), 2);
    
    // 4. delete the file
    fs.delete(filePath, true);
    // retry at most 10 times, each time sleep for 1s. Note that 10s is much
    // less than the default pending record timeout (5~10min)
    int retries = 10; 
    long pendingNum = bm.getPendingReplicationBlocksCount();
    while (pendingNum != 0 && retries-- > 0) {
      Thread.sleep(1000);  // let NN do the deletion
      BlockManagerTestUtil.updateState(bm);
      pendingNum = bm.getPendingReplicationBlocksCount();
    }
    assertEquals(pendingNum, 0L);
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 66, Source: TestPendingReplication.java

Example 14: modify

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@Override
void modify() throws Exception {
  DFSTestUtil.createFile(fs, file, fileLen,
      REPLICATION, seed);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 6, Source: TestSnapshot.java

Example 15: testRenameDirAndDeleteSnapshot_6

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * Rename and snapshot deletion under the same snapshottable directory.
 */
@Test
public void testRenameDirAndDeleteSnapshot_6() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(dir2);
  
  final Path foo = new Path(dir2, "foo");
  final Path bar = new Path(foo, "bar");
  final Path file = new Path(bar, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPL, SEED);
  
  // take a snapshot on /test
  SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
  
  // delete /test/dir2/foo/bar/file after snapshot s0, so that there is a 
  // snapshot copy recorded in bar
  hdfs.delete(file, true);
  
  // rename foo from dir2 to dir1
  final Path newfoo = new Path(dir1, foo.getName());
  hdfs.rename(foo, newfoo);
  
  final Path foo_s0 = SnapshotTestHelper.getSnapshotPath(test, "s0",
      "dir2/foo");
  assertTrue("the snapshot path " + foo_s0 + " should exist",
      hdfs.exists(foo_s0));
  
  // delete snapshot s0. The deletion will first go down through dir1, and 
  // find foo in the created list of dir1. Then it will use null as the prior
  // snapshot and continue the snapshot deletion process in the subtree of 
  // foo. We need to make sure the snapshot s0 can be deleted cleanly in the
  // foo subtree.
  hdfs.deleteSnapshot(test, "s0");
  // check the internal
  assertFalse("after deleting s0, " + foo_s0 + " should not exist",
      hdfs.exists(foo_s0));
  INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString())
      .asDirectory();
  assertTrue("the diff list of " + dir2
      + " should be empty after deleting s0", dir2Node.getDiffs().asList()
      .isEmpty());
  
  assertTrue(hdfs.exists(newfoo));
  INode fooRefNode = fsdir.getINode4Write(newfoo.toString());
  assertTrue(fooRefNode instanceof INodeReference.DstReference);
  INodeDirectory fooNode = fooRefNode.asDirectory();
  // fooNode should be still INodeDirectory (With Snapshot) since we call
  // recordModification before the rename
  assertTrue(fooNode.isWithSnapshot());
  assertTrue(fooNode.getDiffs().asList().isEmpty());
  INodeDirectory barNode = fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID)
      .get(0).asDirectory();
  // bar should also be INodeDirectory (With Snapshot), and both of its diff 
  // list and children list are empty 
  assertTrue(barNode.getDiffs().asList().isEmpty());
  assertTrue(barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
  
  restartClusterAndCheckImage(true);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 65, Source: TestRenameWithSnapshots.java


Note: The org.apache.hadoop.hdfs.DFSTestUtil.createFile method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please consult the corresponding project's License before redistributing or using the code. Do not reproduce without permission.