

Java SnapshotTestHelper.createSnapshot Method Code Examples

This article collects typical usages of the Java method org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.createSnapshot, drawn from open-source projects. If you are wondering what SnapshotTestHelper.createSnapshot does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples of org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper itself.


The following shows 8 code examples of the SnapshotTestHelper.createSnapshot method, ordered by popularity.
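Before the examples, a word on what the helper does: an HDFS directory must be made snapshottable with allowSnapshot before createSnapshot can succeed, and SnapshotTestHelper.createSnapshot bundles those two DistributedFileSystem calls for the tests. The sketch below shows that underlying pattern against a MiniDFSCluster; it is a minimal illustration, and the path "/demo" and snapshot name "s0" are invented for this example rather than taken from the Hadoop sources.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class CreateSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DistributedFileSystem dfs = cluster.getFileSystem();
      Path dir = new Path("/demo"); // illustrative path, not from the Hadoop sources
      dfs.mkdirs(dir);
      // a directory must be snapshottable before a snapshot can be taken;
      // SnapshotTestHelper.createSnapshot wraps these two calls in the tests below
      dfs.allowSnapshot(dir);
      dfs.createSnapshot(dir, "s0"); // contents now readable under /demo/.snapshot/s0
    } finally {
      cluster.shutdown();
    }
  }
}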

Example 1: prepare

import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; // import the package/class this method depends on
/**
 * Create files/directories/snapshots.
 */
void prepare(DistributedFileSystem dfs, short repl) throws Exception {
  for (Path d : dirs) {
    dfs.mkdirs(d);
  }
  for (Path file : files) {
    DFSTestUtil.createFile(dfs, file, fileSize, repl, 0L);
  }
  for (Map.Entry<Path, List<String>> entry : snapshotMap.entrySet()) {
    for (String snapshot : entry.getValue()) {
      SnapshotTestHelper.createSnapshot(dfs, entry.getKey(), snapshot);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: TestStorageMover.java

Example 2: prepare

import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; // import the package/class this method depends on
@Override
public void prepare() throws Exception {
  // original size: 2.5 blocks
  DFSTestUtil.createFile(dfs, file, BLOCKSIZE * 2 + BLOCKSIZE / 2,
      REPLICATION, 0L);
  SnapshotTestHelper.createSnapshot(dfs, dir, "s1");

  // truncate to 1.5 blocks
  dfs.truncate(file, BLOCKSIZE + BLOCKSIZE / 2);
  TestFileTruncate.checkBlockRecovery(file, dfs);

  // append another block (BLOCKSIZE bytes)
  DFSTestUtil.appendFile(dfs, file, BLOCKSIZE);
}
 
Developer: naver, Project: hadoop, Lines: 15, Source: TestTruncateQuotaUpdate.java

Example 3: testDeleteAddBlockRace

import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; // import the package/class this method depends on
private void testDeleteAddBlockRace(boolean hasSnapshot) throws Exception {
  try {
    conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
        SlowBlockPlacementPolicy.class, BlockPlacementPolicy.class);
    cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    final String fileName = "/testDeleteAddBlockRace";
    Path filePath = new Path(fileName);

    FSDataOutputStream out = fs.create(filePath);
    if (hasSnapshot) {
      SnapshotTestHelper.createSnapshot((DistributedFileSystem) fs, new Path(
          "/"), "s1");
    }

    Thread deleteThread = new DeleteThread(fs, filePath);
    deleteThread.start();

    try {
      // write data and sync to make sure a block is allocated.
      out.write(new byte[32], 0, 32);
      out.hsync();
      Assert.fail("Should have failed.");
    } catch (FileNotFoundException e) {
      GenericTestUtils.assertExceptionContains(filePath.getName(), e);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 33, Source: TestDeleteRace.java
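Note that SlowBlockPlacementPolicy and DeleteThread are helper classes defined elsewhere in TestDeleteRace and are not shown here. Reading the test, the slow placement policy presumably stalls block allocation long enough for the concurrent delete to win the race, which is why the hsync is expected to fail with a FileNotFoundException whether or not a snapshot exists.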

Example 4: testSetStoragePolicyWithSnapshot

import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; // import the package/class this method depends on
@Test
public void testSetStoragePolicyWithSnapshot() throws Exception {
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  final DistributedFileSystem fs = cluster.getFileSystem();
  try {
    final Path dir = new Path("/testSetStoragePolicyWithSnapshot");
    final Path fooDir = new Path(dir, "foo");
    final Path fooFile1 = new Path(fooDir, "f1");
    final Path fooFile2 = new Path(fooDir, "f2");
    DFSTestUtil.createFile(fs, fooFile1, FILE_LEN, REPLICATION, 0L);
    DFSTestUtil.createFile(fs, fooFile2, FILE_LEN, REPLICATION, 0L);

    fs.setStoragePolicy(fooDir, HdfsConstants.WARM_STORAGE_POLICY_NAME);

    HdfsFileStatus[] dirList = fs.getClient().listPaths(dir.toString(),
        HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
    checkDirectoryListing(dirList, WARM);
    HdfsFileStatus[] fooList = fs.getClient().listPaths(fooDir.toString(),
        HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
    checkDirectoryListing(fooList, WARM, WARM);

    // take snapshot
    SnapshotTestHelper.createSnapshot(fs, dir, "s1");
    // change the storage policy of fooFile1
    fs.setStoragePolicy(fooFile1, HdfsConstants.COLD_STORAGE_POLICY_NAME);

    fooList = fs.getClient().listPaths(fooDir.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing();
    checkDirectoryListing(fooList, COLD, WARM);

    // check the policy for /dir/.snapshot/s1/foo/f1. Note we always return
    // the latest storage policy for a file/directory.
    Path s1f1 = SnapshotTestHelper.getSnapshotPath(dir, "s1", "foo/f1");
    DirectoryListing f1Listing = fs.getClient().listPaths(s1f1.toString(),
        HdfsFileStatus.EMPTY_NAME);
    checkDirectoryListing(f1Listing.getPartialListing(), COLD);

    // delete f1
    fs.delete(fooFile1, true);
    fooList = fs.getClient().listPaths(fooDir.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing();
    checkDirectoryListing(fooList, WARM);
    // check the policy for /dir/.snapshot/s1/foo/f1 again after the deletion
    checkDirectoryListing(fs.getClient().listPaths(s1f1.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing(), COLD);

    // change the storage policy of foo dir
    fs.setStoragePolicy(fooDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
    // /dir/foo is now hot
    dirList = fs.getClient().listPaths(dir.toString(),
        HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
    checkDirectoryListing(dirList, HOT);
    // /dir/foo/f2 is hot
    fooList = fs.getClient().listPaths(fooDir.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing();
    checkDirectoryListing(fooList, HOT);

    // check storage policy of snapshot path
    Path s1 = SnapshotTestHelper.getSnapshotRoot(dir, "s1");
    Path s1foo = SnapshotTestHelper.getSnapshotPath(dir, "s1", "foo");
    checkDirectoryListing(fs.getClient().listPaths(s1.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing(), HOT);
    // /dir/.snapshot/s1/foo/f1 and /dir/.snapshot/s1/foo/f2 should still
    // follow the latest storage policy
    checkDirectoryListing(fs.getClient().listPaths(s1foo.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing(), COLD, HOT);

    // delete foo
    fs.delete(fooDir, true);
    checkDirectoryListing(fs.getClient().listPaths(s1.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing(), HOT);
    checkDirectoryListing(fs.getClient().listPaths(s1foo.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing(), COLD, HOT);
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 80, Source: TestBlockStoragePolicy.java
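The snapshot-path checks above depend on HDFS's .snapshot pseudo-directory convention. Below is a small sketch of how the two helper lookups used in this test resolve; the expected output strings are inferred from the in-code comments (/dir/.snapshot/s1/...) rather than from the helper's implementation.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;

public class SnapshotPathSketch {
  public static void main(String[] args) {
    Path dir = new Path("/testSetStoragePolicyWithSnapshot");
    // root of snapshot s1, i.e. <dir>/.snapshot/s1
    Path s1 = SnapshotTestHelper.getSnapshotRoot(dir, "s1");
    // a file inside snapshot s1, i.e. <dir>/.snapshot/s1/foo/f1
    Path s1f1 = SnapshotTestHelper.getSnapshotPath(dir, "s1", "foo/f1");
    System.out.println(s1);   // expected: /testSetStoragePolicyWithSnapshot/.snapshot/s1
    System.out.println(s1f1); // expected: /testSetStoragePolicyWithSnapshot/.snapshot/s1/foo/f1
  }
}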

Example 5: testQuotaByStorageTypeWithSnapshot

import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; // import the package/class this method depends on
@Test(timeout = 60000)
public void testQuotaByStorageTypeWithSnapshot() throws Exception {
  final Path sub1 = new Path(dir, "Sub1");
  dfs.mkdirs(sub1);

  // Setup ONE_SSD policy and SSD quota of 4 * BLOCKSIZE on sub1
  dfs.setStoragePolicy(sub1, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  dfs.setQuotaByStorageType(sub1, StorageType.SSD, 4 * BLOCKSIZE);

  INode sub1Node = fsdir.getINode4Write(sub1.toString());
  assertTrue(sub1Node.isDirectory());
  assertTrue(sub1Node.isQuotaSet());

  // Create file1 of size 2 * BLOCKSIZE under sub1
  Path file1 = new Path(sub1, "file1");
  long file1Len = 2 * BLOCKSIZE;
  DFSTestUtil.createFile(dfs, file1, file1Len, REPLICATION, seed);

  // Create snapshot on sub1 named s1
  SnapshotTestHelper.createSnapshot(dfs, sub1, "s1");

  // Verify sub1 SSD usage is unchanged after creating snapshot s1
  long ssdConsumed = sub1Node.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumed);

  // Delete file1
  dfs.delete(file1, false);

  // Verify sub1 SSD usage is unchanged due to the existence of snapshot s1
  ssdConsumed = sub1Node.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumed);

  QuotaCounts counts1 = new QuotaCounts.Builder().build();
  sub1Node.computeQuotaUsage(fsn.getBlockManager().getStoragePolicySuite(), counts1, true);
  assertEquals(sub1Node.dumpTreeRecursively().toString(), file1Len,
      counts1.getTypeSpaces().get(StorageType.SSD));

  ContentSummary cs1 = dfs.getContentSummary(sub1);
  assertEquals(cs1.getSpaceConsumed(), file1Len * REPLICATION);
  assertEquals(cs1.getTypeConsumed(StorageType.SSD), file1Len);
  assertEquals(cs1.getTypeConsumed(StorageType.DISK), file1Len * 2);

  // Delete the snapshot s1
  dfs.deleteSnapshot(sub1, "s1");

  // Verify sub1 SSD usage is fully reclaimed and changed to 0
  ssdConsumed = sub1Node.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(0, ssdConsumed);

  QuotaCounts counts2 = new QuotaCounts.Builder().build();
  sub1Node.computeQuotaUsage(fsn.getBlockManager().getStoragePolicySuite(), counts2, true);
  assertEquals(sub1Node.dumpTreeRecursively().toString(), 0,
      counts2.getTypeSpaces().get(StorageType.SSD));

  ContentSummary cs2 = dfs.getContentSummary(sub1);
  assertEquals(cs2.getSpaceConsumed(), 0);
  assertEquals(cs2.getTypeConsumed(StorageType.SSD), 0);
  assertEquals(cs2.getTypeConsumed(StorageType.DISK), 0);
}
 
Developer: naver, Project: hadoop, Lines: 63, Source: TestQuotaByStorageType.java
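A note on the ContentSummary arithmetic here (and in the near-identical Example 8): under the ONE_SSD policy, each block stores one replica on SSD and the remaining REPLICATION - 1 replicas on DISK. The assertions SSD = file1Len and DISK = file1Len * 2 therefore imply REPLICATION is 3 in this test class, giving a total space consumed of file1Len * REPLICATION.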

Example 6: testSaveLoadImage

import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; // import the package/class this method depends on
/**
 * Testing steps:
 * <pre>
 * 1. Creating/modifying directories/files while snapshots are being taken.
 * 2. Dump the FSDirectory tree of the namesystem.
 * 3. Save the namesystem to a temp file (FSImage saving).
 * 4. Restart the cluster and format the namesystem.
 * 5. Load the namesystem from the temp file (FSImage loading).
 * 6. Dump the FSDirectory again and compare the two dumped string.
 * </pre>
 */
@Test
public void testSaveLoadImage() throws Exception {
  int s = 0;
  // make changes to the namesystem
  hdfs.mkdirs(dir);
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s" + ++s);
  Path sub1 = new Path(dir, "sub1");
  hdfs.mkdirs(sub1);
  hdfs.setPermission(sub1, new FsPermission((short)0777));
  Path sub11 = new Path(sub1, "sub11");
  hdfs.mkdirs(sub11);
  checkImage(s);

  hdfs.createSnapshot(dir, "s" + ++s);
  Path sub1file1 = new Path(sub1, "sub1file1");
  Path sub1file2 = new Path(sub1, "sub1file2");
  DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, REPLICATION, seed);
  checkImage(s);
  
  hdfs.createSnapshot(dir, "s" + ++s);
  Path sub2 = new Path(dir, "sub2");
  Path sub2file1 = new Path(sub2, "sub2file1");
  Path sub2file2 = new Path(sub2, "sub2file2");
  DFSTestUtil.createFile(hdfs, sub2file1, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, sub2file2, BLOCKSIZE, REPLICATION, seed);
  checkImage(s);

  hdfs.createSnapshot(dir, "s" + ++s);
  hdfs.setReplication(sub1file1, (short) (REPLICATION - 1));
  hdfs.delete(sub1file2, true);
  hdfs.setOwner(sub2, "dr.who", "unknown");
  hdfs.delete(sub2file1, true);
  checkImage(s);
  
  hdfs.createSnapshot(dir, "s" + ++s);
  Path sub1_sub2file2 = new Path(sub1, "sub2file2");
  hdfs.rename(sub2file2, sub1_sub2file2);
  
  hdfs.rename(sub1file1, sub2file1);
  checkImage(s);
  
  hdfs.rename(sub2file1, sub2file2);
  checkImage(s);
}
 
Developer: naver, Project: hadoop, Lines: 57, Source: TestFSImageWithSnapshot.java

Example 7: testSaveLoadImageAfterSnapshotDeletion

import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; // import the package/class this method depends on
/**
 * Testing a special case with snapshots. When the following steps happen:
 * <pre>
 * 1. Take snapshot s1 on dir.
 * 2. Create new dir and files under subsubDir, which is descendant of dir.
 * 3. Take snapshot s2 on dir.
 * 4. Delete subsubDir.
 * 5. Delete snapshot s2.
 * </pre>
 * When we merge the diff from s2 to s1 (since we deleted s2), we need to make
 * sure all the files/dirs created after s1 should be destroyed. Otherwise
 * we may save these files/dirs to the fsimage, and cause FileNotFound 
 * Exception while loading fsimage.  
 */
@Test (timeout=300000)
public void testSaveLoadImageAfterSnapshotDeletion()
    throws Exception {
  // create initial dir and subdir
  Path dir = new Path("/dir");
  Path subDir = new Path(dir, "subdir");
  Path subsubDir = new Path(subDir, "subsubdir");
  hdfs.mkdirs(subsubDir);
  
  // take snapshot s1 on dir
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
  
  // create a new dir and file under subsubDir
  Path newDir = new Path(subsubDir, "newdir");
  Path newFile = new Path(newDir, "newfile");
  hdfs.mkdirs(newDir);
  DFSTestUtil.createFile(hdfs, newFile, BLOCKSIZE, REPLICATION, seed);
  
  // create another snapshot
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s2");
  
  // delete subsubdir
  hdfs.delete(subsubDir, true);
  
  // delete snapshot s2
  hdfs.deleteSnapshot(dir, "s2");
  
  // restart cluster
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
      .format(false).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
  
  // save namespace to fsimage
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
}
 
Developer: naver, Project: hadoop, Lines: 62, Source: TestFSImageWithSnapshot.java

Example 8: testQuotaByStorageTypeWithSnapshot

import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; // import the package/class this method depends on
@Test(timeout = 60000)
public void testQuotaByStorageTypeWithSnapshot() throws Exception {
  final Path sub1 = new Path(dir, "Sub1");
  dfs.mkdirs(sub1);

  // Setup ONE_SSD policy and SSD quota of 4 * BLOCKSIZE on sub1
  dfs.setStoragePolicy(sub1, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  dfs.setQuotaByStorageType(sub1, StorageType.SSD, 4 * BLOCKSIZE);

  INode sub1Node = fsdir.getINode4Write(sub1.toString());
  assertTrue(sub1Node.isDirectory());
  assertTrue(sub1Node.isQuotaSet());

  // Create file1 of size 2 * BLOCKSIZE under sub1
  Path file1 = new Path(sub1, "file1");
  long file1Len = 2 * BLOCKSIZE;
  DFSTestUtil.createFile(dfs, file1, file1Len, REPLICATION, seed);

  // Create snapshot on sub1 named s1
  SnapshotTestHelper.createSnapshot(dfs, sub1, "s1");

  // Verify sub1 SSD usage is unchanged after creating snapshot s1
  long ssdConsumed = sub1Node.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumed);

  // Delete file1
  dfs.delete(file1, false);

  // Verify sub1 SSD usage is unchanged due to the existence of snapshot s1
  ssdConsumed = sub1Node.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumed);

  QuotaCounts counts1 = sub1Node.computeQuotaUsage(
      fsn.getBlockManager().getStoragePolicySuite(), true);
  assertEquals(sub1Node.dumpTreeRecursively().toString(), file1Len,
      counts1.getTypeSpaces().get(StorageType.SSD));

  ContentSummary cs1 = dfs.getContentSummary(sub1);
  assertEquals(cs1.getSpaceConsumed(), file1Len * REPLICATION);
  assertEquals(cs1.getTypeConsumed(StorageType.SSD), file1Len);
  assertEquals(cs1.getTypeConsumed(StorageType.DISK), file1Len * 2);

  // Delete the snapshot s1
  dfs.deleteSnapshot(sub1, "s1");

  // Verify sub1 SSD usage is fully reclaimed and changed to 0
  ssdConsumed = sub1Node.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(0, ssdConsumed);

  QuotaCounts counts2 = sub1Node.computeQuotaUsage(
      fsn.getBlockManager().getStoragePolicySuite(), true);
  assertEquals(sub1Node.dumpTreeRecursively().toString(), 0,
      counts2.getTypeSpaces().get(StorageType.SSD));

  ContentSummary cs2 = dfs.getContentSummary(sub1);
  assertEquals(cs2.getSpaceConsumed(), 0);
  assertEquals(cs2.getTypeConsumed(StorageType.SSD), 0);
  assertEquals(cs2.getTypeConsumed(StorageType.DISK), 0);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 63, Source: TestQuotaByStorageType.java


Note: The org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.createSnapshot method examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from community open-source projects, and copyright of the source code remains with the original authors; consult the corresponding project's License before distributing or using the code, and do not repost without permission.