当前位置: 首页>>代码示例>>Java>>正文


Java SnapshotTestHelper.getSnapshotRoot方法代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.getSnapshotRoot方法的典型用法代码示例。如果您正苦于以下问题:Java SnapshotTestHelper.getSnapshotRoot方法的具体用法?Java SnapshotTestHelper.getSnapshotRoot怎么用?Java SnapshotTestHelper.getSnapshotRoot使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper的用法示例。


在下文中一共展示了SnapshotTestHelper.getSnapshotRoot方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: testWebHdfsCreateSnapshot

import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; //导入方法依赖的package包/类
/**
 * Exercises snapshot creation over WebHdfs. Creating a snapshot before the
 * directory is made snapshottable must be rejected; afterwards both a named
 * snapshot ("s1") and an auto-named snapshot must become visible.
 */
@Test
public void testWebHdfsCreateSnapshot() throws Exception {
  final Configuration conf = WebHdfsTestUtil.createConf();
  MiniDFSCluster miniCluster = null;
  try {
    miniCluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    miniCluster.waitActive();
    final DistributedFileSystem distFs = miniCluster.getFileSystem();
    final FileSystem webFs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);

    final Path target = new Path("/foo");
    distFs.mkdirs(target);

    // Snapshots are disallowed by default, so this first attempt must throw.
    try {
      webFs.createSnapshot(target);
      fail("Cannot create snapshot on a non-snapshottable directory");
    } catch (Exception e) {
      GenericTestUtils.assertExceptionContains(
          "Directory is not a snapshottable directory", e);
    }

    // Enable snapshots on the directory, then create one named and one
    // auto-named snapshot through WebHdfs.
    distFs.allowSnapshot(target);
    webFs.createSnapshot(target, "s1");
    final Path autoNamed = webFs.createSnapshot(target, null);

    Assert.assertTrue(webFs.exists(autoNamed));
    final Path named = SnapshotTestHelper.getSnapshotRoot(target, "s1");
    Assert.assertTrue(webFs.exists(named));
  } finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:42,代码来源:TestWebHDFS.java

示例2: testWebHdfsDeleteSnapshot

import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; //导入方法依赖的package包/类
/**
 * Exercises snapshot deletion over WebHdfs: creates a named and an
 * auto-named snapshot, then deletes both and verifies they are gone.
 */
@Test
public void testWebHdfsDeleteSnapshot() throws Exception {
  final Configuration conf = WebHdfsTestUtil.createConf();
  MiniDFSCluster miniCluster = null;
  try {
    miniCluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    miniCluster.waitActive();
    final DistributedFileSystem distFs = miniCluster.getFileSystem();
    final FileSystem webFs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);

    final Path target = new Path("/foo");
    distFs.mkdirs(target);
    distFs.allowSnapshot(target);

    // One named snapshot plus one with a generated name.
    webFs.createSnapshot(target, "s1");
    final Path autoNamed = webFs.createSnapshot(target, null);
    Assert.assertTrue(webFs.exists(autoNamed));
    final Path named = SnapshotTestHelper.getSnapshotRoot(target, "s1");
    Assert.assertTrue(webFs.exists(named));

    // Remove both snapshots through WebHdfs and confirm removal.
    webFs.deleteSnapshot(target, "s1");
    Assert.assertFalse(webFs.exists(named));
    webFs.deleteSnapshot(target, autoNamed.getName());
    Assert.assertFalse(webFs.exists(autoNamed));
  } finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:36,代码来源:TestWebHDFS.java

示例3: testWebHdfsRenameSnapshot

import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; //导入方法依赖的package包/类
/**
 * Exercises snapshot rename over WebHdfs: renames "s1" to "s2" and checks
 * that only the new snapshot root exists before cleaning it up.
 */
@Test
public void testWebHdfsRenameSnapshot() throws Exception {
  final Configuration conf = WebHdfsTestUtil.createConf();
  MiniDFSCluster miniCluster = null;
  try {
    miniCluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    miniCluster.waitActive();
    final DistributedFileSystem distFs = miniCluster.getFileSystem();
    final FileSystem webFs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);

    final Path target = new Path("/foo");
    distFs.mkdirs(target);
    distFs.allowSnapshot(target);

    webFs.createSnapshot(target, "s1");
    final Path oldRoot = SnapshotTestHelper.getSnapshotRoot(target, "s1");
    Assert.assertTrue(webFs.exists(oldRoot));

    // After renaming s1 -> s2, only the s2 root should exist.
    webFs.renameSnapshot(target, "s1", "s2");
    Assert.assertFalse(webFs.exists(oldRoot));
    final Path newRoot = SnapshotTestHelper.getSnapshotRoot(target, "s2");
    Assert.assertTrue(webFs.exists(newRoot));

    webFs.deleteSnapshot(target, "s2");
    Assert.assertFalse(webFs.exists(newRoot));
  } finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:37,代码来源:TestWebHDFS.java

示例4: checkNamenodeBeforeReturn

import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; //导入方法依赖的package包/类
/**
 * Polls the NameNode for the snapshot root, sleeping one second between
 * attempts, for at most CHECKTIMES retries.
 *
 * @return true once the snapshot is visible, false if it never appeared
 */
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  final Path snapshotRoot = SnapshotTestHelper.getSnapshotRoot(new Path(dir),
      snapshotName);
  boolean found = dfs.exists(snapshotRoot);
  int attempts = 0;
  while (!found && attempts < CHECKTIMES) {
    Thread.sleep(1000);
    found = dfs.exists(snapshotRoot);
    attempts++;
  }
  return found;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:12,代码来源:TestRetryCacheWithHA.java

示例5: prepare

import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; //导入方法依赖的package包/类
/**
 * Ensures the test directory exists and already holds the target snapshot,
 * creating the directory, allowing snapshots on it, and taking the snapshot
 * when any of them is missing.
 */
@Override
void prepare() throws Exception {
  final Path target = new Path(dir);
  if (!dfs.exists(target)) {
    dfs.mkdirs(target);
  }

  final Path snapshotRoot =
      SnapshotTestHelper.getSnapshotRoot(target, snapshotName);
  if (!dfs.exists(snapshotRoot)) {
    dfs.allowSnapshot(target);
    dfs.createSnapshot(target, snapshotName);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:14,代码来源:TestRetryCacheWithHA.java

示例6: testWebHdfsCreateSnapshot

import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; //导入方法依赖的package包/类
/**
 * Exercises snapshot creation over WebHdfs. A snapshot request on a
 * directory that is not snapshottable must fail; once allowed, a named
 * snapshot ("s1") and an auto-named snapshot must both be visible.
 */
@Test
public void testWebHdfsCreateSnapshot() throws Exception {
  final Configuration conf = WebHdfsTestUtil.createConf();
  MiniDFSCluster miniCluster = null;
  try {
    miniCluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    miniCluster.waitActive();
    final DistributedFileSystem distFs = miniCluster.getFileSystem();
    final FileSystem webFs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsConstants.WEBHDFS_SCHEME);

    final Path target = new Path("/foo");
    distFs.mkdirs(target);

    // Snapshots are disallowed by default, so this first attempt must throw.
    try {
      webFs.createSnapshot(target);
      fail("Cannot create snapshot on a non-snapshottable directory");
    } catch (Exception e) {
      GenericTestUtils.assertExceptionContains(
          "Directory is not a snapshottable directory", e);
    }

    // Enable snapshots, then create one named and one auto-named snapshot.
    distFs.allowSnapshot(target);
    webFs.createSnapshot(target, "s1");
    final Path autoNamed = webFs.createSnapshot(target, null);

    Assert.assertTrue(webFs.exists(autoNamed));
    final Path named = SnapshotTestHelper.getSnapshotRoot(target, "s1");
    Assert.assertTrue(webFs.exists(named));
  } finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:42,代码来源:TestWebHDFS.java

示例7: testWebHdfsDeleteSnapshot

import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; //导入方法依赖的package包/类
/**
 * Exercises snapshot deletion over WebHdfs: creates a named and an
 * auto-named snapshot, then deletes both and verifies they are gone.
 */
@Test
public void testWebHdfsDeleteSnapshot() throws Exception {
  final Configuration conf = WebHdfsTestUtil.createConf();
  MiniDFSCluster miniCluster = null;
  try {
    miniCluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    miniCluster.waitActive();
    final DistributedFileSystem distFs = miniCluster.getFileSystem();
    final FileSystem webFs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsConstants.WEBHDFS_SCHEME);

    final Path target = new Path("/foo");
    distFs.mkdirs(target);
    distFs.allowSnapshot(target);

    // One named snapshot plus one with a generated name.
    webFs.createSnapshot(target, "s1");
    final Path autoNamed = webFs.createSnapshot(target, null);
    Assert.assertTrue(webFs.exists(autoNamed));
    final Path named = SnapshotTestHelper.getSnapshotRoot(target, "s1");
    Assert.assertTrue(webFs.exists(named));

    // Remove both snapshots through WebHdfs and confirm removal.
    webFs.deleteSnapshot(target, "s1");
    Assert.assertFalse(webFs.exists(named));
    webFs.deleteSnapshot(target, autoNamed.getName());
    Assert.assertFalse(webFs.exists(autoNamed));
  } finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:36,代码来源:TestWebHDFS.java

示例8: testWebHdfsRenameSnapshot

import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; //导入方法依赖的package包/类
/**
 * Exercises snapshot rename over WebHdfs: renames "s1" to "s2" and checks
 * that only the new snapshot root exists before cleaning it up.
 */
@Test
public void testWebHdfsRenameSnapshot() throws Exception {
  final Configuration conf = WebHdfsTestUtil.createConf();
  MiniDFSCluster miniCluster = null;
  try {
    miniCluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    miniCluster.waitActive();
    final DistributedFileSystem distFs = miniCluster.getFileSystem();
    final FileSystem webFs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsConstants.WEBHDFS_SCHEME);

    final Path target = new Path("/foo");
    distFs.mkdirs(target);
    distFs.allowSnapshot(target);

    webFs.createSnapshot(target, "s1");
    final Path oldRoot = SnapshotTestHelper.getSnapshotRoot(target, "s1");
    Assert.assertTrue(webFs.exists(oldRoot));

    // After renaming s1 -> s2, only the s2 root should exist.
    webFs.renameSnapshot(target, "s1", "s2");
    Assert.assertFalse(webFs.exists(oldRoot));
    final Path newRoot = SnapshotTestHelper.getSnapshotRoot(target, "s2");
    Assert.assertTrue(webFs.exists(newRoot));

    webFs.deleteSnapshot(target, "s2");
    Assert.assertFalse(webFs.exists(newRoot));
  } finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:37,代码来源:TestWebHDFS.java

示例9: testSetStoragePolicyWithSnapshot

import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; //导入方法依赖的package包/类
/**
 * Verifies the interaction between storage policies and snapshots: listing a
 * path under .snapshot reports the file's LATEST storage policy (policies are
 * not frozen at snapshot time), and this holds even after the live file or
 * directory has been deleted.
 */
@Test
public void testSetStoragePolicyWithSnapshot() throws Exception {
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  final DistributedFileSystem fs = cluster.getFileSystem();
  try {
    // Layout: /testSetStoragePolicyWithSnapshot/foo/{f1,f2}
    final Path dir = new Path("/testSetStoragePolicyWithSnapshot");
    final Path fooDir = new Path(dir, "foo");
    final Path fooFile1= new Path(fooDir, "f1");
    final Path fooFile2= new Path(fooDir, "f2");
    DFSTestUtil.createFile(fs, fooFile1, FILE_LEN, REPLICATION, 0L);
    DFSTestUtil.createFile(fs, fooFile2, FILE_LEN, REPLICATION, 0L);

    // Set WARM on the parent dir; both children should inherit it in listings.
    fs.setStoragePolicy(fooDir, HdfsConstants.WARM_STORAGE_POLICY_NAME);

    HdfsFileStatus[] dirList = fs.getClient().listPaths(dir.toString(),
        HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
    checkDirectoryListing(dirList, WARM);
    HdfsFileStatus[] fooList = fs.getClient().listPaths(fooDir.toString(),
        HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
    checkDirectoryListing(fooList, WARM, WARM);

    // Take snapshot s1, then change f1's policy AFTER the snapshot.
    SnapshotTestHelper.createSnapshot(fs, dir, "s1");
    // change the storage policy of fooFile1
    fs.setStoragePolicy(fooFile1, HdfsConstants.COLD_STORAGE_POLICY_NAME);

    fooList = fs.getClient().listPaths(fooDir.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing();
    checkDirectoryListing(fooList, COLD, WARM);

    // check the policy for /dir/.snapshot/s1/foo/f1. Note we always return
    // the latest storage policy for a file/directory.
    Path s1f1 = SnapshotTestHelper.getSnapshotPath(dir, "s1", "foo/f1");
    DirectoryListing f1Listing = fs.getClient().listPaths(s1f1.toString(),
        HdfsFileStatus.EMPTY_NAME);
    checkDirectoryListing(f1Listing.getPartialListing(), COLD);

    // delete f1: the snapshot copy remains and keeps reporting COLD
    fs.delete(fooFile1, true);
    fooList = fs.getClient().listPaths(fooDir.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing();
    checkDirectoryListing(fooList, WARM);
    // check the policy for /dir/.snapshot/s1/foo/f1 again after the deletion
    checkDirectoryListing(fs.getClient().listPaths(s1f1.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing(), COLD);

    // change the storage policy of foo dir
    fs.setStoragePolicy(fooDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
    // /dir/foo is now hot
    dirList = fs.getClient().listPaths(dir.toString(),
        HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
    checkDirectoryListing(dirList, HOT);
    // /dir/foo/f2 is hot
    fooList = fs.getClient().listPaths(fooDir.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing();
    checkDirectoryListing(fooList, HOT);

    // check storage policy of snapshot path
    Path s1 = SnapshotTestHelper.getSnapshotRoot(dir, "s1");
    Path s1foo = SnapshotTestHelper.getSnapshotPath(dir, "s1", "foo");
    checkDirectoryListing(fs.getClient().listPaths(s1.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing(), HOT);
    // /dir/.snapshot/.s1/foo/f1 and /dir/.snapshot/.s1/foo/f2 should still
    // follow the latest
    checkDirectoryListing(fs.getClient().listPaths(s1foo.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing(), COLD, HOT);

    // delete foo: listings via the snapshot must still reflect the policies
    // that were current at deletion time (HOT dir, COLD f1, HOT f2)
    fs.delete(fooDir, true);
    checkDirectoryListing(fs.getClient().listPaths(s1.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing(), HOT);
    checkDirectoryListing(fs.getClient().listPaths(s1foo.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing(), COLD, HOT);
  } finally {
    cluster.shutdown();
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:80,代码来源:TestBlockStoragePolicy.java

示例10: testWebHdfsAllowandDisallowSnapshots

import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; //导入方法依赖的package包/类
/**
 * Verifies allowSnapshot/disallowSnapshot through WebHdfs, cross-checking
 * the snapshottable-directory listing via the DistributedFileSystem API.
 */
@Test
public void testWebHdfsAllowandDisallowSnapshots() throws Exception {
  final Configuration conf = WebHdfsTestUtil.createConf();
  MiniDFSCluster miniCluster = null;
  try {
    miniCluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    miniCluster.waitActive();
    final DistributedFileSystem distFs = miniCluster.getFileSystem();
    final WebHdfsFileSystem webFs = WebHdfsTestUtil
        .getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);

    final Path bar = new Path("/bar");
    distFs.mkdirs(bar);

    // Allow snapshots via WebHdfs; the directory must show up in the
    // snapshottable listing and accept a snapshot.
    webFs.allowSnapshot(bar);
    webFs.createSnapshot(bar, "s1");
    final Path s1Root = SnapshotTestHelper.getSnapshotRoot(bar, "s1");
    Assert.assertTrue(webFs.exists(s1Root));
    SnapshottableDirectoryStatus[] snapshottable =
        distFs.getSnapshottableDirListing();
    assertEquals(1, snapshottable.length);
    assertEquals(bar, snapshottable[0].getFullPath());
    distFs.deleteSnapshot(bar, "s1");
    distFs.disallowSnapshot(bar);
    snapshottable = distFs.getSnapshottableDirListing();
    assertNull(snapshottable);

    // Disallow snapshots via WebHdfs; the listing empties and further
    // snapshot creation must fail.
    distFs.allowSnapshot(bar);
    snapshottable = distFs.getSnapshottableDirListing();
    assertEquals(1, snapshottable.length);
    assertEquals(bar, snapshottable[0].getFullPath());
    webFs.disallowSnapshot(bar);
    snapshottable = distFs.getSnapshottableDirListing();
    assertNull(snapshottable);
    try {
      webFs.createSnapshot(bar);
      fail("Cannot create snapshot on a non-snapshottable directory");
    } catch (Exception e) {
      GenericTestUtils.assertExceptionContains(
          "Directory is not a snapshottable directory", e);
    }
  } finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:54,代码来源:TestWebHDFS.java


注:本文中的org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.getSnapshotRoot方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。