This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.DistributedFileSystem.createSnapshot. If you are wondering what DistributedFileSystem.createSnapshot does, how to call it, or where to find usage examples, the curated code samples below may help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hdfs.DistributedFileSystem.
Three code examples of DistributedFileSystem.createSnapshot are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
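Before the full examples, here is a minimal sketch of the typical call sequence around createSnapshot. It is an illustration only: it assumes fs.defaultFS points at a running HDFS NameNode, and the directory /data and snapshot name s1 are made up for this sketch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class CreateSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS is an hdfs:// URI, so the returned FileSystem is a DistributedFileSystem.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    Path dir = new Path("/data");                   // illustrative directory
    dfs.allowSnapshot(dir);                         // the directory must be made snapshottable first (admin operation)
    Path snapshot = dfs.createSnapshot(dir, "s1");  // returns the snapshot root, e.g. /data/.snapshot/s1
    System.out.println("Snapshot created at " + snapshot);
  }
}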
Example 1: testFsckForSnapshotFiles
import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
/**
 * Test for including the snapshot files in fsck report
 */
@Test
public void testFsckForSnapshotFiles() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .build();
  try {
    String runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots",
        "-files");
    assertTrue(runFsck.contains("HEALTHY"));
    final String fileName = "/srcdat";
    DistributedFileSystem hdfs = cluster.getFileSystem();
    Path file1 = new Path(fileName);
    DFSTestUtil.createFile(hdfs, file1, 1024, (short) 1, 1000L);
    hdfs.allowSnapshot(new Path("/"));
    hdfs.createSnapshot(new Path("/"), "mySnapShot");
    runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots", "-files");
    assertTrue(runFsck.contains("/.snapshot/mySnapShot/srcdat"));
    runFsck = runFsck(conf, 0, true, "/", "-files");
    assertFalse(runFsck.contains("mySnapShot"));
  } finally {
    cluster.shutdown();
  }
}
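The runFsck(...) helper used above is defined in the enclosing test class and is not shown here; conceptually it drives the same tool as the hdfs fsck command and returns its output as a String. Outside of the test harness, the check corresponds roughly to the sketch below (output capture is omitted; this is an approximation, not the helper's actual implementation):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

public class FsckSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Roughly equivalent to: hdfs fsck / -includeSnapshots -files
    int exitCode = ToolRunner.run(new DFSck(new Configuration()),
        new String[] { "/", "-includeSnapshots", "-files" });
    System.out.println("fsck exit code: " + exitCode);
  }
}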
Example 2: createSnapshot
import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
/**
 * Create snapshot for a dir using a given snapshot name
 *
 * @param hdfs DistributedFileSystem instance
 * @param snapshotRoot The dir to be snapshotted
 * @param snapshotName The name of the snapshot
 * @return The path of the snapshot root
 */
public static Path createSnapshot(DistributedFileSystem hdfs,
    Path snapshotRoot, String snapshotName) throws Exception {
  LOG.info("createSnapshot " + snapshotName + " for " + snapshotRoot);
  assertTrue(hdfs.exists(snapshotRoot));
  hdfs.allowSnapshot(snapshotRoot);
  hdfs.createSnapshot(snapshotRoot, snapshotName);
  // set quota to a large value for testing counts
  hdfs.setQuota(snapshotRoot, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
  return SnapshotTestHelper.getSnapshotRoot(snapshotRoot, snapshotName);
}
Example 3: testSnapshotStatsMXBeanInfo
import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
/**
 * Test getting SnapshotStatsMXBean information
 */
@Test
public void testSnapshotStatsMXBeanInfo() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  String pathName = "/snapshot";
  Path path = new Path(pathName);

  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();

    SnapshotManager sm = cluster.getNamesystem().getSnapshotManager();
    DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
    dfs.mkdirs(path);
    dfs.allowSnapshot(path);
    dfs.createSnapshot(path);

    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=NameNode,name=SnapshotInfo");

    CompositeData[] directories =
        (CompositeData[]) mbs.getAttribute(
            mxbeanName, "SnapshottableDirectories");
    int numDirectories = Array.getLength(directories);
    assertEquals(sm.getNumSnapshottableDirs(), numDirectories);
    CompositeData[] snapshots =
        (CompositeData[]) mbs.getAttribute(mxbeanName, "Snapshots");
    int numSnapshots = Array.getLength(snapshots);
    assertEquals(sm.getNumSnapshots(), numSnapshots);

    CompositeData d = (CompositeData) Array.get(directories, 0);
    CompositeData s = (CompositeData) Array.get(snapshots, 0);
    assertTrue(((String) d.get("path")).contains(pathName));
    assertTrue(((String) s.get("snapshotDirectory")).contains(pathName));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
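A brief note on the call dfs.createSnapshot(path) used above: the single-argument overload lets the NameNode generate a snapshot name, and that name can be recovered from the returned path. The sketch below shows a possible cleanup sequence; it is an assumption-laden illustration that reuses the dfs and path variables from the example and presumes the directory holds no other snapshots.

// `dfs` and `path` as in the example above (assumed still in scope).
Path generated = dfs.createSnapshot(path);   // NameNode generates the snapshot name
String snapshotName = generated.getName();   // e.g. "s20240101-120000.000"; the exact format may vary by version
dfs.deleteSnapshot(path, snapshotName);      // all snapshots must be removed...
dfs.disallowSnapshot(path);                  // ...before the directory can be made non-snapshottable again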