本文整理汇总了Java中org.apache.hadoop.hdfs.server.namenode.FSDirectory.getINode方法的典型用法代码示例。如果您正苦于以下问题:Java FSDirectory.getINode方法的具体用法?Java FSDirectory.getINode怎么用?Java FSDirectory.getINode使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hdfs.server.namenode.FSDirectory
的用法示例。
在下文中一共展示了FSDirectory.getINode方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testDisallowNestedSnapshottableDir
import org.apache.hadoop.hdfs.server.namenode.FSDirectory; //导入方法依赖的package包/类
/**
 * Verifies that disallowing snapshots on a nested snapshottable directory
 * converts it back into an ordinary directory that still carries snapshot
 * data, rather than leaving it snapshottable.
 */
@Test
public void testDisallowNestedSnapshottableDir() throws Exception {
  cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);

  final Path parent = new Path("/dir");
  final Path nested = new Path(parent, "sub");
  hdfs.mkdirs(nested);
  SnapshotTestHelper.createSnapshot(hdfs, parent, "s1");
  DFSTestUtil.createFile(hdfs, new Path(nested, "file"), BLOCKSIZE,
      REPLICATION, SEED);

  final FSDirectory fsDirectory = cluster.getNamesystem().getFSDirectory();

  // Snapshotting the parent means the child directory tracks snapshot state.
  INode node = fsDirectory.getINode(nested.toString());
  assertTrue(node.asDirectory().isWithSnapshot());

  // Allowing snapshots on the child promotes it to snapshottable.
  hdfs.allowSnapshot(nested);
  node = fsDirectory.getINode(nested.toString());
  assertTrue(node.isDirectory() && node.asDirectory().isSnapshottable());

  // Disallowing reverts it to a plain with-snapshot directory.
  hdfs.disallowSnapshot(nested);
  node = fsDirectory.getINode(nested.toString());
  assertTrue(node.asDirectory().isWithSnapshot());
}
示例2: testDisallowNestedSnapshottableDir
import org.apache.hadoop.hdfs.server.namenode.FSDirectory; //导入方法依赖的package包/类
/**
 * Verifies that disallowing snapshots on a nested snapshottable directory
 * replaces it with an INodeDirectoryWithSnapshot instead of leaving it as
 * an INodeDirectorySnapshottable.
 */
@Test
public void testDisallowNestedSnapshottableDir() throws Exception {
  cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);

  final Path parent = new Path("/dir");
  final Path nested = new Path(parent, "sub");
  hdfs.mkdirs(nested);
  SnapshotTestHelper.createSnapshot(hdfs, parent, "s1");
  DFSTestUtil.createFile(hdfs, new Path(nested, "file"), BLOCKSIZE,
      REPLICATION, SEED);

  final FSDirectory fsDirectory = cluster.getNamesystem().getFSDirectory();

  // Snapshotting the parent wraps the child as a with-snapshot directory.
  INode node = fsDirectory.getINode(nested.toString());
  assertTrue(node instanceof INodeDirectoryWithSnapshot);

  // Allowing snapshots on the child promotes it to the snapshottable type.
  hdfs.allowSnapshot(nested);
  node = fsDirectory.getINode(nested.toString());
  assertTrue(node instanceof INodeDirectorySnapshottable);

  // Disallowing demotes it back to the with-snapshot type.
  hdfs.disallowSnapshot(nested);
  node = fsDirectory.getINode(nested.toString());
  assertTrue(node instanceof INodeDirectoryWithSnapshot);
}
示例3: testDisallowNestedSnapshottableDir
import org.apache.hadoop.hdfs.server.namenode.FSDirectory; //导入方法依赖的package包/类
/**
 * Verifies that disallowing snapshots on a nested snapshottable directory
 * reverts it from an INodeDirectorySnapshottable back to a regular
 * directory that still carries snapshot state.
 */
@Test
public void testDisallowNestedSnapshottableDir() throws Exception {
  cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);

  final Path parent = new Path("/dir");
  final Path nested = new Path(parent, "sub");
  hdfs.mkdirs(nested);
  SnapshotTestHelper.createSnapshot(hdfs, parent, "s1");
  DFSTestUtil.createFile(hdfs, new Path(nested, "file"), BLOCKSIZE,
      REPLICATION, SEED);

  final FSDirectory fsDirectory = cluster.getNamesystem().getFSDirectory();

  // Snapshotting the parent means the child directory tracks snapshot state.
  INode node = fsDirectory.getINode(nested.toString());
  assertTrue(node.asDirectory().isWithSnapshot());

  // Allowing snapshots on the child promotes it to the snapshottable type.
  hdfs.allowSnapshot(nested);
  node = fsDirectory.getINode(nested.toString());
  assertTrue(node instanceof INodeDirectorySnapshottable);

  // Disallowing reverts it to a plain with-snapshot directory.
  hdfs.disallowSnapshot(nested);
  node = fsDirectory.getINode(nested.toString());
  assertTrue(node.asDirectory().isWithSnapshot());
}
示例4: rescanCacheDirectives
import org.apache.hadoop.hdfs.server.namenode.FSDirectory; //导入方法依赖的package包/类
/**
 * Scan all CacheDirectives. Use the information to figure out
 * what cache replication factor each block should have.
 *
 * <p>Expired directives are skipped. A directive pointing at a directory
 * rescans that directory's immediate file children only (no recursion);
 * a directive pointing at a file rescans that file directly. Symlinks and
 * missing paths are logged and skipped.
 */
private void rescanCacheDirectives() {
  FSDirectory fsDir = namesystem.getFSDirectory();
  // Replaces `new Date().getTime()`: identical epoch-millis value without
  // the legacy java.util.Date allocation.
  final long now = System.currentTimeMillis();
  for (CacheDirective directive : cacheManager.getCacheDirectives()) {
    scannedDirectives++;
    // Skip processing this entry if it has expired. An expiry time of 0 (or
    // negative) means "never expires".
    if (directive.getExpiryTime() > 0 && directive.getExpiryTime() <= now) {
      LOG.debug("Directive {}: the directive expired at {} (now = {})",
          directive.getId(), directive.getExpiryTime(), now);
      continue;
    }
    String path = directive.getPath();
    INode node;
    try {
      node = fsDir.getINode(path);
    } catch (UnresolvedLinkException e) {
      // We don't cache through symlinks
      LOG.debug("Directive {}: got UnresolvedLinkException while resolving "
          + "path {}", directive.getId(), path
      );
      continue;
    }
    if (node == null) {
      LOG.debug("Directive {}: No inode found at {}", directive.getId(),
          path);
    } else if (node.isDirectory()) {
      // Only the directory's direct file children are rescanned; nested
      // directories are not descended into.
      INodeDirectory dir = node.asDirectory();
      ReadOnlyList<INode> children = dir
          .getChildrenList(Snapshot.CURRENT_STATE_ID);
      for (INode child : children) {
        if (child.isFile()) {
          rescanFile(directive, child.asFile());
        }
      }
    } else if (node.isFile()) {
      rescanFile(directive, node.asFile());
    } else {
      LOG.debug("Directive {}: ignoring non-directive, non-file inode {} ",
          directive.getId(), node);
    }
  }
}
示例5: rescanCacheDirectives
import org.apache.hadoop.hdfs.server.namenode.FSDirectory; //导入方法依赖的package包/类
/**
 * Scan all CacheDirectives. Use the information to figure out
 * what cache replication factor each block should have.
 *
 * <p>Expired directives are skipped. A directive pointing at a directory
 * rescans that directory's immediate file children only (no recursion);
 * a directive pointing at a file rescans that file directly. Symlinks and
 * missing paths are logged and skipped.
 */
private void rescanCacheDirectives() {
  FSDirectory fsDir = namesystem.getFSDirectory();
  // Replaces `new Date().getTime()`: identical epoch-millis value without
  // the legacy java.util.Date allocation.
  final long now = System.currentTimeMillis();
  for (CacheDirective directive : cacheManager.getCacheDirectives()) {
    scannedDirectives++;
    // Skip processing this entry if it has expired. An expiry time of 0 (or
    // negative) means "never expires".
    if (directive.getExpiryTime() > 0 && directive.getExpiryTime() <= now) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Directive " + directive.getId() + ": the directive " +
            "expired at " + directive.getExpiryTime() + " (now = " +
            now + ")");
      }
      continue;
    }
    String path = directive.getPath();
    INode node;
    try {
      node = fsDir.getINode(path);
    } catch (UnresolvedLinkException e) {
      // We don't cache through symlinks
      if (LOG.isDebugEnabled()) {
        LOG.debug("Directive " + directive.getId() +
            ": got UnresolvedLinkException while resolving path " + path);
      }
      continue;
    }
    if (node == null) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Directive " + directive.getId() +
            ": No inode found at " + path);
      }
    } else if (node.isDirectory()) {
      // Only the directory's direct file children are rescanned; nested
      // directories are not descended into.
      INodeDirectory dir = node.asDirectory();
      ReadOnlyList<INode> children = dir
          .getChildrenList(Snapshot.CURRENT_STATE_ID);
      for (INode child : children) {
        if (child.isFile()) {
          rescanFile(directive, child.asFile());
        }
      }
    } else if (node.isFile()) {
      rescanFile(directive, node.asFile());
    } else {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Directive " + directive.getId() +
            ": ignoring non-directive, non-file inode " + node);
      }
    }
  }
}