This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.namenode.INodeId. If you have been wondering what the INodeId class is for and how to use it, the curated examples below should help.
The INodeId class belongs to the org.apache.hadoop.hdfs.server.namenode package. A total of 15 code examples of the class are shown below, ordered by popularity.
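Before diving into the examples, a quick orientation: INodeId generates and tracks the unique ids the NameNode assigns to inodes, and the examples below mostly rely on three of its constants: ROOT_INODE_ID (the id of the root directory "/"), GRANDFATHER_INODE_ID (a placeholder for callers that do not know a real file id, kept for compatibility with older clients and layouts), and INVALID_INODE_ID (marks a block as no longer belonging to any file). A minimal sketch of how the first two typically appear in client-side code (the class name INodeIdDemo is ours, not Hadoop's):

import org.apache.hadoop.hdfs.server.namenode.INodeId;

public class INodeIdDemo {
  public static void main(String[] args) {
    // Every inode is reachable through the reserved inode namespace,
    // so the root directory can always be addressed by its well-known id.
    String reservedRoot = "/.reserved/.inodes/" + INodeId.ROOT_INODE_ID;
    System.out.println("reserved path of '/': " + reservedRoot);

    // GRANDFATHER_INODE_ID is the "unknown file id" placeholder that
    // several of the RPC converters below fall back to.
    System.out.println("placeholder id: " + INodeId.GRANDFATHER_INODE_ID);
  }
}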
Example 1: convert
import org.apache.hadoop.hdfs.server.namenode.INodeId; // import the dependent package/class
public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
  if (fs == null)
    return null;
  return new HdfsLocatedFileStatus(
      fs.getLength(), fs.getFileType().equals(FileType.IS_DIR),
      fs.getBlockReplication(), fs.getBlocksize(),
      fs.getModificationTime(), fs.getAccessTime(),
      PBHelper.convert(fs.getPermission()), fs.getOwner(), fs.getGroup(),
      fs.getFileType().equals(FileType.IS_SYMLINK) ?
          fs.getSymlink().toByteArray() : null,
      fs.getPath().toByteArray(),
      fs.hasFileId() ? fs.getFileId() : INodeId.GRANDFATHER_INODE_ID,
      fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null,
      fs.hasChildrenNum() ? fs.getChildrenNum() : -1,
      fs.hasFileEncryptionInfo() ? convert(fs.getFileEncryptionInfo()) : null,
      fs.hasStoragePolicy() ? (byte) fs.getStoragePolicy()
          : BlockStoragePolicySuite.ID_UNSPECIFIED);
}
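Note how every optional protobuf field is given an explicit default here: a status message from an older server may omit the fileId, in which case the converter falls back to INodeId.GRANDFATHER_INODE_ID rather than failing.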
Example 2: testHdfsFileStatus
import org.apache.hadoop.hdfs.server.namenode.INodeId; // import the dependent package/class
@Test
public void testHdfsFileStatus() throws IOException {
  final long now = Time.now();
  final String parent = "/dir";
  final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
      now, now + 10, new FsPermission((short) 0644), "user", "group",
      DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
      INodeId.GRANDFATHER_INODE_ID, 0, null, (byte) 0);
  final FileStatus fstatus = toFileStatus(status, parent);
  System.out.println("status = " + status);
  System.out.println("fstatus = " + fstatus);
  final String json = JsonUtil.toJsonString(status, true);
  System.out.println("json = " + json.replace(",", ",\n "));
  ObjectReader reader = new ObjectMapper().reader(Map.class);
  final HdfsFileStatus s2 =
      JsonUtil.toFileStatus((Map<?, ?>) reader.readValue(json), true);
  final FileStatus fs2 = toFileStatus(s2, parent);
  System.out.println("s2 = " + s2);
  System.out.println("fs2 = " + fs2);
  Assert.assertEquals(fstatus, fs2);
}
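This test round-trips an HdfsFileStatus through its JSON form (as used by WebHDFS) and asserts the result is unchanged; GRANDFATHER_INODE_ID stands in for a real inode id since no NameNode is involved. Examples 8 and 9 below are older variants of the same test, built on a different JSON parser and a shorter constructor.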
Example 3: testOpenFilesWithRename
import org.apache.hadoop.hdfs.server.namenode.INodeId; // import the dependent package/class
@Test
public void testOpenFilesWithRename() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);
  // check for zero-sized blocks
  Path fileWithEmptyBlock = new Path("/test/test/test4");
  fs.create(fileWithEmptyBlock);
  NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
  String clientName = fs.getClient().getClientName();
  // create one empty block
  nameNodeRpc.addBlock(fileWithEmptyBlock.toString(), clientName, null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  fs.createSnapshot(path, "s2");
  fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));
  fs.delete(new Path("/test/test-renamed"), true);
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
}
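Here GRANDFATHER_INODE_ID lets the test drive the raw addBlock(..) RPC by path alone; a real client would pass the file id returned by create(..), which lets the NameNode resolve the file even if it is renamed mid-write.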
Example 4: removeBlock
import org.apache.hadoop.hdfs.server.namenode.INodeId; // import the dependent package/class
/**
 * Remove the block from the block map;
 * remove it from all data-node lists it belongs to;
 * and remove all data-node locations associated with the block.
 */
void removeBlock(Block block) {
  BlockInfo blockInfo = blocks.remove(block);
  if (blockInfo == null)
    return;
  blockInfo.setBlockCollectionId(INodeId.INVALID_INODE_ID);
  final int size = blockInfo.isStriped() ?
      blockInfo.getCapacity() : blockInfo.numNodes();
  for (int idx = size - 1; idx >= 0; idx--) {
    DatanodeDescriptor dn = blockInfo.getDatanode(idx);
    if (dn != null) {
      removeBlock(dn, blockInfo); // remove from the list and wipe the location
    }
  }
}
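Setting the block-collection id to INVALID_INODE_ID is what actually detaches the block from its owning file; the loop then walks the replica list backwards so entries can be removed while iterating.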
Example 5: getParentPath
import org.apache.hadoop.hdfs.server.namenode.INodeId; // import the dependent package/class
@Override
public String getParentPath(long inode) throws IOException {
  if (inode == INodeId.ROOT_INODE_ID) {
    return "/";
  }
  byte[] bytes = dirChildMap.get(toBytes(inode));
  Preconditions.checkState(bytes != null && bytes.length == 8,
      "Can not find parent directory for inode %s, "
          + "fsimage might be corrupted", inode);
  long parent = toLong(bytes);
  if (!dirPathCache.containsKey(parent)) {
    bytes = dirMap.get(toBytes(parent));
    if (parent != INodeId.ROOT_INODE_ID) {
      Preconditions.checkState(bytes != null,
          "Can not find parent directory for inode %s, "
              + "the fsimage might be corrupted.", parent);
    }
    String parentName = toString(bytes);
    String parentPath =
        new Path(getParentPath(parent),
            parentName.isEmpty() ? "/" : parentName).toString();
    dirPathCache.put(parent, parentPath);
  }
  return dirPathCache.get(parent);
}
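ROOT_INODE_ID serves both as the recursion base case and as the one directory allowed to be missing from dirMap; every other ancestor path is computed once and then memoized in dirPathCache.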
Example 6: setupMockCluster
import org.apache.hadoop.hdfs.server.namenode.INodeId; // import the dependent package/class
@Before
public void setupMockCluster() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
      "need to set a dummy value here so it assumes a multi-rack cluster");
  fsn = Mockito.mock(FSNamesystem.class);
  Mockito.doReturn(true).when(fsn).hasWriteLock();
  Mockito.doReturn(true).when(fsn).hasReadLock();
  Mockito.doReturn(true).when(fsn).isRunning();
  bm = new BlockManager(fsn, conf);
  final String[] racks = {
      "/rackA",
      "/rackA",
      "/rackA",
      "/rackB",
      "/rackB",
      "/rackB"};
  storages = DFSTestUtil.createDatanodeStorageInfos(racks);
  nodes = Arrays.asList(DFSTestUtil.toDatanodeDescriptor(storages));
  rackA = nodes.subList(0, 3);
  rackB = nodes.subList(3, 6);
  mockINodeId = INodeId.ROOT_INODE_ID + 1;
}
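Starting the mock inode ids at ROOT_INODE_ID + 1 keeps them out of the reserved id range, so the BlockManager under test sees ids shaped like those a real NameNode would allocate.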
Example 7: getParentPath
import org.apache.hadoop.hdfs.server.namenode.INodeId; // import the dependent package/class
@Override
public String getParentPath(long inode) throws IOException {
  if (inode == INodeId.ROOT_INODE_ID) {
    return "/";
  }
  byte[] bytes = dirChildMap.get(toBytes(inode));
  Preconditions.checkState(bytes != null && bytes.length == 8,
      "Can not find parent directory for inode %s, "
          + "fsimage might be corrupted", inode);
  long parent = toLong(bytes);
  if (!dirPathCache.containsKey(parent)) {
    bytes = dirMap.get(toBytes(parent));
    if (parent != INodeId.ROOT_INODE_ID) {
      Preconditions.checkState(bytes != null,
          "Can not find parent directory for inode %s, "
              + "the fsimage might be corrupted.", parent);
    }
    String parentName = toString(bytes);
    String parentPath =
        new File(getParentPath(parent), parentName).toString();
    dirPathCache.put(parent, parentPath);
  }
  return dirPathCache.get(parent);
}
Example 8: testHdfsFileStatus
import org.apache.hadoop.hdfs.server.namenode.INodeId; // import the dependent package/class
@Test
public void testHdfsFileStatus() {
  final long now = Time.now();
  final String parent = "/dir";
  final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
      now, now + 10, new FsPermission((short) 0644), "user", "group",
      DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
      INodeId.GRANDFATHER_INODE_ID, 0, null, (byte) 0);
  final FileStatus fstatus = toFileStatus(status, parent);
  System.out.println("status = " + status);
  System.out.println("fstatus = " + fstatus);
  final String json = JsonUtil.toJsonString(status, true);
  System.out.println("json = " + json.replace(",", ",\n "));
  final HdfsFileStatus s2 =
      JsonUtil.toFileStatus((Map<?, ?>) JSON.parse(json), true);
  final FileStatus fs2 = toFileStatus(s2, parent);
  System.out.println("s2 = " + s2);
  System.out.println("fs2 = " + fs2);
  Assert.assertEquals(fstatus, fs2);
}
Example 9: testHdfsFileStatus
import org.apache.hadoop.hdfs.server.namenode.INodeId; // import the dependent package/class
@Test
public void testHdfsFileStatus() {
  final long now = Time.now();
  final String parent = "/dir";
  final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
      now, now + 10, new FsPermission((short) 0644), "user", "group",
      DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
      INodeId.GRANDFATHER_INODE_ID, 0);
  final FileStatus fstatus = toFileStatus(status, parent);
  System.out.println("status = " + status);
  System.out.println("fstatus = " + fstatus);
  final String json = JsonUtil.toJsonString(status, true);
  System.out.println("json = " + json.replace(",", ",\n "));
  final HdfsFileStatus s2 =
      JsonUtil.toFileStatus((Map<?, ?>) JSON.parse(json), true);
  final FileStatus fs2 = toFileStatus(s2, parent);
  System.out.println("s2 = " + s2);
  System.out.println("fs2 = " + fs2);
  Assert.assertEquals(fstatus, fs2);
}
Example 10: toFileStatus
import org.apache.hadoop.hdfs.server.namenode.INodeId; // import the dependent package/class
/** Convert a Json map to a HdfsFileStatus object. */
public static HdfsFileStatus toFileStatus(final Map<?, ?> json,
    boolean includesType) {
  if (json == null) {
    return null;
  }
  final Map<?, ?> m = includesType ?
      (Map<?, ?>) json.get(FileStatus.class.getSimpleName()) : json;
  final String localName = (String) m.get("pathSuffix");
  final PathType type = PathType.valueOf((String) m.get("type"));
  final byte[] symlink = type != PathType.SYMLINK ? null
      : DFSUtil.string2Bytes((String) m.get("symlink"));
  final long len = ((Number) m.get("length")).longValue();
  final String owner = (String) m.get("owner");
  final String group = (String) m.get("group");
  final FsPermission permission = toFsPermission((String) m.get("permission"),
      (Boolean) m.get("aclBit"), (Boolean) m.get("encBit"));
  final long aTime = ((Number) m.get("accessTime")).longValue();
  final long mTime = ((Number) m.get("modificationTime")).longValue();
  final long blockSize = ((Number) m.get("blockSize")).longValue();
  final short replication = ((Number) m.get("replication")).shortValue();
  final long fileId = m.containsKey("fileId") ?
      ((Number) m.get("fileId")).longValue() : INodeId.GRANDFATHER_INODE_ID;
  final int childrenNum = getInt(m, "childrenNum", -1);
  final byte storagePolicy = m.containsKey("storagePolicy") ?
      (byte) ((Number) m.get("storagePolicy")).longValue() :
      BlockStoragePolicySuite.ID_UNSPECIFIED;
  return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
      blockSize, mTime, aTime, permission, owner, group, symlink,
      DFSUtil.string2Bytes(localName), fileId, childrenNum, null,
      storagePolicy);
}
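This is the inverse of the JSON serialization exercised in examples 2, 8 and 9. As in example 1, optional fields get explicit defaults, with a missing "fileId" again mapped to INodeId.GRANDFATHER_INODE_ID, so status objects from servers of different vintages deserialize uniformly.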
Example 11: complete
import org.apache.hadoop.hdfs.server.namenode.INodeId; // import the dependent package/class
@Override
public CompleteResponseProto complete(RpcController controller,
    CompleteRequestProto req) throws ServiceException {
  try {
    boolean result =
        server.complete(req.getSrc(), req.getClientName(),
            req.hasLast() ? PBHelper.convert(req.getLast()) : null,
            req.hasFileId() ? req.getFileId() : INodeId.GRANDFATHER_INODE_ID);
    return CompleteResponseProto.newBuilder().setResult(result).build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
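The same hasFileId() guard appears on the server side of the file-write RPCs: complete(..) accepts requests from clients that predate inode ids by substituting GRANDFATHER_INODE_ID, in which case the file is located by its path alone.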
Example 12: getParentPath
import org.apache.hadoop.hdfs.server.namenode.INodeId; // import the dependent package/class
@Override
public String getParentPath(long inode) throws IOException {
  if (inode == INodeId.ROOT_INODE_ID) {
    return "";
  }
  Dir parent = dirChildMap.get(inode);
  if (parent == null) {
    // The inode is an INodeReference generated from a snapshot. The
    // delimited oiv tool does not print metadata in snapshots, and
    // ignoreSnapshotName(..) is expected to throw an IOException here,
    // so the dereference below is never reached with a null parent.
    PBImageTextWriter.ignoreSnapshotName(inode);
  }
  return parent.getPath();
}
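Note that this variant returns an empty string for the root, rather than "/" as in examples 5 and 7, presumably to match how its caller joins parent and child names.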
Example 13: lookup
import org.apache.hadoop.hdfs.server.namenode.INodeId; // import the dependent package/class
/**
 * Return the INodeId of the specified path.
 */
private long lookup(String path) throws IOException {
  Preconditions.checkArgument(path.startsWith("/"));
  long id = INodeId.ROOT_INODE_ID;
  for (int offset = 0, next; offset < path.length(); offset = next) {
    next = path.indexOf('/', offset + 1);
    if (next == -1) {
      next = path.length();
    }
    if (offset + 1 > next) {
      break;
    }
    final String component = path.substring(offset + 1, next);
    if (component.isEmpty()) {
      continue;
    }
    final long[] children = dirmap.get(id);
    if (children == null) {
      throw new FileNotFoundException(path);
    }
    boolean found = false;
    for (long cid : children) {
      FsImageProto.INodeSection.INode child = fromINodeId(cid);
      if (component.equals(child.getName().toStringUtf8())) {
        found = true;
        id = child.getId();
        break;
      }
    }
    if (!found) {
      throw new FileNotFoundException(path);
    }
  }
  return id;
}
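The resolver starts from ROOT_INODE_ID and descends one path component at a time, much like the NameNode itself. The splitting loop is worth studying on its own; here is a self-contained sketch of it (class and method names are ours) that also shows how empty components from doubled slashes are skipped:

import java.util.ArrayList;
import java.util.List;

public class PathComponentsDemo {
  /** Split an absolute path the same way lookup(..) above walks it. */
  static List<String> components(String path) {
    List<String> out = new ArrayList<>();
    for (int offset = 0, next; offset < path.length(); offset = next) {
      next = path.indexOf('/', offset + 1);
      if (next == -1) {
        next = path.length();       // last component runs to the end
      }
      String component = path.substring(offset + 1, next);
      if (!component.isEmpty()) {   // skip doubled slashes
        out.add(component);
      }
    }
    return out;
  }

  public static void main(String[] args) {
    // prints [user, hadoop, file.txt]
    System.out.println(components("/user//hadoop/file.txt"));
  }
}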
Example 14: run
import org.apache.hadoop.hdfs.server.namenode.INodeId; // import the dependent package/class
void run() throws Exception {
  String reservedRoot = "/.reserved/.inodes/" + INodeId.ROOT_INODE_ID;
  Assert.assertEquals(reservedRoot,
      TestPath.mergeStatuses(wrap.
          globStatus(new Path(reservedRoot), new AcceptAllPathFilter())));
  // These inodes don't show up via listStatus.
  Assert.assertEquals("",
      TestPath.mergeStatuses(wrap.
          globStatus(new Path("/.reserved/*"), new AcceptAllPathFilter())));
}
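The test relies on HDFS's reserved inode namespace: an inode can be opened as /.reserved/.inodes/&lt;id&gt; without knowing its current path, while the /.reserved tree stays hidden from listings. A small sketch of how that might be used directly (the helper name is ours, and fileId is assumed to be a known inode id):

/**
 * A sketch: resolve a file through the reserved inode namespace.
 * fs is any FileSystem backed by HDFS.
 */
static FileStatus statByInodeId(FileSystem fs, long fileId) throws IOException {
  Path byId = new Path("/.reserved/.inodes/" + fileId);
  return fs.getFileStatus(byId);  // same inode, regardless of renames
}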
Example 15: testFileCreationError3
import org.apache.hadoop.hdfs.server.namenode.INodeId; // import the dependent package/class
/** test addBlock(..) when replication < min and excludeNodes == null. */
@Test
public void testFileCreationError3() throws IOException {
  System.out.println("testFileCreationError3 start");
  Configuration conf = new HdfsConfiguration();
  // create cluster
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
  DistributedFileSystem dfs = null;
  try {
    cluster.waitActive();
    dfs = cluster.getFileSystem();
    DFSClient client = dfs.dfs;
    // create a new file.
    final Path f = new Path("/foo.txt");
    createFile(dfs, f, 3);
    try {
      cluster.getNameNodeRpc().addBlock(f.toString(), client.clientName,
          null, null, INodeId.GRANDFATHER_INODE_ID, null);
      fail();
    } catch (IOException ioe) {
      FileSystem.LOG.info("GOOD!", ioe);
    }
    System.out.println("testFileCreationError3 successful");
  } finally {
    IOUtils.closeStream(dfs);
    cluster.shutdown();
  }
}
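With zero datanodes the minimum-replication check cannot be satisfied, so addBlock(..) must fail; as in example 3, GRANDFATHER_INODE_ID lets the test issue the RPC with only a path. The catch block treats the resulting IOException as the expected outcome.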