This article collects typical usage code examples of the Java class org.apache.hadoop.hdfs.server.namenode.INodeFile. If you are wondering what exactly the INodeFile class is for, how to use INodeFile in Java, or where to find examples of INodeFile in use, the curated class code examples below may help.
The INodeFile class belongs to the org.apache.hadoop.hdfs.server.namenode package. A total of 15 code examples of the INodeFile class are shown below, sorted by popularity by default.
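Most of the examples below operate on an INodeFile resolved inside the NameNode. As a minimal sketch of how such an inode is typically obtained (hedged: `fsdir` is an assumed FSDirectory instance and the path is made up; this mirrors the lookup used in Example 3):

import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodesInPath;

// Sketch only: resolve a path to its file inode, as the test examples below do.
static INodeFile lookupFile(FSDirectory fsdir, String path) throws Exception {
  INodesInPath iip = fsdir.getINodesInPath(path, true);
  return iip.getLastINode().asFile(); // throws if the path is not a file
}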
Example 1: cleanFile
import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
public QuotaCounts cleanFile(final BlockStoragePolicySuite bsps,
    final INodeFile file, final int snapshotId,
    int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    collectBlocksAndClear(bsps, file, collectedBlocks, removedINodes);
    return new QuotaCounts.Builder().build();
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    return diffs.deleteSnapshotDiff(bsps, snapshotId, priorSnapshotId, file,
        collectedBlocks, removedINodes);
  }
}
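A hypothetical call-site sketch for this variant, using only the signature shown above (the feature object `sf` and the surrounding variables `bsps`, `file`, and `priorSnapshotId` are assumptions, not part of the original example):

// Sketch: delete the current file while snapshots still reference its blocks.
INode.BlocksMapUpdateInfo collectedBlocks = new INode.BlocksMapUpdateInfo();
List<INode> removedINodes = new ChunkedArrayList<INode>();
FileWithSnapshotFeature sf = file.getFileWithSnapshotFeature();
if (sf != null) {
  QuotaCounts counts = sf.cleanFile(bsps, file, Snapshot.CURRENT_STATE_ID,
      priorSnapshotId, collectedBlocks, removedINodes);
}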
Example 2: collectBlocksAndClear
import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
public void collectBlocksAndClear(final BlockStoragePolicySuite bsps,
    final INodeFile file, final BlocksMapUpdateInfo info,
    final List<INode> removedINodes) {
  // check if everything is deleted.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.destroyAndCollectBlocks(bsps, info, removedINodes);
    return;
  }
  // find max file size.
  final long max;
  FileDiff diff = getDiffs().getLast();
  if (isCurrentFileDeleted()) {
    max = diff == null ? 0 : diff.getFileSize();
  } else {
    max = file.computeFileSize();
  }
  // Collect blocks that should be deleted
  FileDiff last = diffs.getLast();
  BlockInfoContiguous[] snapshotBlocks = last == null ? null : last.getBlocks();
  if (snapshotBlocks == null) {
    file.collectBlocksBeyondMax(max, info);
  } else {
    file.collectBlocksBeyondSnapshot(snapshotBlocks, info);
  }
}
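For intuition: `max` is the largest file length any remaining reference (the live file or a snapshot) still needs, and only whole blocks beyond that length are reclaimable. A hedged illustration with made-up sizes:

// Illustration only: the current file was deleted and the newest snapshot
// recorded a 128 MB length, so blocks wholly beyond that offset belong to
// no one and may be collected.
final long max = 128L * 1024 * 1024;
file.collectBlocksBeyondMax(max, info);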
Example 3: checkSnapshotFileReplication
import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
/**
 * Check the replication for both the current file and all its prior snapshots.
 *
 * @param currentFile
 *          the Path of the current file
 * @param snapshotRepMap
 *          A map maintaining all the snapshots of the current file, as well
 *          as their expected replication number stored in their corresponding
 *          INodes
 * @param expectedBlockRep
 *          The expected replication number
 * @throws Exception
 */
private void checkSnapshotFileReplication(Path currentFile,
    Map<Path, Short> snapshotRepMap, short expectedBlockRep) throws Exception {
  // First check the getBlockReplication for the INode of the currentFile
  final INodeFile inodeOfCurrentFile = getINodeFile(currentFile);
  assertEquals(expectedBlockRep, inodeOfCurrentFile.getBlockReplication());
  // Then check replication for every snapshot
  for (Path ss : snapshotRepMap.keySet()) {
    final INodesInPath iip = fsdir.getINodesInPath(ss.toString(), true);
    final INodeFile ssInode = iip.getLastINode().asFile();
    // The replication number derived from the
    // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
    assertEquals(expectedBlockRep, ssInode.getBlockReplication());
    // Also check the number derived from INodeFile#getFileReplication
    assertEquals(snapshotRepMap.get(ss).shortValue(),
        ssInode.getFileReplication(iip.getPathSnapshotId()));
  }
}
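The `getINodeFile` helper is not shown in this example. A plausible implementation, consistent with the iip-based lookup above (a sketch assuming the test's `fsdir` field):

private INodeFile getINodeFile(Path p) throws IOException {
  // Resolve the path in the directory tree and cast the inode to a file.
  return fsdir.getINode(p.toString()).asFile();
}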
Example 4: cleanFile
import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
public void cleanFile(INode.ReclaimContext reclaimContext,
    final INodeFile file, final int snapshotId, int priorSnapshotId,
    byte storagePolicyId) {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    final BlockStoragePolicy policy = reclaimContext.storagePolicySuite()
        .getPolicy(storagePolicyId);
    QuotaCounts old = file.storagespaceConsumed(policy);
    collectBlocksAndClear(reclaimContext, file);
    QuotaCounts current = file.storagespaceConsumed(policy);
    reclaimContext.quotaDelta().add(old.subtract(current));
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    diffs.deleteSnapshotDiff(reclaimContext, snapshotId, priorSnapshotId,
        file);
  }
}
Example 5: collectBlocksAndClear
import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
public void collectBlocksAndClear(
    INode.ReclaimContext reclaimContext, final INodeFile file) {
  // check if everything is deleted.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.clearFile(reclaimContext);
    return;
  }
  // find max file size.
  final long max;
  FileDiff diff = getDiffs().getLast();
  if (isCurrentFileDeleted()) {
    max = diff == null ? 0 : diff.getFileSize();
  } else {
    max = file.computeFileSize();
  }
  // Collect blocks that should be deleted
  FileDiff last = diffs.getLast();
  BlockInfo[] snapshotBlocks = last == null ? null : last.getBlocks();
  if (snapshotBlocks == null) {
    file.collectBlocksBeyondMax(max, reclaimContext.collectedBlocks());
  } else {
    file.collectBlocksBeyondSnapshot(snapshotBlocks,
        reclaimContext.collectedBlocks());
  }
}
Example 6: acquire
import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
@Override
protected void acquire(TransactionLocks locks) throws IOException {
  BlockLock blockLock = (BlockLock) locks.getLock(Type.Block);
  for (INodeFile iNodeFile : blockLock.getFiles()) {
    Block lastBlock = iNodeFile.getLastBlock();
    if (lastBlock != null) {
      List<Replica> replicas = (List<Replica>) EntityManager
          .findList(Replica.Finder.ByBlockIdAndINodeId,
              lastBlock.getBlockId(),
              iNodeFile.getId());
      if (replicas != null) {
        Collections.sort(replicas, new Comparator<Replica>() {
          @Override
          public int compare(Replica o1, Replica o2) {
            return Integer.compare(o1.getBucketId(), o2.getBucketId());
          }
        });
        for (Replica replica : replicas) {
          EntityManager.find(HashBucket.Finder.ByStorageIdAndBucketId,
              replica.getStorageId(), replica.getBucketId());
        }
      }
    }
  }
}
Example 7: cleanFile
import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
public Quota.Counts cleanFile(final INodeFile file, final int snapshotId,
    int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    collectBlocksAndClear(file, collectedBlocks, removedINodes);
    return Quota.Counts.newInstance();
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    return diffs.deleteSnapshotDiff(snapshotId, priorSnapshotId, file,
        collectedBlocks, removedINodes);
  }
}
Example 8: updateQuotaAndCollectBlocks
import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
public Quota.Counts updateQuotaAndCollectBlocks(INodeFile file,
    FileDiff removed, BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  long oldDiskspace = file.diskspaceConsumed();
  if (removed.snapshotINode != null) {
    short replication = removed.snapshotINode.getFileReplication();
    short currentRepl = file.getBlockReplication();
    if (currentRepl == 0) {
      oldDiskspace = file.computeFileSize(true, true) * replication;
    } else if (replication > currentRepl) {
      oldDiskspace = oldDiskspace / file.getBlockReplication() * replication;
    }
  }
  collectBlocksAndClear(file, collectedBlocks, removedINodes);
  long dsDelta = oldDiskspace - file.diskspaceConsumed();
  return Quota.Counts.newInstance(0, dsDelta);
}
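The scaling branch above re-charges the file's disk usage at the removed snapshot's higher replication factor before computing the delta. A hedged worked example with illustrative numbers (not from the original):

// Illustrative numbers only: a 100-byte file at current replication 2,
// while the removed snapshot recorded replication 3.
long oldDiskspace = 100 * 2;            // what diskspaceConsumed() would report
short replication = 3, currentRepl = 2;
if (replication > currentRepl) {
  oldDiskspace = oldDiskspace / currentRepl * replication; // 200 / 2 * 3 = 300
}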
Example 9: collectBlocksAndClear
import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
private void collectBlocksAndClear(final INodeFile file,
    final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
  // check if everything is deleted.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.destroyAndCollectBlocks(info, removedINodes);
    return;
  }
  // find max file size.
  final long max;
  if (isCurrentFileDeleted()) {
    final FileDiff last = getDiffs().getLast();
    max = last == null ? 0 : last.getFileSize();
  } else {
    max = file.computeFileSize();
  }
  collectBlocksBeyondMax(file, max, info);
}
Example 10: serializeFileDiffList
import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
private void serializeFileDiffList(INodeFile file, OutputStream out)
    throws IOException {
  FileWithSnapshotFeature sf = file.getFileWithSnapshotFeature();
  if (sf != null) {
    List<FileDiff> diffList = sf.getDiffs().asList();
    SnapshotDiffSection.DiffEntry entry = SnapshotDiffSection.DiffEntry
        .newBuilder().setInodeId(file.getId()).setType(Type.FILEDIFF)
        .setNumOfDiff(diffList.size()).build();
    entry.writeDelimitedTo(out);
    for (int i = diffList.size() - 1; i >= 0; i--) {
      FileDiff diff = diffList.get(i);
      SnapshotDiffSection.FileDiff.Builder fb = SnapshotDiffSection.FileDiff
          .newBuilder().setSnapshotId(diff.getSnapshotId())
          .setFileSize(diff.getFileSize());
      INodeFileAttributes copy = diff.snapshotINode;
      if (copy != null) {
        fb.setName(ByteString.copyFrom(copy.getLocalNameBytes()))
            .setSnapshotCopy(buildINodeFile(copy, parent.getSaverContext()));
      }
      fb.build().writeDelimitedTo(out);
    }
  }
}
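Each message above is written length-delimited, newest diff first. A hedged sketch of the matching read side (assuming the same generated protobuf classes and an `InputStream in` positioned at a DiffEntry):

// Sketch: read back one DiffEntry and its FileDiff records.
SnapshotDiffSection.DiffEntry entry =
    SnapshotDiffSection.DiffEntry.parseDelimitedFrom(in);
if (entry != null && entry.getType() == Type.FILEDIFF) {
  for (int i = 0; i < entry.getNumOfDiff(); i++) {
    SnapshotDiffSection.FileDiff fd =
        SnapshotDiffSection.FileDiff.parseDelimitedFrom(in);
    // Diffs arrive newest-first, mirroring the reverse loop in the writer.
  }
}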
Example 11: checkSnapshotFileReplication
import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
/**
 * Check the replication for both the current file and all its prior snapshots.
 *
 * @param currentFile
 *          the Path of the current file
 * @param snapshotRepMap
 *          A map maintaining all the snapshots of the current file, as well
 *          as their expected replication number stored in their corresponding
 *          INodes
 * @param expectedBlockRep
 *          The expected replication number that should be returned by
 *          {@link INodeFileWithSnapshot#getBlockReplication()} of all the INodes
 * @throws Exception
 */
private void checkSnapshotFileReplication(Path currentFile,
    Map<Path, Short> snapshotRepMap, short expectedBlockRep) throws Exception {
  // First check the getBlockReplication for the INode of the currentFile
  final INodeFile inodeOfCurrentFile = getINodeFile(currentFile);
  assertEquals(expectedBlockRep, inodeOfCurrentFile.getBlockReplication());
  // Then check replication for every snapshot
  for (Path ss : snapshotRepMap.keySet()) {
    final INodesInPath iip = fsdir.getLastINodeInPath(ss.toString());
    final INodeFile ssInode = (INodeFile) iip.getLastINode();
    // The replication number derived from the
    // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
    assertEquals(expectedBlockRep, ssInode.getBlockReplication());
    // Also check the number derived from INodeFile#getFileReplication
    assertEquals(snapshotRepMap.get(ss).shortValue(),
        ssInode.getFileReplication(iip.getPathSnapshotId()));
  }
}
Example 12: checkRaidProgress
import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
/**
 * Count the number of live replicas of each parity block in the raided file.
 * If any stripe does not have enough parity block replicas, add the stripe
 * to raidEncodingTasks to schedule encoding. If forceAdd is true, we always
 * add the stripe to raidEncodingTasks without checking.
 * @param sourceINode
 * @param raidEncodingTasks
 * @param fs
 * @param forceAdd
 * @return true if all parity blocks of the file have enough replicas
 * @throws IOException
 */
public boolean checkRaidProgress(INodeFile sourceINode,
    LightWeightLinkedSet<RaidBlockInfo> raidEncodingTasks, FSNamesystem fs,
    boolean forceAdd) throws IOException {
  boolean result = true;
  BlockInfo[] blocks = sourceINode.getBlocks();
  for (int i = 0; i < blocks.length; i += numStripeBlocks) {
    boolean hasParity = true;
    if (!forceAdd) {
      for (int j = 0; j < numParityBlocks; j++) {
        if (fs.countLiveNodes(blocks[i + j]) < this.parityReplication) {
          hasParity = false;
          break;
        }
      }
    }
    if (!hasParity || forceAdd) {
      raidEncodingTasks.add(new RaidBlockInfo(blocks[i], parityReplication, i));
      result = false;
    }
  }
  return result;
}
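The loop assumes each stripe of `numStripeBlocks` blocks begins with its `numParityBlocks` parity blocks, and only those leading blocks are replica-checked. A hedged illustration with made-up geometry (not from the original):

// Illustrative geometry only: 2 parity + 3 data blocks per stripe.
int numStripeBlocks = 5, numParityBlocks = 2, totalBlocks = 10;
for (int i = 0; i < totalBlocks; i += numStripeBlocks) {
  // The stripe starting at block i is replica-checked on its parity blocks only.
  System.out.printf("stripe@%d: parity [%d..%d]%n", i, i, i + numParityBlocks - 1);
}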
Example 13: updateQuotaAndCollectBlocks
import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
private static Quota.Counts updateQuotaAndCollectBlocks(
    INodeFile currentINode, FileDiff removed,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  FileWithSnapshot sFile = (FileWithSnapshot) currentINode;
  long oldDiskspace = currentINode.diskspaceConsumed();
  if (removed.snapshotINode != null) {
    short replication = removed.snapshotINode.getFileReplication();
    short currentRepl = currentINode.getBlockReplication();
    if (currentRepl == 0) {
      oldDiskspace = currentINode.computeFileSize(true, true) * replication;
    } else if (replication > currentRepl) {
      oldDiskspace = oldDiskspace / currentINode.getBlockReplication()
          * replication;
    }
  }
  Util.collectBlocksAndClear(sFile, collectedBlocks, removedINodes);
  long dsDelta = oldDiskspace - currentINode.diskspaceConsumed();
  return Quota.Counts.newInstance(0, dsDelta);
}
Example 14: checkSnapshotFileReplication
import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
/**
 * Check the replication for both the current file and all its prior snapshots.
 *
 * @param currentFile
 *          the Path of the current file
 * @param snapshotRepMap
 *          A map maintaining all the snapshots of the current file, as well
 *          as their expected replication number stored in their corresponding
 *          INodes
 * @param expectedBlockRep
 *          The expected replication number that should be returned by
 *          {@link INodeFileWithSnapshot#getBlockReplication()} of all the INodes
 * @throws Exception
 */
private void checkSnapshotFileReplication(Path currentFile,
    Map<Path, Short> snapshotRepMap, short expectedBlockRep) throws Exception {
  // First check the getBlockReplication for the INode of the currentFile
  final INodeFile inodeOfCurrentFile = getINodeFile(currentFile);
  assertEquals(expectedBlockRep, inodeOfCurrentFile.getBlockReplication());
  // Then check replication for every snapshot
  for (Path ss : snapshotRepMap.keySet()) {
    final INodesInPath iip = fsdir.getLastINodeInPath(ss.toString());
    final INodeFile ssInode = (INodeFile) iip.getLastINode();
    // The replication number derived from the
    // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
    assertEquals(expectedBlockRep, ssInode.getBlockReplication());
    // Also check the number derived from INodeFile#getFileReplication
    assertEquals(snapshotRepMap.get(ss).shortValue(),
        ssInode.getFileReplication(iip.getPathSnapshot()));
  }
}
Example 15: cleanFile
import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the required package/class
public Quota.Counts cleanFile(final INodeFile file, final int snapshotId,
    int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes, final boolean countDiffChange)
    throws QuotaExceededException {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    collectBlocksAndClear(file, collectedBlocks, removedINodes);
    return Quota.Counts.newInstance();
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    return diffs.deleteSnapshotDiff(snapshotId, priorSnapshotId, file,
        collectedBlocks, removedINodes, countDiffChange);
  }
}