This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo.addDeleteBlock. If you are wondering what BlocksMapUpdateInfo.addDeleteBlock does, how to use it, or want real-world examples of it, the curated samples below may help. You can also look into the enclosing class, org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo, for more context.
Below are 8 code examples of BlocksMapUpdateInfo.addDeleteBlock, drawn from different versions of the Hadoop code base and sorted by popularity by default.
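Before walking through the examples, it helps to know what addDeleteBlock actually does: the BlocksMapUpdateInfo passed around in all the snippets below is essentially an accumulator for blocks that a delete or truncate decision has orphaned, which the caller later removes from the blocks map in one pass. The following is a minimal, self-contained sketch of that contract, not the actual Hadoop source (the class name BlocksMapUpdateInfoSketch and the generic block type are illustrative):

import java.util.ArrayList;
import java.util.List;

// A simplified stand-in for INode.BlocksMapUpdateInfo, generic over the
// block type so it compiles without the Hadoop sources.
class BlocksMapUpdateInfoSketch<B> {
  // blocks scheduled for deletion; drained by the caller after the
  // namespace change has been made
  private final List<B> toDeleteList = new ArrayList<>();

  public void addDeleteBlock(B toDelete) {
    assert toDelete != null : "toDelete is null";
    toDeleteList.add(toDelete);
  }

  public List<B> getToDeleteList() {
    return toDeleteList;
  }
}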
Example 1: cleanZeroSizeBlock

import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; // import the package/class the method depends on

/**
 * When deleting a file in the current fs directory, and the file is contained
 * in a snapshot, we should delete the last block if it's under construction
 * and its size is 0.
 */
void cleanZeroSizeBlock(final INodeFile f,
    final BlocksMapUpdateInfo collectedBlocks) {
  final BlockInfoContiguous[] blocks = f.getBlocks();
  if (blocks != null && blocks.length > 0
      && blocks[blocks.length - 1] instanceof BlockInfoContiguousUnderConstruction) {
    BlockInfoContiguousUnderConstruction lastUC =
        (BlockInfoContiguousUnderConstruction) blocks[blocks.length - 1];
    if (lastUC.getNumBytes() == 0) {
      // this is a 0-sized block; no need to check its under-construction state here
      collectedBlocks.addDeleteBlock(lastUC);
      f.removeLastBlock(lastUC);
    }
  }
}
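As a hedged usage illustration, the collector is typically filled during the namespace change and drained afterwards. The snippet below uses the BlocksMapUpdateInfoSketch from the introduction and a made-up block id, so it only approximates what the real NameNode does with the collected blocks:

BlocksMapUpdateInfoSketch<Long> collected = new BlocksMapUpdateInfoSketch<>();
// a delete operation records every discarded block, as cleanZeroSizeBlock does above
collected.addDeleteBlock(1073741825L); // hypothetical block id
for (Long blockId : collected.getToDeleteList()) {
  // in the real NameNode, each entry is removed from the blocks map at this point
  System.out.println("removing block " + blockId);
}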
Example 2: destroyAndCollectSnapshotBlocks

import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; // import the package/class the method depends on

public void destroyAndCollectSnapshotBlocks(
    BlocksMapUpdateInfo collectedBlocks) {
  if (blocks == null || collectedBlocks == null) {
    return;
  }
  for (BlockInfoContiguous blk : blocks) {
    collectedBlocks.addDeleteBlock(blk);
  }
  blocks = null;
}
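Note the blocks = null at the end: dropping the reference makes the method idempotent, so a repeated destroy pass collects nothing. Below is a self-contained sketch of the same collect-then-null pattern, reusing the BlocksMapUpdateInfoSketch from the introduction (SnapshotFileSketch and its block ids are hypothetical):

class SnapshotFileSketch {
  private Long[] blocks = {1L, 2L, 3L}; // hypothetical block ids

  void destroyAndCollectSnapshotBlocks(BlocksMapUpdateInfoSketch<Long> collected) {
    if (blocks == null || collected == null) {
      return;
    }
    for (Long blk : blocks) {
      collected.addDeleteBlock(blk);
    }
    blocks = null; // drop the reference so the blocks cannot be collected twice
  }
}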
Example 3: cleanZeroSizeBlock

import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; // import the package/class the method depends on

/**
 * When deleting a file in the current fs directory, and the file is contained
 * in a snapshot, we should delete the last block if it's under construction
 * and its size is 0.
 */
void cleanZeroSizeBlock(final INodeFile f,
    final BlocksMapUpdateInfo collectedBlocks) {
  final BlockInfo[] blocks = f.getBlocks();
  if (blocks != null && blocks.length > 0
      && !blocks[blocks.length - 1].isComplete()) {
    BlockInfo lastUC = blocks[blocks.length - 1];
    if (lastUC.getNumBytes() == 0) {
      // this is a 0-sized block; no need to check its under-construction state here
      collectedBlocks.addDeleteBlock(lastUC);
      f.removeLastBlock(lastUC);
    }
  }
}
Example 4: destroyAndCollectSnapshotBlocks

import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; // import the package/class the method depends on

public void destroyAndCollectSnapshotBlocks(
    BlocksMapUpdateInfo collectedBlocks) {
  if (blocks == null || collectedBlocks == null) {
    return;
  }
  for (BlockInfo blk : blocks) {
    collectedBlocks.addDeleteBlock(blk);
  }
  blocks = null;
}
Example 5: cleanZeroSizeBlock

import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; // import the package/class the method depends on

/**
 * When deleting a file in the current fs directory, and the file is contained
 * in a snapshot, we should delete the last block if it's under construction
 * and its size is 0.
 */
void cleanZeroSizeBlock(final INodeFile f,
    final BlocksMapUpdateInfo collectedBlocks) {
  final BlockInfo[] blocks = f.getBlocks();
  if (blocks != null && blocks.length > 0
      && blocks[blocks.length - 1] instanceof BlockInfoUnderConstruction) {
    BlockInfoUnderConstruction lastUC =
        (BlockInfoUnderConstruction) blocks[blocks.length - 1];
    if (lastUC.getNumBytes() == 0) {
      // this is a 0-sized block; no need to check its under-construction state here
      collectedBlocks.addDeleteBlock(lastUC);
      f.removeLastBlock(lastUC);
    }
  }
}
Example 6: collectBlocksBeyondMax

import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; // import the package/class the method depends on

private void collectBlocksBeyondMax(final INodeFile file, final long max,
    final BlocksMapUpdateInfo collectedBlocks) {
  final BlockInfo[] oldBlocks = file.getBlocks();
  if (oldBlocks != null) {
    // find the minimum n such that the size of the first n blocks > max
    int n = 0;
    for (long size = 0; n < oldBlocks.length && max > size; n++) {
      size += oldBlocks[n].getNumBytes();
    }
    // starting from block n, the data is beyond max.
    if (n < oldBlocks.length) {
      // resize the array.
      final BlockInfo[] newBlocks;
      if (n == 0) {
        newBlocks = BlockInfo.EMPTY_ARRAY;
      } else {
        newBlocks = new BlockInfo[n];
        System.arraycopy(oldBlocks, 0, newBlocks, 0, n);
      }
      // set new blocks
      file.setBlocks(newBlocks);
      // collect the blocks beyond max.
      if (collectedBlocks != null) {
        for (; n < oldBlocks.length; n++) {
          collectedBlocks.addDeleteBlock(oldBlocks[n]);
        }
      }
    }
  }
}
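To make the prefix computation concrete, here is a self-contained walkthrough of the same loop with hypothetical block sizes. Note that the loop keeps block n - 1 even when it straddles max; trimming inside that last kept block is handled elsewhere:

public class BeyondMaxDemo {
  public static void main(String[] args) {
    long[] blockSizes = {128, 128, 128, 64}; // hypothetical sizes in bytes
    long max = 300;                          // keep only the first 300 bytes
    // find the minimum n such that the first n blocks cover more than max bytes
    int n = 0;
    for (long size = 0; n < blockSizes.length && max > size; n++) {
      size += blockSizes[n];
    }
    // here n == 3: blocks 0..2 cover 384 > 300 bytes, so block 3 (and any
    // later blocks) would be passed to addDeleteBlock
    System.out.println("first block index beyond max: " + n);
  }
}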
Example 7: collectBlocksBeyondMax

import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; // import the package/class the method depends on

private static void collectBlocksBeyondMax(final FileWithSnapshot file,
    final long max, final BlocksMapUpdateInfo collectedBlocks) {
  final BlockInfo[] oldBlocks = file.asINodeFile().getBlocks();
  if (oldBlocks != null) {
    // find the minimum n such that the size of the first n blocks > max
    int n = 0;
    for (long size = 0; n < oldBlocks.length && max > size; n++) {
      size += oldBlocks[n].getNumBytes();
    }
    // starting from block n, the data is beyond max.
    if (n < oldBlocks.length) {
      // resize the array.
      final BlockInfo[] newBlocks;
      if (n == 0) {
        newBlocks = null;
      } else {
        newBlocks = new BlockInfo[n];
        System.arraycopy(oldBlocks, 0, newBlocks, 0, n);
      }
      // set new blocks
      file.asINodeFile().setBlocks(newBlocks);
      // collect the blocks beyond max.
      if (collectedBlocks != null) {
        for (; n < oldBlocks.length; n++) {
          collectedBlocks.addDeleteBlock(oldBlocks[n]);
        }
      }
    }
  }
}
Example 8: collectBlocksBeyondMax

import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; // import the package/class the method depends on

private void collectBlocksBeyondMax(final INodeFile file, final long max,
    final BlocksMapUpdateInfo collectedBlocks) {
  final BlockInfo[] oldBlocks = file.getBlocks();
  if (oldBlocks != null) {
    // find the minimum n such that the size of the first n blocks > max
    int n = 0;
    for (long size = 0; n < oldBlocks.length && max > size; n++) {
      size += oldBlocks[n].getNumBytes();
    }
    // starting from block n, the data is beyond max.
    if (n < oldBlocks.length) {
      // resize the array.
      final BlockInfo[] newBlocks;
      if (n == 0) {
        newBlocks = null;
      } else {
        newBlocks = new BlockInfo[n];
        System.arraycopy(oldBlocks, 0, newBlocks, 0, n);
      }
      // set new blocks
      file.setBlocks(newBlocks);
      // collect the blocks beyond max.
      if (collectedBlocks != null) {
        for (; n < oldBlocks.length; n++) {
          collectedBlocks.addDeleteBlock(oldBlocks[n]);
        }
      }
    }
  }
}