This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.finalizeBlock. If you are unsure how FSDatasetInterface.finalizeBlock is used in practice, the curated examples below may help. You can also read about the enclosing class, org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface, for further context.
The following 5 code examples of FSDatasetInterface.finalizeBlock are sorted by popularity. They come from different versions and forks of Hadoop's test code, so the exact signatures of writeToBlock, finalizeBlock, and the length accessors differ from example to example.
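All five examples follow the same lifecycle: allocate a Block with an expected length of zero, write its payload through the stream returned by writeToBlock, record the true length with setNumBytes, and then call finalizeBlock. The minimal sketch below distills that pattern using the writeToBlock(Block, boolean, boolean) variant from Example 2; blockId, data, and the fsdataset fixture are hypothetical placeholders, not part of the original examples.
// Minimal sketch of the finalize lifecycle, distilled from Example 2 below.
// blockId, data, and the fsdataset fixture are hypothetical placeholders.
Block b = new Block(blockId, 0, 0);  // expected length 0: the dataset tracks actual bytes written
OutputStream dataOut = fsdataset.writeToBlock(b, false, false).dataOut;
dataOut.write(data);                 // write the block's payload
dataOut.close();
b.setNumBytes(data.length);          // record the true length on the Block
fsdataset.finalizeBlock(b);          // move the replica to the finalized state
assertEquals(data.length, fsdataset.getLength(b));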
Example 1: addSomeBlocks
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface; // the package/class this method depends on
int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId) throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId + NUMBLOCKS; ++i) {
    // Pass the expected length as zero; fsdataset should use the size of the data actually written.
    Block b = new Block(i, 0, 0);
    BlockDataFile.Writer dataOut = ((SimulatedFSDataset.SimulatedBlockInlineChecksumFileWriter) fsdataset
        .writeToBlock(0, b, b, false, false, -1, -1)).getBlockDataFile()
        .getWriter(0);
    assertEquals(0, fsdataset.getFinalizedBlockLength(0, b));
    for (int j = 1; j <= blockIdToLen(i); ++j) {
      dataOut.write(new byte[] { (byte) j });
      assertEquals(j, fsdataset.getFinalizedBlockLength(0, b)); // length is reported correctly even mid-write
      bytesAdded++;
    }
    dataOut.close();
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(0, b);
    assertEquals(blockIdToLen(i), fsdataset.getFinalizedBlockLength(0, b));
  }
  return bytesAdded;
}
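For context, a hypothetical driver for this helper is sketched below. It assumes the in-memory SimulatedFSDataset fixture that the cast in Example 1 implies; the CONFIG_PROPERTY_SIMULATED key and the single-argument constructor exist in older Hadoop lines but vary across versions, so treat this as a sketch rather than a drop-in test.
// Hypothetical test driver for addSomeBlocks, assuming the in-memory
// SimulatedFSDataset; the constructor and config key vary across Hadoop versions.
Configuration conf = new Configuration();
conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
FSDatasetInterface fsdataset = new SimulatedFSDataset(conf);
int bytesAdded = addSomeBlocks(fsdataset, 1); // starting block id of 1 is an assumption
assertTrue(bytesAdded > 0);                   // every block wrote at least one byte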
Example 2: addSomeBlocks
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface; // the package/class this method depends on
int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId) throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId + NUMBLOCKS; ++i) {
    // Pass the expected length as zero; fsdataset should use the size of the data actually written.
    Block b = new Block(i, 0, 0);
    OutputStream dataOut = fsdataset.writeToBlock(b, false, false).dataOut;
    assertEquals(0, fsdataset.getLength(b));
    for (int j = 1; j <= blockIdToLen(i); ++j) {
      dataOut.write(j);
      assertEquals(j, fsdataset.getLength(b)); // length is reported correctly even mid-write
      bytesAdded++;
    }
    dataOut.close();
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;
}
Example 3: addSomeBlocks
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface; // the package/class this method depends on
int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId) throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId + NUMBLOCKS; ++i) {
    // Pass the expected length as zero; fsdataset should use the size of the data actually written.
    Block b = new Block(i, 0, 0);
    // This variant uses the replica-in-pipeline API: create an RBW (replica being
    // written) and track write progress through getBytesOnDisk() instead of getLength().
    ReplicaInPipelineInterface bInfo = fsdataset.createRbw(b);
    BlockWriteStreams out = bInfo.createStreams(true, 512, 4);
    try {
      OutputStream dataOut = out.dataOut;
      assertEquals(0, fsdataset.getLength(b));
      for (int j = 1; j <= blockIdToLen(i); ++j) {
        dataOut.write(j);
        assertEquals(j, bInfo.getBytesOnDisk()); // length is reported correctly even mid-write
        bytesAdded++;
      }
    } finally {
      out.close();
    }
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;
}
Example 4: addSomeBlocks
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface; // the package/class this method depends on
int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId) throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId + NUMBLOCKS; ++i) {
    // Pass the expected length as zero; fsdataset should use the size of the data actually written.
    Block b = new Block(i, 0, 0);
    OutputStream dataOut = fsdataset.writeToBlock(0, b, false, false).dataOut;
    assertEquals(0, fsdataset.getFinalizedBlockLength(0, b));
    for (int j = 1; j <= blockIdToLen(i); ++j) {
      dataOut.write(j);
      assertEquals(j, fsdataset.getFinalizedBlockLength(0, b)); // length is reported correctly even mid-write
      bytesAdded++;
    }
    dataOut.close();
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(0, b);
    assertEquals(blockIdToLen(i), fsdataset.getFinalizedBlockLength(0, b));
  }
  return bytesAdded;
}
Example 5: addSomeBlocks
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface; // the package/class this method depends on
int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId) throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId + NUMBLOCKS; ++i) {
    // Pass the expected length as zero; fsdataset should use the size of the data actually written.
    Block b = new Block(i, 0, 0);
    OutputStream dataOut = fsdataset.writeToBlock(b, false).dataOut;
    assertEquals(0, fsdataset.getLength(b));
    for (int j = 1; j <= blockIdToLen(i); ++j) {
      dataOut.write(j);
      assertEquals(j, fsdataset.getLength(b)); // length is reported correctly even mid-write
      bytesAdded++;
    }
    dataOut.close();
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;
}