This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface. If you are wondering what FSDatasetInterface is for, how to use it, or simply want to see it in context, the curated class code examples below may help.
The FSDatasetInterface class belongs to the org.apache.hadoop.hdfs.server.datanode package. A total of 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
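The snippets below come from DataNode test code and rely on a shared test fixture: a Configuration field named conf with simulated block storage enabled. A minimal sketch of what that fixture could look like, assuming a JUnit 3 style TestCase and that SimulatedFSDataset exposes a CONFIG_PROPERTY_SIMULATED key; the class name and the exact configuration flag are illustrative assumptions, not taken from the examples:
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;

public class FSDatasetInterfaceExamples extends TestCase {
  Configuration conf; // shared by the test methods shown below

  @Override
  protected void setUp() throws Exception {
    super.setUp();
    conf = new Configuration();
    // Assumed flag: back FSDatasetInterface with the in-memory
    // SimulatedFSDataset instead of real on-disk storage.
    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  }
}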
Example 1: addSomeBlocks
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface; // import the required package/class
int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId) throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId + NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0); // we pass the expected length as zero; fsdataset should use the size of the data actually written
    BlockDataFile.Writer dataOut = ((SimulatedFSDataset.SimulatedBlockInlineChecksumFileWriter) fsdataset
        .writeToBlock(0, b, b, false, false, -1, -1)).getBlockDataFile()
        .getWriter(0);
    assertEquals(0, fsdataset.getFinalizedBlockLength(0, b));
    for (int j = 1; j <= blockIdToLen(i); ++j) {
      dataOut.write(new byte[] {(byte) j});
      assertEquals(j, fsdataset.getFinalizedBlockLength(0, b)); // correct length even as we write
      bytesAdded++;
    }
    dataOut.close();
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(0, b);
    assertEquals(blockIdToLen(i), fsdataset.getFinalizedBlockLength(0, b));
  }
  return bytesAdded;
}
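Several examples also use helpers that are not reproduced on this page: a NUMBLOCKS constant, a blockIdToLen() mapping from block id to block length, and a one-argument addSomeBlocks() overload (called, for instance, in Example 3). A hypothetical sketch of those helpers, consistent with how they are used here; the constant values and the length formula are assumptions:
static final int NUMBLOCKS = 20;               // assumed value
static final int BLOCK_LENGTH_MULTIPLIER = 79; // assumed value

// Derive a deterministic length from a block id, so later assertions
// (e.g. the block-report check in Example 14) can recompute it.
int blockIdToLen(long blockId) {
  return (int) blockId * BLOCK_LENGTH_MULTIPLIER;
}

// Convenience overload used by the tests: start at block id 1.
int addSomeBlocks(FSDatasetInterface fsdataset) throws IOException {
  return addSomeBlocks(fsdataset, 1);
}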
Example 2: checkBlockDataAndSize
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface; // import the required package/class
void checkBlockDataAndSize(FSDatasetInterface fsdataset,
    Block b, long expectedLen) throws IOException {
  ReplicaToRead replica = fsdataset.getReplicaToRead(0, b);
  InputStream input = replica.getBlockInputStream(null, 0);
  long lengthRead = 0;
  int data;
  int count = 0;
  while ((data = input.read()) != -1) {
    if (count++ < BlockInlineChecksumReader.getHeaderSize()) {
      // skip the inline checksum header before checking the data bytes
      continue;
    }
    assertEquals(SimulatedFSDataset.DEFAULT_DATABYTE, data);
    lengthRead++;
  }
  assertEquals(expectedLen, lengthRead);
}
Example 3: testInvalidate
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface; // import the required package/class
public void testInvalidate() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf);
  int bytesAdded = addSomeBlocks(fsdataset);
  Block[] deleteBlocks = new Block[2];
  deleteBlocks[0] = new Block(1, 0, 0);
  deleteBlocks[1] = new Block(2, 0, 0);
  fsdataset.invalidate(0, deleteBlocks);
  checkInvalidBlock(deleteBlocks[0]);
  checkInvalidBlock(deleteBlocks[1]);
  long sizeDeleted = blockIdToLen(1) + blockIdToLen(2);
  assertEquals(bytesAdded - sizeDeleted, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity() - bytesAdded + sizeDeleted, fsdataset.getRemaining());
  // Now make sure the rest of the blocks are valid
  for (int i = 3; i <= NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0);
    assertTrue(fsdataset.isValidBlock(0, b, false));
  }
}
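testInvalidate (and Examples 6 and 15) also calls a checkInvalidBlock() helper that is not listed on this page. A hypothetical sketch of such a helper, assuming it takes only the Block, builds a fresh and therefore empty simulated dataset, and asserts that the usual lookups reject the block; the exact assertions and the namespace-id arguments are assumptions:
void checkInvalidBlock(Block b) throws IOException {
  // A freshly created simulated dataset contains no blocks, so every
  // lookup for b is expected to fail or report the block as invalid.
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf);
  assertFalse(fsdataset.isValidBlock(0, b, false));
  try {
    fsdataset.getFinalizedBlockLength(0, b);
    assertTrue("Expected an IO exception", false);
  } catch (IOException e) {
    // ok - as expected for an unknown block
  }
}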
Example 4: addSomeBlocks
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface; // import the required package/class
int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId) throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId + NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0); // we pass the expected length as zero; fsdataset should use the size of the data actually written
    OutputStream dataOut = fsdataset.writeToBlock(b, false, false).dataOut;
    assertEquals(0, fsdataset.getLength(b));
    for (int j = 1; j <= blockIdToLen(i); ++j) {
      dataOut.write(j);
      assertEquals(j, fsdataset.getLength(b)); // correct length even as we write
      bytesAdded++;
    }
    dataOut.close();
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;
}
Example 5: testGetMetaData
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface; // import the required package/class
public void testGetMetaData() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf);
  Block b = new Block(1, 5, 0);
  try {
    assertFalse(fsdataset.metaFileExists(b));
    assertTrue("Expected an IO exception", false);
  } catch (IOException e) {
    // ok - as expected
  }
  addSomeBlocks(fsdataset); // only need to add one, but ...
  b = new Block(1, 0, 0);
  InputStream metaInput = fsdataset.getMetaDataInputStream(b);
  DataInputStream metaDataInput = new DataInputStream(metaInput);
  short version = metaDataInput.readShort();
  assertEquals(FSDataset.METADATA_VERSION, version);
  DataChecksum checksum = DataChecksum.newDataChecksum(metaDataInput);
  assertEquals(DataChecksum.CHECKSUM_NULL, checksum.getChecksumType());
  assertEquals(0, checksum.getChecksumSize());
}
Example 6: testInvalidate
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface; // import the required package/class
public void testInvalidate() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf);
  int bytesAdded = addSomeBlocks(fsdataset);
  Block[] deleteBlocks = new Block[2];
  deleteBlocks[0] = new Block(1, 0, 0);
  deleteBlocks[1] = new Block(2, 0, 0);
  fsdataset.invalidate(deleteBlocks);
  checkInvalidBlock(deleteBlocks[0]);
  checkInvalidBlock(deleteBlocks[1]);
  long sizeDeleted = blockIdToLen(1) + blockIdToLen(2);
  assertEquals(bytesAdded - sizeDeleted, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity() - bytesAdded + sizeDeleted, fsdataset.getRemaining());
  // Now make sure the rest of the blocks are valid
  for (int i = 3; i <= NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0);
    assertTrue(fsdataset.isValidBlock(b));
  }
}
Example 7: addSomeBlocks
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface; // import the required package/class
int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId) throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId + NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0); // we pass the expected length as zero; fsdataset should use the size of the data actually written
    ReplicaInPipelineInterface bInfo = fsdataset.createRbw(b);
    BlockWriteStreams out = bInfo.createStreams(true, 512, 4);
    try {
      OutputStream dataOut = out.dataOut;
      assertEquals(0, fsdataset.getLength(b));
      for (int j = 1; j <= blockIdToLen(i); ++j) {
        dataOut.write(j);
        assertEquals(j, bInfo.getBytesOnDisk()); // correct length even as we write
        bytesAdded++;
      }
    } finally {
      out.close();
    }
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;
}
Example 8: addSomeBlocks
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface; // import the required package/class
int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId) throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId + NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0); // we pass the expected length as zero; fsdataset should use the size of the data actually written
    OutputStream dataOut = fsdataset.writeToBlock(0, b, false, false).dataOut;
    assertEquals(0, fsdataset.getFinalizedBlockLength(0, b));
    for (int j = 1; j <= blockIdToLen(i); ++j) {
      dataOut.write(j);
      assertEquals(j, fsdataset.getFinalizedBlockLength(0, b)); // correct length even as we write
      bytesAdded++;
    }
    dataOut.close();
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(0, b);
    assertEquals(blockIdToLen(i), fsdataset.getFinalizedBlockLength(0, b));
  }
  return bytesAdded;
}
Example 9: testGetMetaData
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface; // import the required package/class
public void testGetMetaData() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf);
  Block b = new Block(1, 5, 0);
  try {
    assertFalse(fsdataset.metaFileExists(0, b));
    assertTrue("Expected an IO exception", false);
  } catch (IOException e) {
    // ok - as expected
  }
  addSomeBlocks(fsdataset); // only need to add one, but ...
  b = new Block(1, 0, 0);
  InputStream metaInput = fsdataset.getMetaDataInputStream(0, b);
  DataInputStream metaDataInput = new DataInputStream(metaInput);
  short version = metaDataInput.readShort();
  assertEquals(FSDataset.METADATA_VERSION, version);
  DataChecksum checksum = DataChecksum.newDataChecksum(metaDataInput);
  assertEquals(DataChecksum.CHECKSUM_NULL, checksum.getChecksumType());
  assertEquals(0, checksum.getChecksumSize());
}
Example 10: addSomeBlocks
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface; // import the required package/class
int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId) throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId + NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0); // we pass the expected length as zero; fsdataset should use the size of the data actually written
    OutputStream dataOut = fsdataset.writeToBlock(b, false).dataOut;
    assertEquals(0, fsdataset.getLength(b));
    for (int j = 1; j <= blockIdToLen(i); ++j) {
      dataOut.write(j);
      assertEquals(j, fsdataset.getLength(b)); // correct length even as we write
      bytesAdded++;
    }
    dataOut.close();
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;
}
Example 11: injectBlocks
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface; // import the required package/class
/**
 * This method is valid only if the data nodes use simulated data storage.
 * @param dataNodeIndex - index of the data node to inject into; the index is the same as for getDataNodes()
 * @param blocksToInject - the blocks to inject
 * @throws IOException
 *           if the dataset is not a SimulatedFSDataset,
 *           or if any of the blocks already exist in the data node
 */
public void injectBlocks(int dataNodeIndex, Block[] blocksToInject) throws IOException {
  if (dataNodeIndex < 0 || dataNodeIndex >= dataNodes.size()) {
    throw new IndexOutOfBoundsException();
  }
  FSDatasetInterface dataSet = dataNodes.get(dataNodeIndex).datanode.getFSDataset();
  if (!(dataSet instanceof SimulatedFSDataset)) {
    throw new IOException("injectBlocks is valid only for SimulatedFSDataset");
  }
  SimulatedFSDataset sdataset = (SimulatedFSDataset) dataSet;
  sdataset.injectBlocks(getNameNode().getNamespaceID(), blocksToInject);
  dataNodes.get(dataNodeIndex).datanode.scheduleNSBlockReport(0);
}
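A hedged usage sketch of injectBlocks(): start a single-datanode MiniDFSCluster with simulated storage enabled (see the fixture sketch above) and inject two synthetic blocks into the first data node; the constructor arguments and block sizes are illustrative assumptions:
MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
try {
  cluster.waitActive();
  Block[] blocksToInject = {
      new Block(1, 4096, 0), // assumed block id / length / generation stamp
      new Block(2, 4096, 0)
  };
  cluster.injectBlocks(0, blocksToInject);
} finally {
  cluster.shutdown();
}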
Example 12: testStorageUsage
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface; // import the required package/class
public void testStorageUsage() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf);
  assertEquals(0, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity(), fsdataset.getRemaining());
  int bytesAdded = addSomeBlocks(fsdataset);
  assertEquals(bytesAdded, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity() - bytesAdded, fsdataset.getRemaining());
}
Example 13: testWriteRead
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface; // import the required package/class
public void testWriteRead() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf);
  addSomeBlocks(fsdataset);
  for (int i = 1; i <= NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0);
    assertTrue(fsdataset.isValidBlock(0, b, false));
    assertEquals(blockIdToLen(i), fsdataset.getFinalizedBlockLength(0, b));
    checkBlockDataAndSize(fsdataset, b, blockIdToLen(i));
  }
}
Example 14: testGetBlockReport
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface; // import the required package/class
public void testGetBlockReport() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf);
  Block[] blockReport = fsdataset.getBlockReport(0);
  assertEquals(0, blockReport.length);
  int bytesAdded = addSomeBlocks(fsdataset);
  blockReport = fsdataset.getBlockReport(0);
  assertEquals(NUMBLOCKS, blockReport.length);
  for (Block b : blockReport) {
    assertNotNull(b);
    assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
  }
}
Example 15: testInValidBlocks
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface; // import the required package/class
public void testInValidBlocks() throws IOException {
  FSDatasetInterface fsdataset = new SimulatedFSDataset(conf);
  Block b = new Block(1, 5, 0);
  checkInvalidBlock(b);
  // Now check invalid blocks after adding some blocks
  addSomeBlocks(fsdataset);
  b = new Block(NUMBLOCKS + 99, 5, 0);
  checkInvalidBlock(b);
}