This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto. If you are wondering what the BlockProto class does, or are looking for working examples of how to use it, the curated code samples below should help.
The BlockProto class belongs to the org.apache.hadoop.hdfs.protocol.proto.HdfsProtos package. Twelve code examples of the class are shown below.
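Before the examples, here is a minimal, self-contained sketch (not drawn from the samples below; class name and values are illustrative) of building a BlockProto and round-tripping it through the standard generated protobuf serialization. It uses only the three fields every example below relies on: blockId, genStamp, and numBytes.

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;

// Illustrative sketch: build a BlockProto from its three scalar fields and
// round-trip it through the generated protobuf serialization methods.
public class BlockProtoSketch {
  public static void main(String[] args) throws Exception {
    BlockProto proto = BlockProto.newBuilder()
        .setBlockId(1L)      // block id
        .setGenStamp(3L)     // generation stamp
        .setNumBytes(100L)   // block length in bytes
        .build();

    byte[] wire = proto.toByteArray();              // serialize
    BlockProto parsed = BlockProto.parseFrom(wire); // parse back
    System.out.println(parsed.getBlockId() + "/" + parsed.getGenStamp()
        + "/" + parsed.getNumBytes());
  }
}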
Example 1: initReplicaRecovery
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto; // import the required package/class
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
    throws IOException {
  InitReplicaRecoveryRequestProto req = InitReplicaRecoveryRequestProto
      .newBuilder().setBlock(PBHelper.convert(rBlock)).build();
  InitReplicaRecoveryResponseProto resp;
  try {
    resp = rpcProxy.initReplicaRecovery(NULL_CONTROLLER, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  if (!resp.getReplicaFound()) {
    // No replica found on the remote node.
    return null;
  } else {
    if (!resp.hasBlock() || !resp.hasState()) {
      throw new IOException("Replica was found but missing fields. " +
          "Req: " + req + "\n" +
          "Resp: " + resp);
    }
  }
  BlockProto b = resp.getBlock();
  return new ReplicaRecoveryInfo(b.getBlockId(), b.getNumBytes(),
      b.getGenStamp(), PBHelper.convert(resp.getState()));
}
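Note the two failure modes handled above: a reply with replicaFound set to false is an expected outcome and is mapped to a null ReplicaRecoveryInfo, while a reply that claims a replica exists but omits the block or state field is treated as a protocol violation and surfaced as an IOException that embeds both the request and the response for debugging.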
Example 2: dumpINodeFile
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto; // import the required package/class
private void dumpINodeFile(INodeSection.INodeFile f) {
  o("replication", f.getReplication()).o("mtime", f.getModificationTime())
      .o("atime", f.getAccessTime())
      .o("preferredBlockSize", f.getPreferredBlockSize())
      .o("permission", dumpPermission(f.getPermission()));
  if (f.getBlocksCount() > 0) {
    out.print("<blocks>");
    for (BlockProto b : f.getBlocksList()) {
      out.print("<block>");
      o("id", b.getBlockId()).o("genstamp", b.getGenStamp()).o("numBytes",
          b.getNumBytes());
      out.print("</block>\n");
    }
    out.print("</blocks>\n");
  }
  if (f.hasFileUC()) {
    INodeSection.FileUnderConstructionFeature u = f.getFileUC();
    out.print("<file-under-construction>");
    o("clientName", u.getClientName()).o("clientMachine",
        u.getClientMachine());
    out.print("</file-under-construction>\n");
  }
}
Example 3: dumpINodeFile
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto; // import the required package/class
private void dumpINodeFile(INodeSection.INodeFile f) {
  o("replication", f.getReplication()).o("mtime", f.getModificationTime())
      .o("atime", f.getAccessTime())
      .o("preferredBlockSize", f.getPreferredBlockSize())
      .o("permission", dumpPermission(f.getPermission()));
  dumpAcls(f.getAcl());
  if (f.getBlocksCount() > 0) {
    out.print("<blocks>");
    for (BlockProto b : f.getBlocksList()) {
      out.print("<block>");
      o("id", b.getBlockId()).o("genstamp", b.getGenStamp()).o("numBytes",
          b.getNumBytes());
      out.print("</block>\n");
    }
    out.print("</blocks>\n");
  }
  if (f.hasFileUC()) {
    INodeSection.FileUnderConstructionFeature u = f.getFileUC();
    out.print("<file-under-construction>");
    o("clientName", u.getClientName()).o("clientMachine",
        u.getClientMachine());
    out.print("</file-under-construction>\n");
  }
}
Example 4: convert
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto; // import the required package/class
public static BlockCommand convert(BlockCommandProto blkCmd) {
  List<BlockProto> blockProtoList = blkCmd.getBlocksList();
  Block[] blocks = new Block[blockProtoList.size()];
  for (int i = 0; i < blockProtoList.size(); i++) {
    blocks[i] = PBHelper.convert(blockProtoList.get(i));
  }
  List<DatanodeInfosProto> targetList = blkCmd.getTargetsList();
  DatanodeInfo[][] targets = new DatanodeInfo[targetList.size()][];
  for (int i = 0; i < targetList.size(); i++) {
    targets[i] = PBHelper.convert(targetList.get(i));
  }
  int action = DatanodeProtocol.DNA_UNKNOWN;
  switch (blkCmd.getAction()) {
  case TRANSFER:
    action = DatanodeProtocol.DNA_TRANSFER;
    break;
  case INVALIDATE:
    action = DatanodeProtocol.DNA_INVALIDATE;
    break;
  case SHUTDOWN:
    action = DatanodeProtocol.DNA_SHUTDOWN;
    break;
  }
  return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets);
}
Example 5: convert
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto; // import the required package/class
public static BlockCommand convert(BlockCommandProto blkCmd) {
  List<BlockProto> blockProtoList = blkCmd.getBlocksList();
  Block[] blocks = new Block[blockProtoList.size()];
  for (int i = 0; i < blockProtoList.size(); i++) {
    blocks[i] = PBHelper.convert(blockProtoList.get(i));
  }
  List<DatanodeInfosProto> targetList = blkCmd.getTargetsList();
  DatanodeInfo[][] targets = new DatanodeInfo[targetList.size()][];
  for (int i = 0; i < targetList.size(); i++) {
    targets[i] = PBHelper.convert(targetList.get(i));
  }
  int action = DatanodeProtocol.DNA_UNKNOWN;
  switch (blkCmd.getAction()) {
  case TRANSFER:
    action = DatanodeProtocol.DNA_TRANSFER;
    break;
  case INVALIDATE:
    action = DatanodeProtocol.DNA_INVALIDATE;
    break;
  case SHUTDOWN:
    action = DatanodeProtocol.DNA_SHUTDOWN;
    break;
  }
  return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets);
}
Example 6: run
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto; // import the required package/class
private void run(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  for (int i = 0; i < s.getNumInodes(); ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    if (p.getType() == INodeSection.INode.Type.FILE) {
      ++totalFiles;
      INodeSection.INodeFile f = p.getFile();
      totalBlocks += f.getBlocksCount();
      long fileSize = 0;
      for (BlockProto b : f.getBlocksList()) {
        fileSize += b.getNumBytes();
      }
      maxFileSize = Math.max(fileSize, maxFileSize);
      totalSpace += fileSize * f.getReplication();
      int bucket = fileSize > maxSize ? distribution.length - 1 : (int) Math
          .ceil((double) fileSize / steps);
      ++distribution[bucket];
    } else if (p.getType() == INodeSection.INode.Type.DIRECTORY) {
      ++totalDirectories;
    }
    if (i % (1 << 20) == 0) {
      out.println("Processed " + i + " inodes.");
    }
  }
}
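This snippet assumes that the counters (totalFiles, totalDirectories, totalBlocks, totalSpace, maxFileSize) and the histogram parameters (distribution, steps, maxSize) are fields of the enclosing class, initialized before run is invoked. Each file's size is computed as the sum of the numBytes of its BlockProto entries, and a progress line is printed every 2^20 processed inodes.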
Example 7: testConvertBlock
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto; // import the required package/class
@Test
public void testConvertBlock() {
  Block b = new Block(1, 100, 3);
  BlockProto bProto = PBHelper.convert(b);
  Block b2 = PBHelper.convert(bProto);
  assertEquals(b, b2);
}
Example 8: testConvertBlock
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto; // import the required package/class
@Test
public void testConvertBlock() {
  Block b = new Block(1, 100, 3);
  BlockProto bProto = PBHelperClient.convert(b);
  Block b2 = PBHelperClient.convert(bProto);
  assertEquals(b, b2);
}
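Examples 7 and 8 perform the same round-trip assertion; the only difference is the helper class, since newer Hadoop lines moved the client-side protobuf conversions from PBHelper into PBHelperClient, so which helper an example uses depends on the Hadoop version it was taken from.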
Example 9: initReplicaRecovery
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto; // import the required package/class
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
    throws IOException {
  InitReplicaRecoveryRequestProto req =
      InitReplicaRecoveryRequestProto.newBuilder()
          .setBlock(PBHelper.convert(rBlock)).build();
  InitReplicaRecoveryResponseProto resp;
  try {
    resp = rpcProxy.initReplicaRecovery(NULL_CONTROLLER, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  if (!resp.getReplicaFound()) {
    // No replica found on the remote node.
    return null;
  } else {
    if (!resp.hasBlock() || !resp.hasState()) {
      throw new IOException("Replica was found but missing fields. " +
          "Req: " + req + "\n" +
          "Resp: " + resp);
    }
  }
  BlockProto b = resp.getBlock();
  return new ReplicaRecoveryInfo(b.getBlockId(), b.getNumBytes(),
      b.getGenStamp(), PBHelper.convert(resp.getState()));
}
Example 10: convert
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto; // import the required package/class
public static BlockCommand convert(BlockCommandProto blkCmd) {
  List<BlockProto> blockProtoList = blkCmd.getBlocksList();
  Block[] blocks = new Block[blockProtoList.size()];
  for (int i = 0; i < blockProtoList.size(); i++) {
    blocks[i] = PBHelper.convert(blockProtoList.get(i));
  }
  List<DatanodeInfosProto> targetList = blkCmd.getTargetsList();
  DatanodeInfo[][] targets = new DatanodeInfo[targetList.size()][];
  for (int i = 0; i < targetList.size(); i++) {
    targets[i] = PBHelper.convert(targetList.get(i));
  }
  List<StorageUuidsProto> targetStorageUuidsList =
      blkCmd.getTargetStorageUuidsList();
  String[][] targetStorageIDs = new String[targetStorageUuidsList.size()][];
  for (int i = 0; i < targetStorageIDs.length; i++) {
    List<String> storageIDs = targetStorageUuidsList.get(i).getStorageUuidsList();
    targetStorageIDs[i] = storageIDs.toArray(new String[storageIDs.size()]);
  }
  int action = DatanodeProtocol.DNA_UNKNOWN;
  switch (blkCmd.getAction()) {
  case TRANSFER:
    action = DatanodeProtocol.DNA_TRANSFER;
    break;
  case INVALIDATE:
    action = DatanodeProtocol.DNA_INVALIDATE;
    break;
  case SHUTDOWN:
    action = DatanodeProtocol.DNA_SHUTDOWN;
    break;
  default:
    throw new AssertionError("Unknown action type: " + blkCmd.getAction());
  }
  return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets,
      targetStorageIDs);
}
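Compared with the earlier convert variants (examples 4 and 5), this one also decodes the per-target storage UUIDs into a String[][] that parallels the targets array, and it rejects an unrecognized command action with an AssertionError instead of silently leaving the action as DNA_UNKNOWN.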
Example 11: getFileSize
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto; // import the required package/class
private long getFileSize(INodeFile f) {
  long size = 0;
  for (BlockProto p : f.getBlocksList()) {
    size += p.getNumBytes();
  }
  return size;
}
Example 12: convert
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto; // import the required package/class
public static BlockProto convert(Block b) {
  return BlockProto.newBuilder().setBlockId(b.getBlockId())
      .setGenStamp(b.getGenerationStamp()).setNumBytes(b.getNumBytes())
      .build();
}
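For completeness, the inverse mapping implied by examples 1 and 9 can be sketched as below. The helper name protoToBlock is hypothetical rather than the actual PBHelper signature; only the field order (blockId, numBytes, generationStamp) is taken from the examples above.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;

// Hedged sketch of the opposite conversion (BlockProto -> Block); the helper
// name protoToBlock is illustrative only. It mirrors the field order used by
// ReplicaRecoveryInfo in examples 1 and 9: blockId, numBytes, genStamp.
public final class BlockProtoConversions {
  private BlockProtoConversions() {
  }

  public static Block protoToBlock(BlockProto p) {
    // Block's three-argument constructor takes (blockId, numBytes, generationStamp).
    return new Block(p.getBlockId(), p.getNumBytes(), p.getGenStamp());
  }
}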