This page collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock.getLocations. If you have been wondering exactly what RecoveringBlock.getLocations does, how to call it, and what real usages look like, the curated samples here should help. You can also read further about the enclosing class, org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock.
Eight code examples of RecoveringBlock.getLocations are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java samples.
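Before the examples, a minimal sketch of the method's role: a RecoveringBlock bundles the block under recovery, its replica locations, and the new generation stamp, and getLocations() returns the DatanodeInfo[] chosen for the recovery. The block pool id, block id, and generation stamps below are made-up values for illustration; in production the NameNode constructs the command.

import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;

// Illustrative values only; the NameNode normally builds this command.
ExtendedBlock blk = new ExtendedBlock("BP-1234-127.0.0.1-42", 1L, 1024L, 100L);
DatanodeInfo[] locs = { DFSTestUtil.getLocalDatanodeInfo(),
                        DFSTestUtil.getLocalDatanodeInfo() };
RecoveringBlock rb = new RecoveringBlock(blk, locs, 101L); // 101 = new generation stamp
for (DatanodeInfo dn : rb.getLocations()) { // the replicas to recover against
  System.out.println("recovery location: " + dn);
}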
Example 1: logRecoverBlock
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; //import the package/class the method depends on
private static void logRecoverBlock(String who, RecoveringBlock rb) {
  ExtendedBlock block = rb.getBlock();
  DatanodeInfo[] targets = rb.getLocations();
  LOG.info(who + " calls recoverBlock(" + block
      + ", targets=[" + Joiner.on(", ").join(targets) + "]"
      + ", newGenerationStamp=" + rb.getNewGenerationStamp() + ")");
}
Example 2: testConvertRecoveringBlock
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; //import the package/class the method depends on
@Test
public void testConvertRecoveringBlock() {
  DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };
  RecoveringBlock b = new RecoveringBlock(getExtendedBlock(), dnInfo, 3);
  RecoveringBlockProto bProto = PBHelper.convert(b);
  RecoveringBlock b1 = PBHelper.convert(bProto);
  assertEquals(b.getBlock(), b1.getBlock());
  DatanodeInfo[] dnInfo1 = b1.getLocations();
  assertEquals(dnInfo.length, dnInfo1.length);
  for (int i = 0; i < dnInfo.length; i++) {
    compare(dnInfo[i], dnInfo1[i]);
  }
}
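The compare(DatanodeInfo, DatanodeInfo) helper is defined elsewhere in the test class and is not shown on this page. As a rough sketch only, it presumably asserts field-by-field equality along these lines (the exact field list is an assumption):

import static org.junit.Assert.assertEquals;

// Assumed sketch of the test's compare helper; the real field list may differ.
private static void compare(DatanodeInfo dn1, DatanodeInfo dn2) {
  assertEquals(dn1.getIpAddr(), dn2.getIpAddr());     // network identity
  assertEquals(dn1.getHostName(), dn2.getHostName());
  assertEquals(dn1.getXferPort(), dn2.getXferPort());
  assertEquals(dn1.getCapacity(), dn2.getCapacity()); // usage statistics
}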
Example 3: RecoveryTaskContiguous
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; //import the package/class the method depends on
RecoveryTaskContiguous(RecoveringBlock rBlock) {
  this.rBlock = rBlock;
  block = rBlock.getBlock();
  bpid = block.getBlockPoolId();
  locs = rBlock.getLocations();
  recoveryId = rBlock.getNewGenerationStamp();
}
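Note that this constructor only caches the recovery parameters. The actual work happens later, when the task iterates locs, asks each location to initialize replica recovery, and synchronizes the surviving replicas to the new generation stamp recoveryId, much as Examples 7 and 8 below do.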
Example 4: logRecoverBlock
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; //import the package/class the method depends on
private static void logRecoverBlock(String who, RecoveringBlock rb) {
  ExtendedBlock block = rb.getBlock();
  DatanodeInfo[] targets = rb.getLocations();
  LOG.info(who + " calls recoverBlock(" + block
      + ", targets=[" + Joiner.on(", ").join(targets) + "]"
      + ", newGenerationStamp=" + rb.getNewGenerationStamp()
      + ", newBlock=" + rb.getNewBlock()
      + ", isStriped=" + rb.isStriped()
      + ")");
}
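This variant is from a newer Hadoop line: besides the fields logged in Example 1, it also logs rb.getNewBlock() and rb.isStriped(), accessors that appear to have been added for truncate-aware and erasure-coded (striped) block recovery, respectively.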
Example 5: logRecoverBlock
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; //import the package/class the method depends on
private static void logRecoverBlock(String who, RecoveringBlock rb) {
  ExtendedBlock block = rb.getBlock();
  DatanodeInfo[] targets = rb.getLocations();
  LOG.info(who + " calls recoverBlock(" + block + ", targets=[" +
      Joiner.on(", ").join(targets) + "]" + ", newGenerationStamp=" +
      rb.getNewGenerationStamp() + ")");
}
Example 6: testConvertRecoveringBlock
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; //import the package/class the method depends on
@Test
public void testConvertRecoveringBlock() {
  DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo[] dnInfo = new DatanodeInfo[]{di1, di2};
  RecoveringBlock b = new RecoveringBlock(getExtendedBlock(), dnInfo, 3);
  RecoveringBlockProto bProto = PBHelper.convert(b);
  RecoveringBlock b1 = PBHelper.convert(bProto);
  assertEquals(b.getBlock(), b1.getBlock());
  DatanodeInfo[] dnInfo1 = b1.getLocations();
  assertEquals(dnInfo.length, dnInfo1.length);
  for (int i = 0; i < dnInfo.length; i++) {
    compare(dnInfo[i], dnInfo1[i]);
  }
}
Example 7: recoverBlock
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; //import the package/class the method depends on
/** Recover a block */
private void recoverBlock(RecoveringBlock rBlock) throws IOException {
  ExtendedBlock block = rBlock.getBlock();
  String blockPoolId = block.getBlockPoolId();
  DatanodeID[] datanodeids = rBlock.getLocations();
  List<BlockRecord> syncList = new ArrayList<BlockRecord>(datanodeids.length);
  int errorCount = 0;

  // check generation stamps
  for (DatanodeID id : datanodeids) {
    try {
      BPOfferService bpos = blockPoolManager.get(blockPoolId);
      DatanodeRegistration bpReg = bpos.bpRegistration;
      InterDatanodeProtocol datanode = bpReg.equals(id) ?
          this : DataNode.createInterDataNodeProtocolProxy(id, getConf(),
              dnConf.socketTimeout, dnConf.connectToDnViaHostname);
      ReplicaRecoveryInfo info = callInitReplicaRecovery(datanode, rBlock);
      if (info != null &&
          info.getGenerationStamp() >= block.getGenerationStamp() &&
          info.getNumBytes() > 0) {
        syncList.add(new BlockRecord(id, datanode, info));
      }
    } catch (RecoveryInProgressException ripE) {
      InterDatanodeProtocol.LOG.warn(
          "Recovery for replica " + block + " on data-node " + id
          + " is already in progress. Recovery id = "
          + rBlock.getNewGenerationStamp() + " is aborted.", ripE);
      return;
    } catch (IOException e) {
      ++errorCount;
      InterDatanodeProtocol.LOG.warn(
          "Failed to obtain replica info for block (=" + block
          + ") from datanode (=" + id + ")", e);
    }
  }

  if (errorCount == datanodeids.length) {
    throw new IOException("All datanodes failed: block=" + block
        + ", datanodeids=" + Arrays.asList(datanodeids));
  }
  syncBlock(rBlock, syncList);
}
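The callInitReplicaRecovery helper used above is a small wrapper on DataNode that is not shown on this page. A sketch of its presumed shape, based on a reading of the upstream code (the RemoteException unwrapping is an assumption):

import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.ipc.RemoteException;

// Forward initReplicaRecovery to the (possibly remote) datanode and unwrap
// RemoteException so callers see the original cause.
static ReplicaRecoveryInfo callInitReplicaRecovery(
    InterDatanodeProtocol datanode, RecoveringBlock rBlock) throws IOException {
  try {
    return datanode.initReplicaRecovery(rBlock);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException();
  }
}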
Example 8: recoverBlock
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; //import the package/class the method depends on
/**
 * Recover a block
 */
private void recoverBlock(RecoveringBlock rBlock) throws IOException {
  ExtendedBlock block = rBlock.getBlock();
  String blockPoolId = block.getBlockPoolId();
  DatanodeID[] datanodeids = rBlock.getLocations();
  List<BlockRecord> syncList = new ArrayList<>(datanodeids.length);
  int errorCount = 0;

  // check generation stamps
  for (DatanodeID id : datanodeids) {
    try {
      BPOfferService bpos = blockPoolManager.get(blockPoolId);
      DatanodeRegistration bpReg = bpos.bpRegistration;
      InterDatanodeProtocol datanode = bpReg.equals(id) ? this : DataNode
          .createInterDataNodeProtocolProxy(id, getConf(),
              dnConf.socketTimeout, dnConf.connectToDnViaHostname);
      ReplicaRecoveryInfo info = callInitReplicaRecovery(datanode, rBlock);
      if (info != null &&
          info.getGenerationStamp() >= block.getGenerationStamp() &&
          info.getNumBytes() > 0) {
        syncList.add(new BlockRecord(id, datanode, info));
      }
    } catch (RecoveryInProgressException ripE) {
      InterDatanodeProtocol.LOG.warn(
          "Recovery for replica " + block + " on data-node " + id +
          " is already in progress. Recovery id = " +
          rBlock.getNewGenerationStamp() + " is aborted.", ripE);
      return;
    } catch (IOException e) {
      ++errorCount;
      InterDatanodeProtocol.LOG.warn(
          "Failed to obtain replica info for block (=" + block +
          ") from datanode (=" + id + ")", e);
    }
  }

  if (errorCount == datanodeids.length) {
    throw new IOException(
        "All datanodes failed: block=" + block + ", datanodeids=" +
        Arrays.asList(datanodeids));
  }
  syncBlock(rBlock, syncList);
}
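A design note on the error handling shared by Examples 7 and 8: a RecoveryInProgressException aborts this attempt outright, since another recovery with a different id already owns the block, while per-datanode IOExceptions are merely counted. Recovery proceeds as long as at least one replica responded, and only the all-replicas-failed case surfaces as an IOException to the caller.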