This article compiles typical usage examples of the Java class org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock. If you are wondering what RecoveringBlock is for or how to use it, the curated examples below should help.
RecoveringBlock is a nested class of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand. Fifteen code examples are shown below, ordered by popularity.
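For orientation, here is a minimal sketch (not taken from the examples below, and assuming the Hadoop 2.x-era constructors they use) of how a RecoveringBlock is built and bundled into a BlockRecoveryCommand; the pool id, block id, and generation stamp are placeholder values:
import java.util.Collections;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;

public class RecoveringBlockSketch {
  public static BlockRecoveryCommand buildCommand(DatanodeInfo[] locs) {
    // A RecoveringBlock pairs the block to recover with the replica
    // locations and the new generation stamp for this recovery attempt.
    ExtendedBlock blk = new ExtendedBlock("bpid", 1L);          // placeholder ids
    RecoveringBlock rb = new RecoveringBlock(blk, locs, 100L);  // 100L = new gen stamp
    return new BlockRecoveryCommand(Collections.singletonList(rb));
  }
}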
Example 1: recoverBlocks
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; // import the required package/class
public Daemon recoverBlocks(
final String who,
final Collection<RecoveringBlock> blocks) {
Daemon d = new Daemon(threadGroup, new Runnable() {
/** Recover a list of blocks. It is run by the primary datanode. */
@Override
public void run() {
for(RecoveringBlock b : blocks) {
try {
logRecoverBlock(who, b);
recoverBlock(b);
} catch (IOException e) {
LOG.warn("recoverBlocks FAILED: " + b, e);
}
}
}
});
d.start();
return d;
}
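A typical call site, adapted from the tests later in this article (datanode and recoveringBlocks are placeholders; the who string is only used for logging):
Daemon d = datanode.recoverBlocks("fake NN", recoveringBlocks); // spawn the recovery thread
d.join(); // wait until the primary datanode has attempted every block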
Example 2: initReplicaRecovery
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; // import the required package/class
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
throws IOException {
InitReplicaRecoveryRequestProto req = InitReplicaRecoveryRequestProto
.newBuilder().setBlock(PBHelper.convert(rBlock)).build();
InitReplicaRecoveryResponseProto resp;
try {
resp = rpcProxy.initReplicaRecovery(NULL_CONTROLLER, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
if (!resp.getReplicaFound()) {
// No replica found on the remote node.
return null;
} else {
if (!resp.hasBlock() || !resp.hasState()) {
throw new IOException("Replica was found but the response is missing fields. " +
"Req: " + req + "\n" +
"Resp: " + resp);
}
}
BlockProto b = resp.getBlock();
return new ReplicaRecoveryInfo(b.getBlockId(), b.getNumBytes(),
b.getGenStamp(), PBHelper.convert(resp.getState()));
}
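A hedged sketch of how a caller might consume the result (proxy creation follows Example 4; rBlock is a placeholder):
ReplicaRecoveryInfo info = proxy.initReplicaRecovery(rBlock);
if (info == null) {
  // The remote datanode holds no replica of this block: leave it out of the sync list.
} else {
  long candidateLen = info.getNumBytes(); // replica length considered when choosing the recovery length
}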
Example 3: initReplicaRecovery
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; // import the required package/class
@Override
public InitReplicaRecoveryResponseProto initReplicaRecovery(
RpcController unused, InitReplicaRecoveryRequestProto request)
throws ServiceException {
RecoveringBlock b = PBHelper.convert(request.getBlock());
ReplicaRecoveryInfo r;
try {
r = impl.initReplicaRecovery(b);
} catch (IOException e) {
throw new ServiceException(e);
}
if (r == null) {
return InitReplicaRecoveryResponseProto.newBuilder()
.setReplicaFound(false)
.build();
} else {
return InitReplicaRecoveryResponseProto.newBuilder()
.setReplicaFound(true)
.setBlock(PBHelper.convert(r))
.setState(PBHelper.convert(r.getOriginalReplicaState())).build();
}
}
Example 4: testInterDNProtocolTimeout
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; // import the required package/class
/** Test to verify that InterDatanode RPC times out as expected when
* the server DN does not respond.
*/
@Test(expected=SocketTimeoutException.class)
public void testInterDNProtocolTimeout() throws Throwable {
final Server server = new TestServer(1, true);
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
InterDatanodeProtocol proxy = null;
try {
proxy = DataNode.createInterDataNodeProtocolProxy(
dInfo, conf, 500, false);
proxy.initReplicaRecovery(new RecoveringBlock(
new ExtendedBlock("bpid", 1), null, 100));
fail("Expected SocketTimeoutException, but the call did not time out.");
} finally {
if (proxy != null) {
RPC.stopProxy(proxy);
}
server.stop();
}
}
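The proxy here is created with a 500 ms socket timeout against a TestServer that is set up not to answer, so initReplicaRecovery must fail with SocketTimeoutException, which is exactly what the @Test(expected = SocketTimeoutException.class) annotation asserts.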
Example 5: testSyncReplicas
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; // import the required package/class
/** Sync two replicas */
private void testSyncReplicas(ReplicaRecoveryInfo replica1,
ReplicaRecoveryInfo replica2,
InterDatanodeProtocol dn1,
InterDatanodeProtocol dn2,
long expectLen) throws IOException {
DatanodeInfo[] locs = new DatanodeInfo[]{
mock(DatanodeInfo.class), mock(DatanodeInfo.class)};
RecoveringBlock rBlock = new RecoveringBlock(block,
locs, RECOVERY_ID);
ArrayList<BlockRecord> syncList = new ArrayList<BlockRecord>(2);
BlockRecord record1 = new BlockRecord(
DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn1, replica1);
BlockRecord record2 = new BlockRecord(
DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn2, replica2);
syncList.add(record1);
syncList.add(record2);
when(dn1.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
anyLong(), anyLong())).thenReturn("storage1");
when(dn2.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
anyLong(), anyLong())).thenReturn("storage2");
dn.syncBlock(rBlock, syncList);
}
Example 6: testZeroLenReplicas
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; // import the required package/class
/**
* BlockRecoveryFI_07. max replica length from all DNs is zero.
*
* @throws IOException in case of an error
*/
@Test
public void testZeroLenReplicas() throws IOException, InterruptedException {
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
DataNode spyDN = spy(dn);
doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).
initReplicaRecovery(any(RecoveringBlock.class));
Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
d.join();
DatanodeProtocol dnP = dn.getActiveNamenodeForBP(POOL_ID);
verify(dnP).commitBlockSynchronization(
block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
}
Example 7: testConvertBlockRecoveryCommand
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; // import the required package/class
@Test
public void testConvertBlockRecoveryCommand() {
DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };
List<RecoveringBlock> blks = ImmutableList.of(
new RecoveringBlock(getExtendedBlock(1), dnInfo, 3),
new RecoveringBlock(getExtendedBlock(2), dnInfo, 3)
);
BlockRecoveryCommand cmd = new BlockRecoveryCommand(blks);
BlockRecoveryCommandProto proto = PBHelper.convert(cmd);
assertEquals(1, proto.getBlocks(0).getBlock().getB().getBlockId());
assertEquals(2, proto.getBlocks(1).getBlock().getB().getBlockId());
BlockRecoveryCommand cmd2 = PBHelper.convert(proto);
List<RecoveringBlock> cmd2Blks = Lists.newArrayList(
cmd2.getRecoveringBlocks());
assertEquals(blks.get(0).getBlock(), cmd2Blks.get(0).getBlock());
assertEquals(blks.get(1).getBlock(), cmd2Blks.get(1).getBlock());
assertEquals(Joiner.on(",").join(blks), Joiner.on(",").join(cmd2Blks));
assertEquals(cmd.toString(), cmd2.toString());
}
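The round trip can also be spot-checked for a single block; a sketch reusing the getExtendedBlock helper and dnInfo array from the test above:
RecoveringBlock original = new RecoveringBlock(getExtendedBlock(1), dnInfo, 3);
RecoveringBlockProto oneProto = PBHelper.convert(original);  // Java -> protobuf
RecoveringBlock roundTripped = PBHelper.convert(oneProto);   // protobuf -> Java
assertEquals(original.getBlock(), roundTripped.getBlock());  // block identity survives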
Example 8: recoverBlocks
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; // import the required package/class
public Daemon recoverBlocks(final String who,
final Collection<RecoveringBlock> blocks) {
Daemon d = new Daemon(datanode.threadGroup, new Runnable() {
@Override
public void run() {
for(RecoveringBlock b : blocks) {
try {
logRecoverBlock(who, b);
if (b.isStriped()) {
new RecoveryTaskStriped((RecoveringStripedBlock) b).recover();
} else {
new RecoveryTaskContiguous(b).recover();
}
} catch (IOException e) {
LOG.warn("recoverBlocks FAILED: " + b, e);
}
}
}
});
d.start();
return d;
}
Example 9: convert
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; // import the required package/class
public static RecoveringBlockProto convert(RecoveringBlock b) {
if (b == null) {
return null;
}
LocatedBlockProto lb = PBHelperClient.convertLocatedBlock(b);
RecoveringBlockProto.Builder builder = RecoveringBlockProto.newBuilder();
builder.setBlock(lb).setNewGenStamp(b.getNewGenerationStamp());
if(b.getNewBlock() != null)
builder.setTruncateBlock(PBHelperClient.convert(b.getNewBlock()));
if (b instanceof RecoveringStripedBlock) {
RecoveringStripedBlock sb = (RecoveringStripedBlock) b;
builder.setEcPolicy(PBHelperClient.convertErasureCodingPolicy(
sb.getErasureCodingPolicy()));
builder.setBlockIndices(PBHelperClient.getByteString(sb.getBlockIndices()));
}
return builder.build();
}
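The matching inverse overload, as used by the server-side translators in Examples 3 and 10, converts the proto back into a RecoveringBlock (a one-line sketch; recoveringBlockProto is a placeholder):
RecoveringBlock rb = PBHelper.convert(recoveringBlockProto); // proto -> Java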
Example 10: initReplicaRecovery
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; // import the required package/class
@Override
public InitReplicaRecoveryResponseProto initReplicaRecovery(
RpcController unused, InitReplicaRecoveryRequestProto request)
throws ServiceException {
RecoveringBlock b = PBHelper.convert(request.getBlock());
ReplicaRecoveryInfo r;
try {
r = impl.initReplicaRecovery(b);
} catch (IOException e) {
throw new ServiceException(e);
}
if (r == null) {
return InitReplicaRecoveryResponseProto.newBuilder()
.setReplicaFound(false)
.build();
} else {
return InitReplicaRecoveryResponseProto.newBuilder()
.setReplicaFound(true)
.setBlock(PBHelperClient.convert(r))
.setState(PBHelper.convert(r.getOriginalReplicaState())).build();
}
}
Author: aliyun-beta | project: aliyun-oss-hadoop-fs | lines: 24 | source: InterDatanodeProtocolServerSideTranslatorPB.java
Example 11: testRecoveryInProgressException
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; // import the required package/class
/**
* BlockRecoveryFI_05. One DN throws RecoveryInProgressException.
*
* @throws IOException
* in case of an error
*/
@Test
public void testRecoveryInProgressException()
throws IOException, InterruptedException {
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
doThrow(new RecoveryInProgressException("Replica recovery is in progress")).
when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
for(RecoveringBlock rBlock: initRecoveringBlocks()){
BlockRecoveryWorker.RecoveryTaskContiguous RecoveryTaskContiguous =
recoveryWorker.new RecoveryTaskContiguous(rBlock);
BlockRecoveryWorker.RecoveryTaskContiguous spyTask
= spy(RecoveryTaskContiguous);
spyTask.recover();
verify(spyTask, never()).syncBlock(anyListOf(BlockRecord.class));
}
}
Example 12: testErrorReplicas
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; // import the required package/class
/**
* BlockRecoveryFI_06. All datanodes throw an exception.
*
* @throws IOException
* in case of an error
*/
@Test
public void testErrorReplicas() throws IOException, InterruptedException {
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
doThrow(new IOException()).
when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
for(RecoveringBlock rBlock: initRecoveringBlocks()){
BlockRecoveryWorker.RecoveryTaskContiguous RecoveryTaskContiguous =
recoveryWorker.new RecoveryTaskContiguous(rBlock);
BlockRecoveryWorker.RecoveryTaskContiguous spyTask = spy(RecoveryTaskContiguous);
try {
spyTask.recover();
fail();
} catch(IOException e){
GenericTestUtils.assertExceptionContains("All datanodes failed", e);
}
verify(spyTask, never()).syncBlock(anyListOf(BlockRecord.class));
}
}
Example 13: testZeroLenReplicas
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; // import the required package/class
/**
* BlockRecoveryFI_07. max replica length from all DNs is zero.
*
* @throws IOException in case of an error
*/
@Test
public void testZeroLenReplicas() throws IOException, InterruptedException {
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).
initReplicaRecovery(any(RecoveringBlock.class));
for(RecoveringBlock rBlock: initRecoveringBlocks()){
BlockRecoveryWorker.RecoveryTaskContiguous RecoveryTaskContiguous =
recoveryWorker.new RecoveryTaskContiguous(rBlock);
BlockRecoveryWorker.RecoveryTaskContiguous spyTask
= spy(RecoveryTaskContiguous);
spyTask.recover();
}
DatanodeProtocol dnP = recoveryWorker.getActiveNamenodeForBP(POOL_ID);
verify(dnP).commitBlockSynchronization(
block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
}
Example 14: testInterDNProtocolTimeout
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; // import the required package/class
/**
* Test to verify that InterDatanode RPC times out as expected when
* the server DN does not respond.
*/
@Test(expected = SocketTimeoutException.class)
public void testInterDNProtocolTimeout() throws Throwable {
final Server server = new TestServer(1, true);
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
InterDatanodeProtocol proxy = null;
try {
proxy =
DataNode.createInterDataNodeProtocolProxy(dInfo, conf, 500, false);
proxy.initReplicaRecovery(
new RecoveringBlock(new ExtendedBlock("bpid", 1), null, 100));
fail("Expected SocketTimeoutException, but the call did not time out.");
} finally {
if (proxy != null) {
RPC.stopProxy(proxy);
}
server.stop();
}
}
Example 15: testSyncReplicas
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; // import the required package/class
/** Sync two replicas */
private void testSyncReplicas(ReplicaRecoveryInfo replica1,
ReplicaRecoveryInfo replica2,
InterDatanodeProtocol dn1,
InterDatanodeProtocol dn2,
long expectLen) throws IOException {
DatanodeInfo[] locs = new DatanodeInfo[]{
mock(DatanodeInfo.class), mock(DatanodeInfo.class)};
RecoveringBlock rBlock = new RecoveringBlock(block,
locs, RECOVERY_ID);
ArrayList<BlockRecord> syncList = new ArrayList<BlockRecord>(2);
BlockRecord record1 = new BlockRecord(
DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn1, replica1);
BlockRecord record2 = new BlockRecord(
DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn2, replica2);
syncList.add(record1);
syncList.add(record2);
when(dn1.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
anyLong())).thenReturn("storage1");
when(dn2.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
anyLong())).thenReturn("storage2");
dn.syncBlock(rBlock, syncList);
}
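Note that this older variant stubs a three-argument updateReplicaUnderRecovery (block, recovery id, new length), whereas Example 5 stubs a four-argument overload; the extra long there is presumably the new block id added for truncate support in later Hadoop versions.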