This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo. If you have been wondering what ReplicaRecoveryInfo is for and how to use it, the curated class examples below may help.
ReplicaRecoveryInfo belongs to the org.apache.hadoop.hdfs.server.protocol package. 14 code examples of the class are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java examples.
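As a quick orientation before the examples: a ReplicaRecoveryInfo is constructed from a block ID, a replica length in bytes, a generation stamp, and a ReplicaState, and exposes getters such as getNumBytes() and getOriginalReplicaState(), as the snippets below show. The following minimal sketch is not taken from any of the examples; every value in it is invented for illustration.
// A minimal sketch, not from the examples below; all values are
// hypothetical. It only exercises the constructor and getters that
// appear in the snippets on this page.
ReplicaRecoveryInfo info = new ReplicaRecoveryInfo(
    1L,       // block ID (hypothetical)
    1024L,    // replica length in bytes (hypothetical)
    100L,     // generation stamp (hypothetical)
    ReplicaState.FINALIZED);
assert info.getNumBytes() == 1024L;
assert info.getOriginalReplicaState() == ReplicaState.FINALIZED;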
Example 1: initReplicaRecovery
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; // import the required package/class
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
    throws IOException {
  InitReplicaRecoveryRequestProto req = InitReplicaRecoveryRequestProto
      .newBuilder().setBlock(PBHelper.convert(rBlock)).build();
  InitReplicaRecoveryResponseProto resp;
  try {
    resp = rpcProxy.initReplicaRecovery(NULL_CONTROLLER, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  if (!resp.getReplicaFound()) {
    // No replica found on the remote node.
    return null;
  } else {
    if (!resp.hasBlock() || !resp.hasState()) {
      throw new IOException("Replica was found but missing fields. " +
          "Req: " + req + "\n" +
          "Resp: " + resp);
    }
  }
  BlockProto b = resp.getBlock();
  return new ReplicaRecoveryInfo(b.getBlockId(), b.getNumBytes(),
      b.getGenStamp(), PBHelper.convert(resp.getState()));
}
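Note that the client-side translator above maps replicaFound == false to a null return: a null ReplicaRecoveryInfo is how initReplicaRecovery reports that the remote datanode holds no replica of the block, so callers must be prepared to handle null.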
Example 2: initReplicaRecovery
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; // import the required package/class
@Override
public InitReplicaRecoveryResponseProto initReplicaRecovery(
    RpcController unused, InitReplicaRecoveryRequestProto request)
    throws ServiceException {
  RecoveringBlock b = PBHelper.convert(request.getBlock());
  ReplicaRecoveryInfo r;
  try {
    r = impl.initReplicaRecovery(b);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  if (r == null) {
    return InitReplicaRecoveryResponseProto.newBuilder()
        .setReplicaFound(false)
        .build();
  } else {
    return InitReplicaRecoveryResponseProto.newBuilder()
        .setReplicaFound(true)
        .setBlock(PBHelper.convert(r))
        .setState(PBHelper.convert(r.getOriginalReplicaState())).build();
  }
}
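This is the server-side counterpart of Example 1: block and state are only set on the response when replicaFound is true, which is exactly the invariant the hasBlock()/hasState() check in Example 1 relies on.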
Example 3: testSyncReplicas
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; // import the required package/class
/** Sync two replicas */
private void testSyncReplicas(ReplicaRecoveryInfo replica1,
    ReplicaRecoveryInfo replica2,
    InterDatanodeProtocol dn1,
    InterDatanodeProtocol dn2,
    long expectLen) throws IOException {
  DatanodeInfo[] locs = new DatanodeInfo[]{
      mock(DatanodeInfo.class), mock(DatanodeInfo.class)};
  RecoveringBlock rBlock = new RecoveringBlock(block,
      locs, RECOVERY_ID);
  ArrayList<BlockRecord> syncList = new ArrayList<BlockRecord>(2);
  BlockRecord record1 = new BlockRecord(
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn1, replica1);
  BlockRecord record2 = new BlockRecord(
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn2, replica2);
  syncList.add(record1);
  syncList.add(record2);
  when(dn1.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
      anyLong(), anyLong())).thenReturn("storage1");
  when(dn2.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
      anyLong(), anyLong())).thenReturn("storage2");
  dn.syncBlock(rBlock, syncList);
}
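This helper is reused by the RBW/RWR tests that follow: the two mocked InterDatanodeProtocol instances let each test verify which datanodes were asked to update their replicas, and to what length, after dn.syncBlock runs.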
Example 4: testRBWReplicas
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; // import the required package/class
/**
 * BlockRecovery_02.11.
 * Two replicas are RBW.
 * @throws IOException in case of an error
 */
@Test
public void testRBWReplicas() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN1, GEN_STAMP-1, ReplicaState.RBW);
  ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN2, GEN_STAMP-2, ReplicaState.RBW);
  InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
  InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
  long minLen = Math.min(REPLICA_LEN1, REPLICA_LEN2);
  testSyncReplicas(replica1, replica2, dn1, dn2, minLen);
  verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
  verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
}
Example 5: testRBW_RWRReplicas
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; // import the required package/class
/**
 * BlockRecovery_02.12.
 * One replica is RBW and another is RWR.
 * @throws IOException in case of an error
 */
@Test
public void testRBW_RWRReplicas() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN1, GEN_STAMP-1, ReplicaState.RBW);
  ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN1, GEN_STAMP-2, ReplicaState.RWR);
  InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
  InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
  testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
  verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
      REPLICA_LEN1);
  verify(dn2, never()).updateReplicaUnderRecovery(
      block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
}
Example 6: testRWRReplicas
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; // import the required package/class
/**
 * BlockRecovery_02.13.
 * Two replicas are RWR.
 * @throws IOException in case of an error
 */
@Test
public void testRWRReplicas() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN1, GEN_STAMP-1, ReplicaState.RWR);
  ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN2, GEN_STAMP-2, ReplicaState.RWR);
  InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
  InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
  long minLen = Math.min(REPLICA_LEN1, REPLICA_LEN2);
  testSyncReplicas(replica1, replica2, dn1, dn2, minLen);
  verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
  verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
}
Example 7: testZeroLenReplicas
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; // import the required package/class
/**
 * BlockRecoveryFI_07. Max replica length from all DNs is zero.
 *
 * @throws IOException in case of an error
 */
@Test
public void testZeroLenReplicas() throws IOException, InterruptedException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  DataNode spyDN = spy(dn);
  doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
      block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).
      initReplicaRecovery(any(RecoveringBlock.class));
  Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
  d.join();
  DatanodeProtocol dnP = dn.getActiveNamenodeForBP(POOL_ID);
  verify(dnP).commitBlockSynchronization(
      block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
}
Example 8: getSafeLength
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; // import the required package/class
/**
 * TODO: the current implementation depends on the assumption that the
 * parity cells are only generated based on the full stripe. This is not
 * true after we support hflush.
 */
@VisibleForTesting
long getSafeLength(Map<Long, BlockRecord> syncBlocks) {
  final int cellSize = ecPolicy.getCellSize();
  final int dataBlkNum = ecPolicy.getNumDataUnits();
  Preconditions.checkArgument(syncBlocks.size() >= dataBlkNum);
  final int stripeSize = dataBlkNum * cellSize;
  long[] blockLengths = new long[syncBlocks.size()];
  int i = 0;
  for (BlockRecord r : syncBlocks.values()) {
    ReplicaRecoveryInfo rInfo = r.getReplicaRecoveryInfo();
    blockLengths[i++] = rInfo.getNumBytes();
  }
  Arrays.sort(blockLengths);
  // A full stripe is a stripe that has at least dataBlkNum full cells.
  // lastFullStripeIdx is the index of the last full stripe.
  int lastFullStripeIdx =
      (int) (blockLengths[blockLengths.length - dataBlkNum] / cellSize);
  return lastFullStripeIdx * stripeSize; // return the safeLength
  // TODO: Include lastFullStripeIdx+1 stripe in safeLength, if there exists
  // such a stripe (and it must be partial).
}
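To make the arithmetic above concrete, here is a small self-contained sketch of the same computation with invented numbers; the layout (3 data units, 64 KB cells) and the replica lengths are hypothetical and do not come from any example on this page. The key step is that the dataBlkNum-th largest reported length determines the last full stripe.
// A standalone sketch of the safe-length arithmetic in Example 8.
// All values are hypothetical.
import java.util.Arrays;

public class SafeLengthSketch {
  public static void main(String[] args) {
    final int cellSize = 64 * 1024;   // hypothetical EC cell size
    final int dataBlkNum = 3;         // hypothetical number of data units
    final int stripeSize = dataBlkNum * cellSize;
    // Hypothetical replica lengths reported by five datanodes.
    long[] blockLengths = {3 * cellSize, 2 * cellSize + 100,
        2 * cellSize, cellSize, 0};
    Arrays.sort(blockLengths);
    // The dataBlkNum-th largest length bounds the last full stripe:
    // at least dataBlkNum replicas reach that many full cells.
    int lastFullStripeIdx =
        (int) (blockLengths[blockLengths.length - dataBlkNum] / cellSize);
    // Prints 393216 (= 2 full stripes of 3 * 64 KB).
    System.out.println("safe length = " + lastFullStripeIdx * stripeSize);
  }
}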
Example 9: initReplicaRecovery
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; // import the required package/class
@Override
public InitReplicaRecoveryResponseProto initReplicaRecovery(
    RpcController unused, InitReplicaRecoveryRequestProto request)
    throws ServiceException {
  RecoveringBlock b = PBHelper.convert(request.getBlock());
  ReplicaRecoveryInfo r;
  try {
    r = impl.initReplicaRecovery(b);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  if (r == null) {
    return InitReplicaRecoveryResponseProto.newBuilder()
        .setReplicaFound(false)
        .build();
  } else {
    return InitReplicaRecoveryResponseProto.newBuilder()
        .setReplicaFound(true)
        .setBlock(PBHelperClient.convert(r))
        .setState(PBHelper.convert(r.getOriginalReplicaState())).build();
  }
}
Author: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 24, Source file: InterDatanodeProtocolServerSideTranslatorPB.java
Example 10: testZeroLenReplicas
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; // import the required package/class
/**
 * BlockRecoveryFI_07. Max replica length from all DNs is zero.
 *
 * @throws IOException in case of an error
 */
@Test
public void testZeroLenReplicas() throws IOException, InterruptedException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
      block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).
      initReplicaRecovery(any(RecoveringBlock.class));
  for (RecoveringBlock rBlock : initRecoveringBlocks()) {
    BlockRecoveryWorker.RecoveryTaskContiguous RecoveryTaskContiguous =
        recoveryWorker.new RecoveryTaskContiguous(rBlock);
    BlockRecoveryWorker.RecoveryTaskContiguous spyTask
        = spy(RecoveryTaskContiguous);
    spyTask.recover();
  }
  DatanodeProtocol dnP = recoveryWorker.getActiveNamenodeForBP(POOL_ID);
  verify(dnP).commitBlockSynchronization(
      block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
}
Example 11: testSafeLength
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; // import the required package/class
@Test
public void testSafeLength() throws Exception {
  ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager
      .getSystemDefaultPolicy();
  RecoveringStripedBlock rBlockStriped = new RecoveringStripedBlock(rBlock,
      new byte[9], ecPolicy);
  BlockRecoveryWorker recoveryWorker = new BlockRecoveryWorker(dn);
  BlockRecoveryWorker.RecoveryTaskStriped recoveryTask =
      recoveryWorker.new RecoveryTaskStriped(rBlockStriped);
  for (int i = 0; i < BLOCK_LENGTHS_SUITE.length; i++) {
    int[] blockLengths = BLOCK_LENGTHS_SUITE[i][0];
    int safeLength = BLOCK_LENGTHS_SUITE[i][1][0];
    Map<Long, BlockRecord> syncList = new HashMap<>();
    for (int id = 0; id < blockLengths.length; id++) {
      ReplicaRecoveryInfo rInfo = new ReplicaRecoveryInfo(id,
          blockLengths[id], 0, null);
      syncList.put((long) id, new BlockRecord(null, null, rInfo));
    }
    Assert.assertEquals("BLOCK_LENGTHS_SUITE[" + i + "]", safeLength,
        recoveryTask.getSafeLength(syncList));
  }
}
Example 12: testSyncReplicas
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; // import the required package/class
/** Sync two replicas */
private void testSyncReplicas(ReplicaRecoveryInfo replica1,
    ReplicaRecoveryInfo replica2,
    InterDatanodeProtocol dn1,
    InterDatanodeProtocol dn2,
    long expectLen) throws IOException {
  DatanodeInfo[] locs = new DatanodeInfo[]{
      mock(DatanodeInfo.class), mock(DatanodeInfo.class)};
  RecoveringBlock rBlock = new RecoveringBlock(block,
      locs, RECOVERY_ID);
  ArrayList<BlockRecord> syncList = new ArrayList<BlockRecord>(2);
  BlockRecord record1 = new BlockRecord(
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn1, replica1);
  BlockRecord record2 = new BlockRecord(
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn2, replica2);
  syncList.add(record1);
  syncList.add(record2);
  when(dn1.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
      anyLong())).thenReturn("storage1");
  when(dn2.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
      anyLong())).thenReturn("storage2");
  dn.syncBlock(rBlock, syncList);
}
Example 13: testRBWReplicas
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; // import the required package/class
/**
 * BlockRecovery_02.11.
 * Two replicas are RBW.
 * @throws IOException in case of an error
 */
@Test
public void testRBWReplicas() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN1, GEN_STAMP-1, ReplicaState.RBW);
  ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN2, GEN_STAMP-2, ReplicaState.RBW);
  InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
  InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
  long minLen = Math.min(REPLICA_LEN1, REPLICA_LEN2);
  testSyncReplicas(replica1, replica2, dn1, dn2, minLen);
  verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, minLen);
  verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, minLen);
}
Example 14: testRBW_RWRReplicas
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; // import the required package/class
/**
 * BlockRecovery_02.12.
 * One replica is RBW and another is RWR.
 * @throws IOException in case of an error
 */
@Test
public void testRBW_RWRReplicas() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN1, GEN_STAMP-1, ReplicaState.RBW);
  ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN1, GEN_STAMP-2, ReplicaState.RWR);
  InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
  InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
  testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
  verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, REPLICA_LEN1);
  verify(dn2, never()).updateReplicaUnderRecovery(
      block, RECOVERY_ID, REPLICA_LEN1);
}