This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol. If you have been wondering what InterDatanodeProtocol is for, how to use it, or where to find usage examples, the curated class examples here may help.
The InterDatanodeProtocol class belongs to the org.apache.hadoop.hdfs.server.protocol package. Fifteen code examples of the class are shown below, sorted by popularity by default.
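Before diving into the examples, here is a minimal sketch (not taken from any of the projects below) of the typical call pattern: a datanode obtains an InterDatanodeProtocol proxy to a peer, drives block recovery through it, and stops the proxy when done. The factory method matches Examples 1 and 2; the peer's DatanodeID and the Configuration are assumed to be available to the caller.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.ipc.RPC;

/** Minimal sketch: open a proxy to a peer datanode, use it, stop it. */
static void withPeerProxy(DatanodeID peer, Configuration conf)
    throws IOException {
  InterDatanodeProtocol proxy = DataNode.createInterDataNodeProtocolProxy(
      peer, conf, 5000 /* socket timeout, ms */, false /* connect via IP */);
  try {
    // Recovery calls go here, e.g. proxy.initReplicaRecovery(...) as in
    // Example 2, or proxy.updateReplicaUnderRecovery(...) as in Examples 3-11.
  } finally {
    RPC.stopProxy(proxy); // inter-datanode proxies must be stopped explicitly
  }
}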
Example 1: createInterDataNodeProtocolProxy
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; // import the required package/class
public static InterDatanodeProtocol createInterDataNodeProtocolProxy(
    DatanodeID datanodeid, final Configuration conf, final int socketTimeout,
    final boolean connectToDnViaHostname) throws IOException {
  final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
  final InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
  }
  final UserGroupInformation loginUgi = UserGroupInformation.getLoginUser();
  try {
    return loginUgi
        .doAs(new PrivilegedExceptionAction<InterDatanodeProtocol>() {
          @Override
          public InterDatanodeProtocol run() throws IOException {
            return new InterDatanodeProtocolTranslatorPB(addr, loginUgi,
                conf, NetUtils.getDefaultSocketFactory(conf), socketTimeout);
          }
        });
  } catch (InterruptedException ie) {
    throw new IOException(ie.getMessage());
  }
}
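A note on the design: the proxy is created inside loginUgi.doAs(...) so that, on a secured cluster, the inter-datanode connection is authenticated as the datanode's own login user (UserGroupInformation.getLoginUser()) rather than as whatever caller happens to be on the stack. InterDatanodeProtocolTranslatorPB is the protobuf-based client-side translator for this protocol.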
Example 2: testInterDNProtocolTimeout
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; // import the required package/class
/** Test to verify that InterDatanode RPC times out as expected when
 * the server DN does not respond.
 */
@Test(expected=SocketTimeoutException.class)
public void testInterDNProtocolTimeout() throws Throwable {
  final Server server = new TestServer(1, true);
  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
  InterDatanodeProtocol proxy = null;

  try {
    proxy = DataNode.createInterDataNodeProtocolProxy(
        dInfo, conf, 500, false);
    proxy.initReplicaRecovery(new RecoveringBlock(
        new ExtendedBlock("bpid", 1), null, 100));
    fail("Expected SocketTimeoutException, but did not get one.");
  } finally {
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    server.stop();
  }
}
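Here TestServer is a small in-process RPC server defined by the surrounding test class; judging by the test's intent, it is constructed so that it accepts the connection but never answers the call, which lets the 500 ms socket timeout passed to createInterDataNodeProtocolProxy expire and initReplicaRecovery throw the expected SocketTimeoutException.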
Example 3: testSyncReplicas
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; // import the required package/class
/** Sync two replicas */
private void testSyncReplicas(ReplicaRecoveryInfo replica1,
    ReplicaRecoveryInfo replica2,
    InterDatanodeProtocol dn1,
    InterDatanodeProtocol dn2,
    long expectLen) throws IOException {
  // Note: expectLen is not used inside this helper; the callers assert the
  // expected length via Mockito verify(...) on the dn1/dn2 mocks afterwards.
  DatanodeInfo[] locs = new DatanodeInfo[]{
      mock(DatanodeInfo.class), mock(DatanodeInfo.class)};
  RecoveringBlock rBlock = new RecoveringBlock(block,
      locs, RECOVERY_ID);
  ArrayList<BlockRecord> syncList = new ArrayList<BlockRecord>(2);
  BlockRecord record1 = new BlockRecord(
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn1, replica1);
  BlockRecord record2 = new BlockRecord(
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn2, replica2);
  syncList.add(record1);
  syncList.add(record2);

  when(dn1.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
      anyLong(), anyLong())).thenReturn("storage1");
  when(dn2.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
      anyLong(), anyLong())).thenReturn("storage2");
  dn.syncBlock(rBlock, syncList);
}
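The helper ends in dn.syncBlock(rBlock, syncList), the datanode-side entry point for replica synchronization: it derives a consistent length from the replica states in syncList and then invokes updateReplicaUnderRecovery on each participating datanode, which is exactly what the following tests assert against the two mocks.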
Example 4: testRBWReplicas
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; // import the required package/class
/**
 * BlockRecovery_02.11.
 * Two replicas are RBW.
 * @throws IOException in case of an error
 */
@Test
public void testRBWReplicas() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.RBW);
  ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN2, GEN_STAMP - 2, ReplicaState.RBW);
  InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
  InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
  long minLen = Math.min(REPLICA_LEN1, REPLICA_LEN2);
  testSyncReplicas(replica1, replica2, dn1, dn2, minLen);
  verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
  verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
}
Example 5: testRBW_RWRReplicas
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; // import the required package/class
/**
 * BlockRecovery_02.12.
 * One replica is RBW and another is RWR.
 * @throws IOException in case of an error
 */
@Test
public void testRBW_RWRReplicas() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.RBW);
  ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN1, GEN_STAMP - 2, ReplicaState.RWR);
  InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
  InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
  testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
  verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
      REPLICA_LEN1);
  verify(dn2, never()).updateReplicaUnderRecovery(
      block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
}
Example 6: testRWRReplicas
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; // import the required package/class
/**
 * BlockRecovery_02.13.
 * Two replicas are RWR.
 * @throws IOException in case of an error
 */
@Test
public void testRWRReplicas() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.RWR);
  ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN2, GEN_STAMP - 2, ReplicaState.RWR);
  InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
  InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
  long minLen = Math.min(REPLICA_LEN1, REPLICA_LEN2);
  testSyncReplicas(replica1, replica2, dn1, dn2, minLen);
  verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
  verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
}
Example 7: testRBW_RWRReplicas
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; // import the required package/class
/**
 * BlockRecovery_02.12.
 * One replica is RBW and another is RWR.
 * @throws IOException in case of an error
 */
@Test
public void testRBW_RWRReplicas() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.RBW);
  ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN1, GEN_STAMP - 2, ReplicaState.RWR);
  InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
  InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
  testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
  verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
      REPLICA_LEN1);
  verify(dn2, never()).updateReplicaUnderRecovery(
      block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
}
Example 8: testSyncReplicas
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; // import the required package/class
/** Sync two replicas */
private void testSyncReplicas(ReplicaRecoveryInfo replica1,
    ReplicaRecoveryInfo replica2,
    InterDatanodeProtocol dn1,
    InterDatanodeProtocol dn2,
    long expectLen) throws IOException {
  DatanodeInfo[] locs = new DatanodeInfo[]{
      mock(DatanodeInfo.class), mock(DatanodeInfo.class)};
  RecoveringBlock rBlock = new RecoveringBlock(block,
      locs, RECOVERY_ID);
  ArrayList<BlockRecord> syncList = new ArrayList<BlockRecord>(2);
  BlockRecord record1 = new BlockRecord(
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn1, replica1);
  BlockRecord record2 = new BlockRecord(
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn2, replica2);
  syncList.add(record1);
  syncList.add(record2);

  when(dn1.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
      anyLong())).thenReturn("storage1");
  when(dn2.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
      anyLong())).thenReturn("storage2");
  dn.syncBlock(rBlock, syncList);
}
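This is the same helper as Example 3, but written against an older signature of updateReplicaUnderRecovery that takes three arguments (block, recovery id, new length) and no new block id; Examples 9 through 11 verify the mocks against this three-argument form.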
Example 9: testRBWReplicas
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; // import the required package/class
/**
 * BlockRecovery_02.11.
 * Two replicas are RBW.
 * @throws IOException in case of an error
 */
@Test
public void testRBWReplicas() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.RBW);
  ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN2, GEN_STAMP - 2, ReplicaState.RBW);
  InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
  InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
  long minLen = Math.min(REPLICA_LEN1, REPLICA_LEN2);
  testSyncReplicas(replica1, replica2, dn1, dn2, minLen);
  verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, minLen);
  verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, minLen);
}
Example 10: testRBW_RWRReplicas
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; // import the required package/class
/**
 * BlockRecovery_02.12.
 * One replica is RBW and another is RWR.
 * @throws IOException in case of an error
 */
@Test
public void testRBW_RWRReplicas() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.RBW);
  ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN1, GEN_STAMP - 2, ReplicaState.RWR);
  InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
  InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
  testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
  verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, REPLICA_LEN1);
  verify(dn2, never()).updateReplicaUnderRecovery(
      block, RECOVERY_ID, REPLICA_LEN1);
}
Example 11: testRWRReplicas
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; // import the required package/class
/**
 * BlockRecovery_02.13.
 * Two replicas are RWR.
 * @throws IOException in case of an error
 */
@Test
public void testRWRReplicas() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN1, GEN_STAMP - 1, ReplicaState.RWR);
  ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
      REPLICA_LEN2, GEN_STAMP - 2, ReplicaState.RWR);
  InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
  InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
  long minLen = Math.min(REPLICA_LEN1, REPLICA_LEN2);
  testSyncReplicas(replica1, replica2, dn1, dn2, minLen);
  verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, minLen);
  verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, minLen);
}
Example 12: getBlockInputStream
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; // import the required package/class
@Override
public InputStream getBlockInputStream(DataNode datanode, long offset)
    throws IOException {
  File f = getDataFileToRead();
  if (f.exists()) {
    if (offset <= 0) {
      return new FileInputStream(f);
    } else {
      RandomAccessFile blockInFile = new RandomAccessFile(f, "r");
      blockInFile.seek(offset);
      return new FileInputStream(blockInFile.getFD());
    }
  }

  // The file reference is non-null but the file does not exist:
  // possibly the disk failed.
  if (datanode != null) {
    datanode.checkDiskError();
  }
  if (InterDatanodeProtocol.LOG.isDebugEnabled()) {
    InterDatanodeProtocol.LOG
        .debug("DatanodeBlockInfo.getBlockInputStream failure. file: " + f);
  }
  throw new IOException("Cannot open file " + f);
}
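A hypothetical call site for the method above; the class name DatanodeBlockInfo is inferred from the debug message, while the method name and `blockLen` are illustrative stand-ins, not part of the original code.

import java.io.IOException;
import java.io.InputStream;

// Hypothetical usage sketch: stream a block's bytes starting mid-block.
static void streamSecondHalf(DatanodeBlockInfo info, DataNode datanode,
    long blockLen) throws IOException {
  InputStream in = info.getBlockInputStream(datanode, blockLen / 2);
  try {
    byte[] buf = new byte[64 * 1024];
    for (int n; (n = in.read(buf)) != -1; ) {
      // forward the bytes, e.g. to a client reading the tail of the block
    }
  } finally {
    in.close(); // also releases the underlying file descriptor
  }
}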
Example 13: createInterDataNodeProtocolProxy
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; // import the required package/class
public static InterDatanodeProtocol createInterDataNodeProtocolProxy(
    DatanodeID datanodeid, Configuration conf, final int socketTimeout)
    throws IOException {
  InetSocketAddress addr = NetUtils.createSocketAddr(
      datanodeid.getHost() + ":" + datanodeid.getIpcPort());
  if (InterDatanodeProtocol.LOG.isDebugEnabled()) {
    // log at debug, matching the isDebugEnabled() guard above
    InterDatanodeProtocol.LOG.debug("InterDatanodeProtocol addr=" + addr);
  }

  UserGroupInformation ugi;
  try {
    ugi = UserGroupInformation.login(conf);
  } catch (LoginException le) {
    throw new RuntimeException("Couldn't login!", le);
  }

  return (InterDatanodeProtocol) RPC.getProxy(InterDatanodeProtocol.class,
      InterDatanodeProtocol.versionID, addr,
      ugi, conf,
      NetUtils.getDefaultSocketFactory(conf), socketTimeout);
}
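Unlike Examples 1 and 15, which build the proxy through the protobuf translator (InterDatanodeProtocolTranslatorPB), this older variant calls RPC.getProxy directly against InterDatanodeProtocol.versionID, i.e. the pre-protobuf Writable RPC path; it also resolves the peer address from host and IPC port itself and authenticates through the old UserGroupInformation.login(conf) API.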
Example 14: call
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; // import the required package/class
public Boolean call() throws Exception {
  InterDatanodeProtocol remoteDatanode = null;
  try {
    File srcBlockFile = data.getBlockFile(srcNamespaceId, srcBlock);
    remoteDatanode = DataNode
        .createInterDataNodeProtocolProxy(target, getConf(), socketTimeout);
    remoteDatanode.copyBlockLocal(srcFileSystem, srcNamespaceId, srcBlock,
        dstNamespaceId, dstBlock,
        srcBlockFile.getAbsolutePath());
  } catch (IOException e) {
    LOG.warn("Cross datanode local block copy failed", e);
    throw e;
  } finally {
    if (remoteDatanode != null) {
      stopDatanodeProxy(remoteDatanode);
    }
  }
  return true;
}
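Because Example 14 is the call() method of a Callable<Boolean>, a natural way to run it is through an ExecutorService. A small sketch, assuming `copyTask` is an instance of the enclosing (unnamed here) Callable class:

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

// Sketch: run the block-copy task on a worker thread and wait for it.
static boolean runCopy(Callable<Boolean> copyTask) throws Exception {
  ExecutorService pool = Executors.newSingleThreadExecutor();
  try {
    Future<Boolean> done = pool.submit(copyTask);
    return done.get(); // an IOException surfaces wrapped in ExecutionException
  } finally {
    pool.shutdown();
  }
}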
Example 15: createInterDataNodeProtocolProxy
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; // import the required package/class
public static InterDatanodeProtocol createInterDataNodeProtocolProxy(
    DatanodeID datanodeid, final Configuration conf, final int socketTimeout,
    final boolean connectToDnViaHostname) throws IOException {
  final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
  final InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
  }
  final UserGroupInformation loginUgi = UserGroupInformation.getLoginUser();
  try {
    return loginUgi
        .doAs(new PrivilegedExceptionAction<InterDatanodeProtocol>() {
          @Override
          public InterDatanodeProtocol run() throws IOException {
            return new InterDatanodeProtocolTranslatorPB(addr, loginUgi, conf,
                NetUtils.getDefaultSocketFactory(conf), socketTimeout);
          }
        });
  } catch (InterruptedException ie) {
    throw new IOException(ie.getMessage());
  }
}