This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol. If you are wondering what DatanodeProtocol does or how to use it, the curated examples below may help.
DatanodeProtocol belongs to the org.apache.hadoop.hdfs.server.protocol package. Fifteen code examples of the class are shown below, sorted by popularity by default.
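Before the individual examples, here is a minimal sketch (not taken from any of them) of the two roles DatanodeProtocol plays on this page: a holder of error-code and command-action constants, and the RPC interface between a DataNode and the NameNode. The describeErrorCode helper is hypothetical and only illustrates the constant-dispatch pattern seen in Examples 2 and 3.

import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

public class DatanodeProtocolConstantsDemo {
  // Hypothetical helper: map a DatanodeProtocol error code to a label,
  // mirroring the dispatch in the errorReport() examples below.
  static String describeErrorCode(int errorCode) {
    if (errorCode == DatanodeProtocol.NOTIFY) {
      return "NOTIFY";            // informational only
    } else if (errorCode == DatanodeProtocol.DISK_ERROR) {
      return "DISK_ERROR";        // some volumes failed, DN keeps running
    } else if (errorCode == DatanodeProtocol.FATAL_DISK_ERROR) {
      return "FATAL_DISK_ERROR";  // too few volumes left, DN should stop
    }
    return "UNKNOWN(" + errorCode + ")";
  }

  public static void main(String[] args) {
    System.out.println(describeErrorCode(DatanodeProtocol.DISK_ERROR));
  }
}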
Example 1: getProtocolVersion
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; // import the required package/class
public long getProtocolVersion(String protocol,
long clientVersion) throws IOException {
if (protocol.equals(ClientProtocol.class.getName())) {
return ClientProtocol.versionID;
} else if (protocol.equals(DatanodeProtocol.class.getName())){
return DatanodeProtocol.versionID;
} else if (protocol.equals(NamenodeProtocol.class.getName())){
return NamenodeProtocol.versionID;
} else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())){
return RefreshAuthorizationPolicyProtocol.versionID;
} else if (protocol.equals(RefreshUserMappingsProtocol.class.getName())){
return RefreshUserMappingsProtocol.versionID;
} else if (protocol.equals(RefreshCallQueueProtocol.class.getName())) {
return RefreshCallQueueProtocol.versionID;
} else if (protocol.equals(GetUserMappingsProtocol.class.getName())){
return GetUserMappingsProtocol.versionID;
} else if (protocol.equals(TraceAdminProtocol.class.getName())){
return TraceAdminProtocol.versionID;
} else {
throw new IOException("Unknown protocol to name node: " + protocol);
}
}
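A hedged usage note for Example 1: a client holding a proxy can compare the server-reported version against its own compile-time versionID before issuing other calls. The VersionCheck class and its helper are assumptions for illustration, not part of the example above.

import java.io.IOException;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

public class VersionCheck {
  // Hypothetical helper: fail fast when the server side reports a
  // DatanodeProtocol version different from the client's versionID.
  static void checkDatanodeProtocolVersion(long serverVersion)
      throws IOException {
    if (serverVersion != DatanodeProtocol.versionID) {
      throw new IOException("DatanodeProtocol version mismatch: client="
          + DatanodeProtocol.versionID + ", server=" + serverVersion);
    }
  }
}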
Example 2: errorReport
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; // import the required package/class
@Override // DatanodeProtocol
public void errorReport(DatanodeRegistration nodeReg,
int errorCode, String msg) throws IOException {
checkNNStartup();
String dnName =
(nodeReg == null) ? "Unknown DataNode" : nodeReg.toString();
if (errorCode == DatanodeProtocol.NOTIFY) {
LOG.info("Error report from " + dnName + ": " + msg);
return;
}
verifyRequest(nodeReg);
if (errorCode == DatanodeProtocol.DISK_ERROR) {
LOG.warn("Disk error on " + dnName + ": " + msg);
} else if (errorCode == DatanodeProtocol.FATAL_DISK_ERROR) {
LOG.warn("Fatal disk error on " + dnName + ": " + msg);
namesystem.getBlockManager().getDatanodeManager().removeDatanode(nodeReg);
} else {
LOG.info("Error report from " + dnName + ": " + msg);
}
}
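For context, a sketch of the DataNode side of Example 2: sending an error report over the protocol. The ErrorReportSender class is hypothetical; namenode stands for an already-constructed DatanodeProtocol proxy and bpRegistration for the registration obtained during the handshake.

import java.io.IOException;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;

public class ErrorReportSender {
  // Hypothetical sender: report a non-fatal disk error to the NameNode.
  static void reportDiskError(DatanodeProtocol namenode,
      DatanodeRegistration bpRegistration, String detail) {
    try {
      namenode.errorReport(bpRegistration,
          DatanodeProtocol.DISK_ERROR, "Disk failure: " + detail);
    } catch (IOException e) {
      // Error reports are best-effort; a failed report should not
      // take the DataNode down.
    }
  }
}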
Example 3: handleDiskError
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; // import the required package/class
private void handleDiskError(String errMsgr) {
final boolean hasEnoughResources = data.hasEnoughResource();
LOG.warn("DataNode.handleDiskError: Keep Running: " + hasEnoughResources);
// If we have enough active valid volumes then we do not want to
// shutdown the DN completely.
int dpError = hasEnoughResources ? DatanodeProtocol.DISK_ERROR
: DatanodeProtocol.FATAL_DISK_ERROR;
metrics.incrVolumeFailures();
//inform NameNodes
for(BPOfferService bpos: blockPoolManager.getAllNamenodeThreads()) {
bpos.trySendErrorReport(dpError, errMsgr);
}
if(hasEnoughResources) {
scheduleAllBlockReport(0);
return; // do not shutdown
}
LOG.warn("DataNode is shutting down: " + errMsgr);
shouldRun = false;
}
Example 4: convert
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; // import the required package/class
public static BlockIdCommandProto convert(BlockIdCommand cmd) {
BlockIdCommandProto.Builder builder = BlockIdCommandProto.newBuilder()
.setBlockPoolId(cmd.getBlockPoolId());
switch (cmd.getAction()) {
case DatanodeProtocol.DNA_CACHE:
builder.setAction(BlockIdCommandProto.Action.CACHE);
break;
case DatanodeProtocol.DNA_UNCACHE:
builder.setAction(BlockIdCommandProto.Action.UNCACHE);
break;
default:
throw new AssertionError("Invalid action");
}
long[] blockIds = cmd.getBlockIds();
for (int i = 0; i < blockIds.length; i++) {
builder.addBlockIds(blockIds[i]);
}
return builder.build();
}
Example 5: replicateBlocks
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; // import the required package/class
/**
* Send a heartbeat to the name-node and replicate blocks if requested.
*/
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
// register datanode
StorageReport[] rep = { new StorageReport(storage,
false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
rep, 0L, 0L, 0, 0, 0, null).getCommands();
if (cmds != null) {
for (DatanodeCommand cmd : cmds) {
if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
// Send a copy of a block to another datanode
BlockCommand bcmd = (BlockCommand)cmd;
return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
bcmd.getTargetStorageIDs());
}
}
}
return 0;
}
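Example 5 handles only DNA_TRANSFER. As a hedged generalization, a DataNode-side dispatch over some of the other action constants might look like the following; the non-transfer branches are illustrative assumptions, not code from this page.

import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

public class HeartbeatDispatch {
  // Hypothetical dispatcher over heartbeat command actions.
  static void dispatch(DatanodeCommand cmd) {
    switch (cmd.getAction()) {
    case DatanodeProtocol.DNA_TRANSFER:
      // Replicate blocks to the given targets, as in Example 5.
      BlockCommand transfer = (BlockCommand) cmd;
      // ... hand off transfer.getBlocks() and transfer.getTargets() ...
      break;
    case DatanodeProtocol.DNA_INVALIDATE:
      // Delete the listed local replicas.
      break;
    case DatanodeProtocol.DNA_REGISTER:
      // NameNode asked the DataNode to re-register.
      break;
    default:
      // Unknown or unhandled actions are ignored here.
    }
  }
}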
Example 6: testZeroLenReplicas
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; // import the required package/class
/**
* BlockRecoveryFI_07. max replica length from all DNs is zero.
*
* @throws IOException in case of an error
*/
@Test
public void testZeroLenReplicas() throws IOException, InterruptedException {
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
DataNode spyDN = spy(dn);
doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).
initReplicaRecovery(any(RecoveringBlock.class));
Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
d.join();
DatanodeProtocol dnP = dn.getActiveNamenodeForBP(POOL_ID);
verify(dnP).commitBlockSynchronization(
block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
}
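For readers unfamiliar with the seven-argument call verified above, here is a hypothetical wrapper spelling out the parameters of commitBlockSynchronization; the parameter comments are a reading of this zero-length-replica scenario, not authoritative documentation.

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

public class CommitSyncSketch {
  // Hypothetical wrapper annotating the call verified in the test above.
  static void commitZeroLenRecovery(DatanodeProtocol nn, ExtendedBlock block,
      long recoveryId) throws IOException {
    nn.commitBlockSynchronization(
        block,                  // block that was being recovered
        recoveryId,             // new generation stamp assigned by recovery
        0,                      // new length: zero in this scenario
        true,                   // closeFile: complete the file at the NN
        true,                   // deleteblock: drop the zero-length replicas
        DatanodeID.EMPTY_ARRAY, // no surviving target datanodes
        null);                  // no target storage IDs
  }
}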
Example 7: testNoReplicaUnderRecovery
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; // import the required package/class
/**
* BlockRecoveryFI_10. DN has no ReplicaUnderRecovery.
*
* @throws IOException in case of an error
*/
@Test
public void testNoReplicaUnderRecovery() throws IOException {
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
dn.data.createRbw(StorageType.DEFAULT, block, false);
try {
dn.syncBlock(rBlock, initBlockRecords(dn));
fail("Sync should fail");
} catch (IOException e) {
// assert on the message instead of discarding the boolean result
GenericTestUtils.assertExceptionContains("Cannot recover ", e);
}
DatanodeProtocol namenode = dn.getActiveNamenodeForBP(POOL_ID);
verify(namenode, never()).commitBlockSynchronization(
any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
anyBoolean(), any(DatanodeID[].class), any(String[].class));
}
Example 8: replicateBlocks
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; // import the required package/class
/**
* Send a heartbeat to the name-node and replicate blocks if requested.
*/
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
// register datanode
StorageReport[] rep = { new StorageReport(storage,
false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
DatanodeCommand[] cmds = dataNodeProto.sendHeartbeat(dnRegistration,
rep, 0L, 0L, 0, 0, 0, null, true).getCommands();
if (cmds != null) {
for (DatanodeCommand cmd : cmds) {
if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
// Send a copy of a block to another datanode
BlockCommand bcmd = (BlockCommand)cmd;
return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
bcmd.getTargetStorageIDs());
}
}
}
return 0;
}
Example 9: testZeroLenReplicas
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; // import the required package/class
/**
* BlockRecoveryFI_07. max replica length from all DNs is zero.
*
* @throws IOException in case of an error
*/
@Test
public void testZeroLenReplicas() throws IOException, InterruptedException {
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).
initReplicaRecovery(any(RecoveringBlock.class));
for(RecoveringBlock rBlock: initRecoveringBlocks()){
BlockRecoveryWorker.RecoveryTaskContiguous RecoveryTaskContiguous =
recoveryWorker.new RecoveryTaskContiguous(rBlock);
BlockRecoveryWorker.RecoveryTaskContiguous spyTask
= spy(RecoveryTaskContiguous);
spyTask.recover();
}
DatanodeProtocol dnP = recoveryWorker.getActiveNamenodeForBP(POOL_ID);
verify(dnP).commitBlockSynchronization(
block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
}
Example 10: testNoReplicaUnderRecovery
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; // import the required package/class
/**
* BlockRecoveryFI_10. DN has no ReplicaUnderRecovery.
*
* @throws IOException in case of an error
*/
@Test
public void testNoReplicaUnderRecovery() throws IOException {
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
dn.data.createRbw(StorageType.DEFAULT, block, false);
BlockRecoveryWorker.RecoveryTaskContiguous RecoveryTaskContiguous =
recoveryWorker.new RecoveryTaskContiguous(rBlock);
try {
RecoveryTaskContiguous.syncBlock(initBlockRecords(dn));
fail("Sync should fail");
} catch (IOException e) {
// assert on the message instead of discarding the boolean result
GenericTestUtils.assertExceptionContains("Cannot recover ", e);
}
DatanodeProtocol namenode = recoveryWorker.getActiveNamenodeForBP(POOL_ID);
verify(namenode, never()).commitBlockSynchronization(
any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
anyBoolean(), any(DatanodeID[].class), any(String[].class));
}
Example 11: testSinglePortStartup
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; // import the required package/class
public void testSinglePortStartup() throws IOException {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
NameNode nn = cluster.getNameNode();
InetSocketAddress dnAddress = nn.getNameNodeDNAddress();
InetSocketAddress clientAddress = nn.getNameNodeAddress();
assertEquals(clientAddress, dnAddress);
DatanodeProtocol dnProtocol = (DatanodeProtocol) RPC.waitForProxy(
DatanodeProtocol.class, DatanodeProtocol.versionID, dnAddress, conf);
// perform a dummy call
dnProtocol.getProtocolVersion(DatanodeProtocol.class.getName(),
DatanodeProtocol.versionID);
ClientProtocol client = (ClientProtocol) RPC.waitForProxy(
ClientProtocol.class, ClientProtocol.versionID, dnAddress, conf);
client.getProtocolVersion(ClientProtocol.class.getName(),
ClientProtocol.versionID);
cluster.shutdown();
}
Example 12: replicateBlocks
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; // import the required package/class
/**
* Send a heartbeat to the name-node and replicate blocks if requested.
*/
int replicateBlocks() throws IOException {
// register datanode
DatanodeCommand[] cmds = nameNode.sendHeartbeat(
dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0);
if (cmds != null) {
for (DatanodeCommand cmd : cmds) {
if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
// Send a copy of a block to another datanode
BlockCommand bcmd = (BlockCommand)cmd;
return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
}
}
}
return 0;
}
Example 13: replicateBlocks
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; // import the required package/class
/**
* Send a heartbeat to the name-node and replicate blocks if requested.
*/
@SuppressWarnings("unused")
int replicateBlocks() throws IOException {
// register datanode
DatanodeCommand[] cmds = nameNode.sendHeartbeat(dnRegistration,
DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0);
if (cmds != null) {
for (DatanodeCommand cmd : cmds) {
if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
// Send a copy of a block to another datanode
BlockCommand bcmd = (BlockCommand) cmd;
return transferBlocks(bcmd.getBlocks(),
bcmd.getTargets());
}
}
}
return 0;
}
Example 14: OfferService
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; // import the required package/class
/**
* Offer service to the specified namenode
*/
public OfferService(AvatarDataNode anode, ServicePair servicePair,
DatanodeProtocol namenode, InetSocketAddress namenodeAddress,
AvatarProtocol avatarnode, InetSocketAddress avatarnodeAddress) {
this.anode = anode;
this.servicePair = servicePair;
this.namenode = namenode;
this.avatarnode = avatarnode;
this.namenodeAddress = namenodeAddress;
this.avatarnodeAddress = avatarnodeAddress;
nsRegistration = servicePair.nsRegistration;
data = anode.data;
myMetrics = anode.myMetrics;
scheduleBlockReport(anode.initialBlockReportDelay);
backlogSize = anode.getConf().getInt("dfs.datanode.blockreceived.backlog", 10000);
fullBlockReportDelay = anode.getConf().getInt(
"dfs.datanode.fullblockreport.delay", 5 * 60 * 1000);
blockReceivedRetryInterval = anode.getConf().getInt(
"dfs.datanode.blockreceived.retry.internval", 10000);
}
Example 15: errorReport
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; // import the required package/class
/**
 * Handle an error report from a datanode.
 */
public void errorReport(DatanodeRegistration nodeReg,
int errorCode,
String msg) throws IOException {
// Log error message from datanode
String dnName = (nodeReg == null ? "unknown DataNode" : nodeReg.getName());
LOG.info("Error report from " + dnName + ": " + msg);
if (errorCode == DatanodeProtocol.NOTIFY) {
return;
}
verifyRequest(nodeReg);
if (errorCode == DatanodeProtocol.DISK_ERROR) {
LOG.warn("Volume failed on " + dnName);
} else if (errorCode == DatanodeProtocol.FATAL_DISK_ERROR) {
namesystem.removeDatanode(nodeReg);
}
}