本文整理汇总了Java中org.apache.hadoop.hdfs.protocol.UnregisteredNodeException类的典型用法代码示例。如果您正苦于以下问题:Java UnregisteredNodeException类的具体用法?Java UnregisteredNodeException怎么用?Java UnregisteredNodeException使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
UnregisteredNodeException类属于org.apache.hadoop.hdfs.protocol包,在下文中一共展示了UnregisteredNodeException类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: verifyJournalRequest
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; //导入依赖的package包/类
/**
 * Verifies that a journal request originates from the expected namespace
 * and cluster before it is honored.
 *
 * @param journalInfo identity (layout version, namespace ID, cluster ID)
 *                    claimed by the requesting journal node
 * @throws UnregisteredNodeException if the namespace ID or cluster ID does
 *         not match this namesystem's
 * @throws IOException if the layout version is incompatible
 */
private void verifyJournalRequest(JournalInfo journalInfo)
throws IOException {
verifyLayoutVersion(journalInfo.getLayoutVersion());
String errorMsg = null;
int expectedNamespaceID = namesystem.getNamespaceInfo().getNamespaceID();
if (journalInfo.getNamespaceId() != expectedNamespaceID) {
errorMsg = "Invalid namespaceID in journal request - expected " + expectedNamespaceID
    + " actual " + journalInfo.getNamespaceId();
LOG.warn(errorMsg);
throw new UnregisteredNodeException(journalInfo);
}
if (!journalInfo.getClusterId().equals(namesystem.getClusterId())) {
// BUGFIX: "expected" is this namesystem's cluster ID and "actual" is the
// one carried by the request; the two values were swapped in the message.
errorMsg = "Invalid clusterId in journal request - expected "
    + namesystem.getClusterId() + " actual " + journalInfo.getClusterId();
LOG.warn(errorMsg);
throw new UnregisteredNodeException(journalInfo);
}
}
示例2: removeDatanode
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; //导入依赖的package包/类
/**
 * Removes a datanode from the cluster, taking the namesystem write lock
 * for the duration of the update.
 * @throws UnregisteredNodeException
 */
public void removeDatanode(final DatanodeID node
) throws UnregisteredNodeException {
namesystem.writeLock();
try {
  final DatanodeDescriptor toRemove = getDatanode(node);
  if (toRemove == null) {
    // Unknown node: nothing to do beyond recording the request.
    NameNode.stateChangeLog.warn("BLOCK* removeDatanode: "
        + node + " does not exist");
  } else {
    removeDatanode(toRemove);
  }
} finally {
  namesystem.writeUnlock();
}
}
示例3: getDatanode
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; //导入依赖的package包/类
/**
 * Get a registered datanode descriptor by storage ID.
 *
 * @param nodeID identity of the node to look up; may be null
 * @return the matching DatanodeDescriptor, or null if nodeID is null,
 *         carries no storage ID, or no node is registered under that ID
 * @throws UnregisteredNodeException if a node is registered under the
 *         storage ID but its transfer address differs from nodeID's
 *         (a stale or conflicting registration)
 */
public DatanodeDescriptor getDatanode(DatanodeID nodeID)
throws UnregisteredNodeException {
DatanodeDescriptor node = null;
// Guard against a missing or empty storage ID before the lookup.
// isEmpty() replaces the equivalent but less idiomatic equals("").
if (nodeID != null && nodeID.getStorageID() != null &&
    !nodeID.getStorageID().isEmpty()) {
  node = getDatanode(nodeID.getStorageID());
}
if (node == null) {
  return null;
}
if (!node.getXferAddr().equals(nodeID.getXferAddr())) {
  // The storage ID resolves to a different host:port; refuse to hand back
  // the wrong node and treat the caller as unregistered instead.
  final UnregisteredNodeException e =
      new UnregisteredNodeException(nodeID, node);
  NameNode.stateChangeLog
      .fatal("BLOCK* NameSystem.getDatanode: " + e.getLocalizedMessage());
  throw e;
}
return node;
}
示例4: verifyRequest
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; //导入依赖的package包/类
/**
 * Checks that the node's registration ID matches the one this namesystem
 * issues; rejects the request otherwise.
 *
 * @param nodeReg node registration
 * @throws UnregisteredNodeException if the registration is invalid
 */
private void verifyRequest(NodeRegistration nodeReg) throws IOException {
// verify registration ID
final String actualId = nodeReg.getRegistrationID();
final String expectedId = namesystem.getRegistrationID();
if (expectedId.equals(actualId)) {
  return;
}
LOG.warn("Registration IDs mismatched: the "
    + nodeReg.getClass().getSimpleName() + " ID is " + actualId
    + " but the expected ID is " + expectedId);
throw new UnregisteredNodeException(nodeReg);
}
示例5: invalidateWorkForOneNode
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; //导入依赖的package包/类
/**
 * Computes the set of blocks to invalidate for the given datanode from
 * {@link #invalidateBlocks}.
 *
 * @return number of blocks scheduled for removal during this iteration.
 */
private int invalidateWorkForOneNode(DatanodeInfo dn) {
final List<Block> toInvalidate;
namesystem.writeLock();
try {
  // blocks should not be replicated or removed if safe mode is on
  if (namesystem.isInSafeMode()) {
    LOG.debug("In safemode, not computing replication work");
    return 0;
  }
  try {
    final DatanodeDescriptor descriptor = datanodeManager.getDatanode(dn);
    if (descriptor == null) {
      // The node vanished; drop its pending invalidation work.
      LOG.warn("DataNode " + dn + " cannot be found with UUID " +
          dn.getDatanodeUuid() + ", removing block invalidation work.");
      invalidateBlocks.remove(dn);
      return 0;
    }
    toInvalidate = invalidateBlocks.invalidateWork(descriptor);
  } catch (UnregisteredNodeException une) {
    return 0;
  }
  if (toInvalidate == null) {
    return 0;
  }
} finally {
  namesystem.writeUnlock();
}
// Log outside the lock to keep the critical section short.
blockLog.info("BLOCK* {}: ask {} to delete {}", getClass().getSimpleName(),
    dn, toInvalidate);
return toInvalidate.size();
}
示例6: invalidateWorkForOneNode
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; //导入依赖的package包/类
/**
 * Determines which blocks queued in {@link #invalidateBlocks} should be
 * deleted on the given datanode during this round.
 *
 * @return number of blocks scheduled for removal during this iteration.
 */
private int invalidateWorkForOneNode(DatanodeInfo dn) {
final List<Block> blocksToRemove;
namesystem.writeLock();
try {
  // blocks should not be replicated or removed if safe mode is on
  if (namesystem.isInSafeMode()) {
    LOG.debug("In safemode, not computing replication work");
    return 0;
  }
  try {
    DatanodeDescriptor descriptor = datanodeManager.getDatanode(dn);
    if (descriptor == null) {
      LOG.warn("DataNode " + dn + " cannot be found with UUID " +
          dn.getDatanodeUuid() + ", removing block invalidation work.");
      invalidateBlocks.remove(dn);
      return 0;
    }
    blocksToRemove = invalidateBlocks.invalidateWork(descriptor);
    if (blocksToRemove == null) {
      return 0;
    }
  } catch (UnregisteredNodeException e) {
    // Node is no longer registered; nothing to schedule for it.
    return 0;
  }
} finally {
  namesystem.writeUnlock();
}
blockLog.debug("BLOCK* {}: ask {} to delete {}", getClass().getSimpleName(),
    dn, blocksToRemove);
return blocksToRemove.size();
}
示例7: verifyRequest
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; //导入依赖的package包/类
/**
 * Verifies the given registration against this namesystem's layout
 * version and registration ID.
 *
 * @param nodeReg node registration
 * @throws UnregisteredNodeException if the registration is invalid
 */
void verifyRequest(NodeRegistration nodeReg) throws IOException {
verifyLayoutVersion(nodeReg.getVersion());
final String expected = namesystem.getRegistrationID();
final String received = nodeReg.getRegistrationID();
if (expected.equals(received)) {
  return;
}
LOG.warn("Invalid registrationID - expected: "
    + expected + " received: " + received);
throw new UnregisteredNodeException(nodeReg);
}
示例8: getDatanode
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; //导入依赖的package包/类
/**
 * Get data node by storage ID.
 *
 * @param nodeID
 * @return DatanodeDescriptor or null if the node is not found.
 * @throws UnregisteredNodeException
 */
public DatanodeDescriptor getDatanode(DatanodeID nodeID
) throws UnregisteredNodeException {
final DatanodeDescriptor node = getDatanode(nodeID.getStorageID());
if (node == null) {
  return null;
}
if (node.getXferAddr().equals(nodeID.getXferAddr())) {
  return node;
}
// The storage ID is known but registered under a different transfer
// address: fail loudly rather than return the wrong node.
final UnregisteredNodeException e = new UnregisteredNodeException(
    nodeID, node);
NameNode.stateChangeLog.fatal("BLOCK* NameSystem.getDatanode: "
    + e.getLocalizedMessage());
throw e;
}
示例9: invalidateWorkForOneNode
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; //导入依赖的package包/类
/**
 * Collects the blocks pending invalidation for <i>dn</i> from
 * {@link #invalidateBlocks}.
 *
 * @return number of blocks scheduled for removal during this iteration.
 */
private int invalidateWorkForOneNode(DatanodeInfo dn) {
final List<Block> toInvalidate;
namesystem.writeLock();
try {
  // blocks should not be replicated or removed if safe mode is on
  if (namesystem.isInSafeMode()) {
    LOG.debug("In safemode, not computing replication work");
    return 0;
  }
  try {
    toInvalidate =
        invalidateBlocks.invalidateWork(datanodeManager.getDatanode(dn));
  } catch (UnregisteredNodeException une) {
    // Unknown node: no work to hand out.
    return 0;
  }
  if (toInvalidate == null) {
    return 0;
  }
} finally {
  namesystem.writeUnlock();
}
if (NameNode.stateChangeLog.isInfoEnabled()) {
  NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName()
      + ": ask " + dn + " to delete " + toInvalidate);
}
return toInvalidate.size();
}
示例10: verifyRequest
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; //导入依赖的package包/类
/**
 * Validates a node registration: the layout version must be compatible
 * and the registration ID must match this namesystem's.
 *
 * @param nodeReg
 *          node registration
 * @throws UnregisteredNodeException
 *           if the registration is invalid
 */
void verifyRequest(NodeRegistration nodeReg) throws IOException {
verifyLayoutVersion(nodeReg.getVersion());
final String registrationId = nodeReg.getRegistrationID();
if (!namesystem.getRegistrationID().equals(registrationId)) {
  LOG.warn("Invalid registrationID - expected: " +
      namesystem.getRegistrationID() + " received: " +
      registrationId);
  throw new UnregisteredNodeException(nodeReg);
}
}
示例11: removeDatanode
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; //导入依赖的package包/类
/**
 * Remove a datanode from the cluster if it is registered; otherwise only
 * log a warning. Unlike the locking variant of this method, no lock is
 * taken here — presumably the caller holds it; TODO confirm against callers.
 *
 * @param node identity of the datanode to remove
 * @throws UnregisteredNodeException if the node's transfer address
 *         conflicts with its registered storage ID (from getDatanode)
 * @throws IOException
 */
public void removeDatanode(final DatanodeID node
// Called by NameNodeRpcServer.
) throws UnregisteredNodeException, IOException {
final DatanodeDescriptor descriptor = getDatanode(node);
if (descriptor != null) {
removeDatanode(descriptor);
} else {
// Unknown node: record the request but do nothing.
NameNode.stateChangeLog
.warn("BLOCK* removeDatanode: " + node + " does not exist");
}
}
示例12: getBlocksWithLocations
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; //导入依赖的package包/类
/** Get all blocks with location information from a datanode. */
private BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
    final long size) throws UnregisteredNodeException {
final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
if (node == null) {
  blockLog.warn("BLOCK* getBlocks: Asking for blocks from an" +
      " unrecorded node {}", datanode);
  throw new HadoopIllegalArgumentException(
      "Datanode " + datanode + " not found.");
}
final int blockCount = node.numBlocks();
if (blockCount == 0) {
  return new BlocksWithLocations(new BlockWithLocations[0]);
}
// Begin at a random position so repeated calls sample different blocks.
final int offset = DFSUtil.getRandom().nextInt(blockCount);
Iterator<BlockInfoContiguous> it = node.getBlockIterator();
for (int skipped = 0; skipped < offset; skipped++) {
  it.next();
}
final List<BlockWithLocations> collected =
    new ArrayList<BlockWithLocations>();
long accumulated = 0;
// First pass: from the random offset to the end, complete blocks only.
while (accumulated < size && it.hasNext()) {
  final BlockInfoContiguous candidate = it.next();
  if (candidate.isComplete()) {
    accumulated += addBlock(candidate, collected);
  }
}
// Second pass: wrap around and scan the blocks before the offset.
if (accumulated < size) {
  it = node.getBlockIterator(); // start from the beginning
  for (int i = 0; i < offset && accumulated < size; i++) {
    final BlockInfoContiguous candidate = it.next();
    if (candidate.isComplete()) {
      accumulated += addBlock(candidate, collected);
    }
  }
}
return new BlocksWithLocations(
    collected.toArray(new BlockWithLocations[collected.size()]));
}
示例13: getBlocksWithLocations
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; //导入依赖的package包/类
/** Get all blocks with location information from a datanode. */
private BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
    final long size) throws UnregisteredNodeException {
final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
if (node == null) {
  blockLog.warn("BLOCK* getBlocks: Asking for blocks from an" +
      " unrecorded node {}", datanode);
  throw new HadoopIllegalArgumentException(
      "Datanode " + datanode + " not found.");
}
final int total = node.numBlocks();
if (total == 0) {
  return new BlocksWithLocations(new BlockWithLocations[0]);
}
// Pick a random starting block so successive calls spread over the node.
final int start = ThreadLocalRandom.current().nextInt(total);
Iterator<BlockInfo> blockIter = node.getBlockIterator();
for (int skipped = 0; skipped < start; skipped++) {
  blockIter.next();
}
final List<BlockWithLocations> picked = new ArrayList<BlockWithLocations>();
long gathered = 0;
// Walk from the starting block to the end, taking only complete blocks.
while (gathered < size && blockIter.hasNext()) {
  final BlockInfo candidate = blockIter.next();
  if (candidate.isComplete()) {
    gathered += addBlock(candidate, picked);
  }
}
// If the quota is unmet, wrap around to the blocks before `start`.
if (gathered < size) {
  blockIter = node.getBlockIterator(); // start from the beginning
  for (int i = 0; i < start && gathered < size; i++) {
    final BlockInfo candidate = blockIter.next();
    if (candidate.isComplete()) {
      gathered += addBlock(candidate, picked);
    }
  }
}
return new BlocksWithLocations(
    picked.toArray(new BlockWithLocations[picked.size()]));
}
示例14: getBlocksWithLocations
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; //导入依赖的package包/类
/** Get all blocks with location information from a datanode. */
private BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
    final long size) throws UnregisteredNodeException {
final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
if (node == null) {
  blockLog.warn("BLOCK* getBlocks: Asking for blocks from an" +
      " unrecorded node {}", datanode);
  throw new HadoopIllegalArgumentException(
      "Datanode " + datanode + " not found.");
}
final int available = node.numBlocks();
if (available == 0) {
  return new BlocksWithLocations(new BlockWithLocations[0]);
}
// Randomize the starting position so calls don't always return the
// same leading blocks.
final int firstIndex = DFSUtil.getRandom().nextInt(available);
Iterator<BlockInfo> cursor = node.getBlockIterator();
for (int skipped = 0; skipped < firstIndex; skipped++) {
  cursor.next();
}
final List<BlockWithLocations> chosen = new ArrayList<BlockWithLocations>();
long bytesSoFar = 0;
// Scan forward from the random start, counting only complete blocks.
while (bytesSoFar < size && cursor.hasNext()) {
  final BlockInfo block = cursor.next();
  if (block.isComplete()) {
    bytesSoFar += addBlock(block, chosen);
  }
}
// Wrap around to the beginning if the size quota is still unmet.
if (bytesSoFar < size) {
  cursor = node.getBlockIterator(); // start from the beginning
  for (int i = 0; i < firstIndex && bytesSoFar < size; i++) {
    final BlockInfo block = cursor.next();
    if (block.isComplete()) {
      bytesSoFar += addBlock(block, chosen);
    }
  }
}
return new BlocksWithLocations(
    chosen.toArray(new BlockWithLocations[chosen.size()]));
}
示例15: getBlocksWithLocations
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; //导入依赖的package包/类
/** Get all blocks with location information from a datanode. */
private BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
    final long size) throws UnregisteredNodeException {
final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
if (node == null) {
  blockLog.warn("BLOCK* getBlocks: "
      + "Asking for blocks from an unrecorded node " + datanode);
  throw new HadoopIllegalArgumentException(
      "Datanode " + datanode + " not found.");
}
final int blocksOnNode = node.numBlocks();
if (blocksOnNode == 0) {
  return new BlocksWithLocations(new BlockWithLocations[0]);
}
// Choose a random anchor so repeated calls cover different block ranges.
final int anchor = DFSUtil.getRandom().nextInt(blocksOnNode);
Iterator<BlockInfo> walker = node.getBlockIterator();
for (int skipped = 0; skipped < anchor; skipped++) {
  walker.next();
}
final List<BlockWithLocations> selected =
    new ArrayList<BlockWithLocations>();
long sizeSoFar = 0;
// Forward pass from the anchor, including only complete blocks.
while (sizeSoFar < size && walker.hasNext()) {
  final BlockInfo block = walker.next();
  if (block.isComplete()) {
    sizeSoFar += addBlock(block, selected);
  }
}
// Second pass over the prefix if the size target wasn't reached.
if (sizeSoFar < size) {
  walker = node.getBlockIterator(); // start from the beginning
  for (int i = 0; i < anchor && sizeSoFar < size; i++) {
    final BlockInfo block = walker.next();
    if (block.isComplete()) {
      sizeSoFar += addBlock(block, selected);
    }
  }
}
return new BlocksWithLocations(
    selected.toArray(new BlockWithLocations[selected.size()]));
}