This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.DatanodeInfo. If you are wondering what DatanodeInfo is for, how to use it, or want to see it in real code, the curated class examples below may help.
The DatanodeInfo class belongs to the org.apache.hadoop.hdfs.protocol package. A total of 15 code examples of the DatanodeInfo class are shown below, ordered by popularity by default.
Example 1: verifyBlockPlacement
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; // import the required package/class
@Override
public BlockPlacementStatus verifyBlockPlacement(String srcPath,
    LocatedBlock lBlk, int numberOfReplicas) {
  DatanodeInfo[] locs = lBlk.getLocations();
  if (locs == null)
    locs = DatanodeDescriptor.EMPTY_ARRAY;
  int numRacks = clusterMap.getNumOfRacks();
  if (numRacks <= 1) // only one rack
    return new BlockPlacementStatusDefault(
        Math.min(numRacks, numberOfReplicas), numRacks);
  int minRacks = Math.min(2, numberOfReplicas);
  // 1. Check that all locations are different.
  // 2. Count locations on different racks.
  Set<String> racks = new TreeSet<String>();
  for (DatanodeInfo dn : locs)
    racks.add(dn.getNetworkLocation());
  return new BlockPlacementStatusDefault(racks.size(), minRacks);
}
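The placement check reduces to counting distinct racks: duplicate network locations collapse inside the TreeSet, so the set size is the rack count. A self-contained sketch of that idea, using plain strings in place of DatanodeInfo#getNetworkLocation() values (the sample locations are made up):

import java.util.Set;
import java.util.TreeSet;

public class RackCountDemo {
  public static void main(String[] args) {
    // hypothetical network locations, one per replica
    String[] locations = { "/rack1", "/rack1", "/rack2" };
    Set<String> racks = new TreeSet<String>();
    for (String loc : locations) {
      racks.add(loc); // duplicates are ignored
    }
    System.out.println("replicas span " + racks.size() + " racks"); // prints 2
  }
}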
Example 2: fetchBlockByteRange
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; // import the required package/class
private void fetchBlockByteRange(LocatedBlock block, long start, long end,
    byte[] buf, int offset,
    Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
    throws IOException {
  block = getBlockAt(block.getStartOffset());
  while (true) {
    DNAddrPair addressPair = chooseDataNode(block, null);
    try {
      actualGetFromOneDataNode(addressPair, block, start, end, buf, offset,
          corruptedBlockMap);
      return;
    } catch (IOException e) {
      // Ignore. Already processed inside the function.
      // Loop through to try the next node.
    }
  }
}
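The while (true) loop is safe because the failure handling lives elsewhere: actualGetFromOneDataNode records failed nodes, and chooseDataNode eventually throws once every replica has been tried. A minimal standalone sketch of that retry shape (Source and readFromAny are hypothetical names, not DFSClient API):

import java.io.IOException;
import java.util.Iterator;

public class RetryLoopDemo {
  interface Source {
    byte[] read() throws IOException;
  }

  static byte[] readFromAny(Iterator<Source> sources) throws IOException {
    while (true) {
      if (!sources.hasNext()) {
        // mirrors chooseDataNode throwing when all nodes are exhausted
        throw new IOException("all sources exhausted");
      }
      try {
        return sources.next().read();
      } catch (IOException e) {
        // ignore and fall through to try the next source
      }
    }
  }
}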
Example 3: seekToNewSource
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; // import the required package/class
/**
 * Seek to given position on a node other than the current node. If
 * a node other than the current node is found, then returns true.
 * If another node could not be found, then returns false.
 */
@Override
public synchronized boolean seekToNewSource(long targetPos) throws IOException {
  boolean markedDead = deadNodes.containsKey(currentNode);
  addToDeadNodes(currentNode);
  DatanodeInfo oldNode = currentNode;
  DatanodeInfo newNode = blockSeekTo(targetPos);
  if (!markedDead) {
    /* Remove it from deadNodes. blockSeekTo could have cleared
     * deadNodes and added currentNode again. That's OK. */
    deadNodes.remove(oldNode);
  }
  if (!oldNode.getDatanodeUuid().equals(newNode.getDatanodeUuid())) {
    currentNode = newNode;
    return true;
  } else {
    return false;
  }
}
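seekToNewSource is reachable from application code through the Seekable interface, which FSDataInputStream implements. A hedged usage sketch, assuming a reachable HDFS cluster and an existing file (the path is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SeekToNewSourceDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    try (FSDataInputStream in = fs.open(new Path("/tmp/example.txt"))) {
      // ask for a different replica serving offset 0
      boolean switched = in.seekToNewSource(0L);
      System.out.println("switched datanode: " + switched);
    }
  }
}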
Example 4: add
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; // import the required package/class
/**
 * Add a block to the block collection
 * which will be invalidated on the specified datanode.
 */
synchronized void add(final Block block, final DatanodeInfo datanode,
    final boolean log) {
  LightWeightHashSet<Block> set = node2blocks.get(datanode);
  if (set == null) {
    set = new LightWeightHashSet<Block>();
    node2blocks.put(datanode, set);
  }
  if (set.add(block)) {
    numBlocks++;
    if (log) {
      NameNode.blockStateChangeLog.info("BLOCK* {}: add {} to {}",
          getClass().getSimpleName(), block, datanode);
    }
  }
}
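The method is the classic map-of-sets pattern: look up the per-node set, create it lazily, and bump the counter only on first-time insertions. The same shape with plain JDK types standing in for DatanodeInfo, Block, and LightWeightHashSet:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class PendingInvalidations {
  private final Map<String, Set<Long>> node2blocks = new HashMap<String, Set<Long>>();
  private int numBlocks;

  synchronized void add(String datanode, long blockId) {
    Set<Long> set = node2blocks.get(datanode);
    if (set == null) {
      set = new HashSet<Long>();
      node2blocks.put(datanode, set);
    }
    if (set.add(blockId)) {
      numBlocks++; // count only blocks not already queued for this node
    }
  }
}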
Example 5: dump
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; // import the required package/class
/** Print the contents to out. */
synchronized void dump(final PrintWriter out) {
  final int size = node2blocks.values().size();
  out.println("Metasave: Blocks " + numBlocks
      + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }
  for (Map.Entry<DatanodeInfo, LightWeightHashSet<Block>> entry : node2blocks.entrySet()) {
    final LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(entry.getKey());
      out.println(blocks);
    }
  }
}
Example 6: createLocatedBlockNoStorageMedia
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; // import the required package/class
private LocatedBlock createLocatedBlockNoStorageMedia() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
          AdminStates.NORMAL)
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
Example 7: compare
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; // import the required package/class
private void compare(DatanodeInfo dn1, DatanodeInfo dn2) {
  assertEquals(dn1.getAdminState(), dn2.getAdminState());
  assertEquals(dn1.getBlockPoolUsed(), dn2.getBlockPoolUsed());
  assertEquals(dn1.getBlockPoolUsedPercent(),
      dn2.getBlockPoolUsedPercent(), DELTA);
  assertEquals(dn1.getCapacity(), dn2.getCapacity());
  assertEquals(dn1.getDatanodeReport(), dn2.getDatanodeReport());
  assertEquals(dn1.getDfsUsed(), dn2.getDfsUsed());
  assertEquals(dn1.getDfsUsedPercent(), dn2.getDfsUsedPercent(), DELTA);
  assertEquals(dn1.getIpAddr(), dn2.getIpAddr());
  assertEquals(dn1.getHostName(), dn2.getHostName());
  assertEquals(dn1.getInfoPort(), dn2.getInfoPort());
  assertEquals(dn1.getIpcPort(), dn2.getIpcPort());
  assertEquals(dn1.getLastUpdate(), dn2.getLastUpdate());
  assertEquals(dn1.getLevel(), dn2.getLevel());
  assertEquals(dn1.getNetworkLocation(), dn2.getNetworkLocation());
}
Example 8: connectToDN
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; // import the required package/class
/**
 * Connect to the given datanode's data transfer port, and return
 * the resulting IOStreamPair. This includes encryption wrapping, etc.
 */
private IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
    LocatedBlock lb) throws IOException {
  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    String dnAddr = dn.getXferAddr(getConf().connectToDnViaHostname);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connecting to datanode " + dnAddr);
    }
    NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
    sock.setSoTimeout(timeout);
    OutputStream unbufOut = NetUtils.getOutputStream(sock);
    InputStream unbufIn = NetUtils.getInputStream(sock);
    IOStreamPair ret = saslClient.newSocketSend(sock, unbufOut, unbufIn, this,
        lb.getBlockToken(), dn);
    success = true;
    return ret;
  } finally {
    if (!success) {
      IOUtils.closeSocket(sock);
    }
  }
}
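The success flag plus finally block is the usual guarded-acquisition idiom: the socket is closed on every failure path, while ownership transfers to the caller on success. The same idiom standalone with plain java.net sockets (host, port, and timeout are illustrative):

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;

public class GuardedConnect {
  static Socket connect(String host, int port, int timeoutMs) throws IOException {
    boolean success = false;
    Socket sock = new Socket();
    try {
      sock.connect(new InetSocketAddress(host, port), timeoutMs);
      sock.setSoTimeout(timeoutMs);
      success = true;
      return sock; // the caller now owns the socket
    } finally {
      if (!success) {
        sock.close(); // close only on the failure path
      }
    }
  }
}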
Example 9: reportTo
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; // import the required package/class
@Override
public void reportTo(DatanodeProtocolClientSideTranslatorPB bpNamenode,
    DatanodeRegistration bpRegistration) throws BPServiceActorActionException {
  if (bpRegistration == null) {
    return;
  }
  DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) };
  String[] uuids = { storageUuid };
  StorageType[] types = { storageType };
  LocatedBlock[] locatedBlock = { new LocatedBlock(block,
      dnArr, uuids, types) };
  try {
    bpNamenode.reportBadBlocks(locatedBlock);
  } catch (RemoteException re) {
    DataNode.LOG.info("reportBadBlock encountered RemoteException for "
        + "block: " + block, re);
  } catch (IOException e) {
    throw new BPServiceActorActionException("Failed to report bad block "
        + block + " to namenode.");
  }
}
Example 10: allocSlot
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; // import the required package/class
public Slot allocSlot(DatanodeInfo datanode, DomainPeer peer,
    MutableBoolean usedPeer, ExtendedBlockId blockId,
    String clientName) throws IOException {
  lock.lock();
  try {
    if (closed) {
      LOG.trace(this + ": the DfsClientShmManager is closed.");
      return null;
    }
    EndpointShmManager shmManager = datanodes.get(datanode);
    if (shmManager == null) {
      shmManager = new EndpointShmManager(datanode);
      datanodes.put(datanode, shmManager);
    }
    return shmManager.allocSlot(peer, usedPeer, clientName, blockId);
  } finally {
    lock.unlock();
  }
}
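allocSlot follows the standard ReentrantLock discipline: acquire, work, release in finally so that early returns cannot leak the lock, plus a lazy per-datanode manager lookup. The locking skeleton in isolation (the class name and return value are placeholders):

import java.util.concurrent.locks.ReentrantLock;

public class LockedAllocator {
  private final ReentrantLock lock = new ReentrantLock();
  private boolean closed;

  Integer alloc() {
    lock.lock();
    try {
      if (closed) {
        return null; // shut down: hand out nothing
      }
      return 42; // placeholder for the real allocation
    } finally {
      lock.unlock(); // always released, even on the early return
    }
  }
}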
Example 11: makeBadBlockList
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; // import the required package/class
private LocatedBlocks makeBadBlockList(LocatedBlocks goodBlockList) {
  LocatedBlock goodLocatedBlock = goodBlockList.get(0);
  LocatedBlock badLocatedBlock = new LocatedBlock(
      goodLocatedBlock.getBlock(),
      new DatanodeInfo[] {
          DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234)
      },
      goodLocatedBlock.getStartOffset(),
      false);
  List<LocatedBlock> badBlocks = new ArrayList<LocatedBlock>();
  badBlocks.add(badLocatedBlock);
  return new LocatedBlocks(goodBlockList.getFileLength(), false,
      badBlocks, null, true,
      null);
}
Example 12: datanodeReport
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; // import the required package/class
DatanodeInfo[] datanodeReport(final DatanodeReportType type
    ) throws AccessControlException, StandbyException {
  checkSuperuserPrivilege();
  checkOperation(OperationCategory.UNCHECKED);
  readLock();
  try {
    checkOperation(OperationCategory.UNCHECKED);
    final DatanodeManager dm = getBlockManager().getDatanodeManager();
    final List<DatanodeDescriptor> results = dm.getDatanodeListForReport(type);
    DatanodeInfo[] arr = new DatanodeInfo[results.size()];
    for (int i = 0; i < arr.length; i++) {
      arr[i] = new DatanodeInfo(results.get(i));
    }
    return arr;
  } finally {
    readUnlock();
  }
}
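On the client side the same report is exposed through DistributedFileSystem#getDataNodeStats(DatanodeReportType) in the Hadoop 2.x line these examples appear to come from. A hedged sketch that prints dead datanodes, assuming fs.defaultFS points at an HDFS cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

public class DeadNodesReport {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      for (DatanodeInfo dn : dfs.getDataNodeStats(DatanodeReportType.DEAD)) {
        System.out.println("dead: " + dn.getXferAddr());
      }
    }
  }
}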
Example 13: getDatanodeStorageReport
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; // import the required package/class
DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type
    ) throws AccessControlException, StandbyException {
  checkSuperuserPrivilege();
  checkOperation(OperationCategory.UNCHECKED);
  readLock();
  try {
    checkOperation(OperationCategory.UNCHECKED);
    final DatanodeManager dm = getBlockManager().getDatanodeManager();
    final List<DatanodeDescriptor> datanodes = dm.getDatanodeListForReport(type);
    DatanodeStorageReport[] reports = new DatanodeStorageReport[datanodes.size()];
    for (int i = 0; i < reports.length; i++) {
      final DatanodeDescriptor d = datanodes.get(i);
      reports[i] = new DatanodeStorageReport(new DatanodeInfo(d),
          d.getStorageReports());
    }
    return reports;
  } finally {
    readUnlock();
  }
}
Example 14: addBlock
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; // import the required package/class
@Override
public LocatedBlock addBlock(String src, String clientName,
    ExtendedBlock previous, DatanodeInfo[] excludedNodes, long fileId,
    String[] favoredNodes)
    throws IOException {
  checkNNStartup();
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*BLOCK* NameNode.addBlock: file " + src
        + " fileId=" + fileId + " for " + clientName);
  }
  Set<Node> excludedNodesSet = null;
  if (excludedNodes != null) {
    excludedNodesSet = new HashSet<Node>(excludedNodes.length);
    for (Node node : excludedNodes) {
      excludedNodesSet.add(node);
    }
  }
  List<String> favoredNodesList = (favoredNodes == null) ? null
      : Arrays.asList(favoredNodes);
  LocatedBlock locatedBlock = namesystem.getAdditionalBlock(src, fileId,
      clientName, previous, excludedNodesSet, favoredNodesList);
  if (locatedBlock != null) {
    metrics.incrAddBlockOps();
  }
  return locatedBlock;
}
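The manual copy into excludedNodesSet can also be written with the stock JDK idiom; the behavior is identical (stand-in strings replace DatanodeInfo here):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class ArrayToSetDemo {
  public static void main(String[] args) {
    String[] excludedNodes = { "dn1", "dn2" }; // stand-ins for DatanodeInfo[]
    Set<String> excludedNodesSet = (excludedNodes == null) ? null
        : new HashSet<String>(Arrays.asList(excludedNodes));
    System.out.println(excludedNodesSet);
  }
}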
Example 15: shouldIgnore
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; // import the required package/class
private boolean shouldIgnore(DatanodeInfo dn) {
  // ignore decommissioned nodes
  final boolean decommissioned = dn.isDecommissioned();
  // ignore decommissioning nodes
  final boolean decommissioning = dn.isDecommissionInProgress();
  // ignore nodes in exclude list
  final boolean excluded = Util.isExcluded(excludedNodes, dn);
  // ignore nodes not in the include list (if include list is not empty)
  final boolean notIncluded = !Util.isIncluded(includedNodes, dn);
  if (decommissioned || decommissioning || excluded || notIncluded) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("Excluding datanode " + dn + ": " + decommissioned + ", "
          + decommissioning + ", " + excluded + ", " + notIncluded);
    }
    return true;
  }
  return false;
}
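Util.isExcluded and Util.isIncluded are Balancer-internal helpers, but the filter itself is easy to restate. A hedged sketch over a DatanodeInfo list, with plain hostname sets standing in for the real include/exclude handling (the real helpers also match by IP and port):

import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

public class DatanodeFilter {
  static List<DatanodeInfo> usable(List<DatanodeInfo> nodes,
      Set<String> excludedHosts, Set<String> includedHosts) {
    List<DatanodeInfo> result = new ArrayList<DatanodeInfo>();
    for (DatanodeInfo dn : nodes) {
      if (dn.isDecommissioned() || dn.isDecommissionInProgress()) {
        continue; // skip nodes leaving the cluster
      }
      if (excludedHosts.contains(dn.getHostName())) {
        continue; // explicitly excluded
      }
      if (!includedHosts.isEmpty() && !includedHosts.contains(dn.getHostName())) {
        continue; // include list is active and this node is not on it
      }
      result.add(dn);
    }
    return result;
  }
}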