本文整理汇总了Java中org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas.liveReplicas方法的典型用法代码示例。如果您正苦于以下问题:Java NumberReplicas.liveReplicas方法的具体用法?Java NumberReplicas.liveReplicas怎么用?Java NumberReplicas.liveReplicas使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas
的用法示例。
在下文中一共展示了NumberReplicas.liveReplicas方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: logBlockReplicationInfo
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas; //导入方法依赖的package包/类
/**
 * Logs a detailed replication summary for one block hosted on the given
 * datanode: expected vs. live counts, corrupt/decommissioned/excess replica
 * counts, open-file status, and the list of datanodes holding the block.
 */
private void logBlockReplicationInfo(Block block, DatanodeDescriptor srcNode,
    NumberReplicas num) {
  int live = num.liveReplicas();
  int expected = getReplication(block);
  INode fileINode = blocksMap.getINode(block);
  // Collect the names of every datanode currently holding this block.
  StringBuilder holders = new StringBuilder();
  for (Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block);
       it.hasNext();) {
    holders.append(it.next().name).append(" ");
  }
  FSNamesystem.LOG.info("Block: " + block + ", Expected Replicas: "
      + expected + ", live replicas: " + live
      + ", corrupt replicas: " + num.corruptReplicas()
      + ", decommissioned replicas: " + num.decommissionedReplicas()
      + ", excess replicas: " + num.excessReplicas()
      + ", Is Open File: " + fileINode.isUnderConstruction()
      + ", Datanodes having this block: " + holders + ", Current Datanode: "
      + srcNode.name + ", Is current datanode decommissioning: "
      + srcNode.isDecommissionInProgress());
}
示例2: updateNeededReplications
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas; //导入方法依赖的package包/类
/**
 * Re-evaluates a block's replication need under the namesystem write lock
 * and either updates its entry in neededReplications or removes it, using
 * the supplied deltas to reconstruct the pre-change counts on removal.
 */
void updateNeededReplications(Block block, int curReplicasDelta,
    int expectedReplicasDelta) {
  namesystem.writeLock();
  try {
    NumberReplicas counts = countNodes(block);
    int expected = getReplication(block);
    int live = counts.liveReplicas();
    if (isNeededReplication(block, expected, live)) {
      neededReplications.update(block, live,
          counts.decommissionedReplicas(), expected,
          curReplicasDelta, expectedReplicasDelta);
    } else {
      // No longer under-replicated: remove using the pre-delta counts
      // so the queue lookup matches the previously-registered entry.
      neededReplications.remove(block,
          live - curReplicasDelta,
          counts.decommissionedReplicas(),
          expected - expectedReplicasDelta);
    }
  } finally {
    namesystem.writeUnlock();
  }
}
示例3: processMisReplicatedBlocks
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas; //导入方法依赖的package包/类
/**
 * For each block in the name-node verify whether it belongs to any file,
 * over or under replicated. Place it into the respective queue.
 * Runs under the namesystem write lock; logs summary counts afterwards.
 */
void processMisReplicatedBlocks() {
  long nrInvalid = 0;
  long nrOverReplicated = 0;
  long nrUnderReplicated = 0;
  namesystem.writeLock();
  try {
    neededReplications.clear();
    for (BlockInfo block : blocksMap.getBlocks()) {
      INodeFile owner = block.getINode();
      if (owner == null) {
        // Orphaned block (no owning file): schedule it for deletion.
        nrInvalid++;
        addToInvalidates(block);
        continue;
      }
      // Compare current live replica count against the file's target.
      short expected = owner.getReplication();
      NumberReplicas counts = countNodes(block);
      int live = counts.liveReplicas();
      // Under-replicated: queue it (short-circuit preserves the original
      // "only count when add() succeeds" behavior).
      if (isNeededReplication(block, expected, live)
          && neededReplications.add(block, live,
              counts.decommissionedReplicas(), expected)) {
        nrUnderReplicated++;
      }
      if (live > expected) {
        // Over-replicated block.
        nrOverReplicated++;
        processOverReplicatedBlock(block, expected, null, null);
      }
    }
  } finally {
    namesystem.writeUnlock();
  }
  FSNamesystem.LOG.info("Total number of blocks = " + blocksMap.size());
  FSNamesystem.LOG.info("Number of invalid blocks = " + nrInvalid);
  FSNamesystem.LOG.info("Number of under-replicated blocks = " + nrUnderReplicated);
  FSNamesystem.LOG.info("Number of over-replicated blocks = " + nrOverReplicated);
}
示例4: testRaidMissingBlocksByTakingDownDataNode
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas; //导入方法依赖的package包/类
/**
 * Take down a datanode to generate raid missing blocks, and then bring it back
 * will restore the missing blocks.
 */
@Test
public void testRaidMissingBlocksByTakingDownDataNode() throws IOException, InterruptedException {
MiniDFSCluster cluster = null;
Configuration conf = new Configuration();
try {
// Single-datanode cluster: stopping that one node takes every replica offline.
cluster = new MiniDFSCluster(conf, 1, true, null);
final FSNamesystem namesystem = cluster.getNameNode().namesystem;
final DistributedFileSystem dfs = DFSUtil.convertToDFS(cluster.getFileSystem());
String filePath = "/test/file1";
RaidCodec rsCodec = RaidCodec.getCodec("rs");
// Fabricates an RS-raided file directly in the namespace (no real raid job).
RaidDFSUtil.constructFakeRaidFile(dfs, filePath, rsCodec);
DatanodeDescriptor[] datanodes = (DatanodeDescriptor[])
namesystem.heartbeats.toArray(
new DatanodeDescriptor[1]);
assertEquals(1, datanodes.length);
// shutdown the datanode
DataNodeProperties dnprop = shutdownDataNode(cluster, datanodes[0]);
// With the only datanode down, all stripe blocks should be counted as
// raid-missing while the non-raid counters remain zero.
assertEquals(rsCodec.numStripeBlocks, namesystem.getRaidMissingBlocksCount());
assertEquals(0, namesystem.getMissingBlocksCount()); // zero non-raid missing block
assertEquals(0, namesystem.getNonCorruptUnderReplicatedBlocks());
// bring up the datanode
cluster.restartDataNode(dnprop);
// Wait for block report
LOG.info("wait for its block report to come in");
NumberReplicas num;
FileStatus stat = dfs.getFileStatus(new Path(filePath));
LocatedBlocks blocks = dfs.getClient().
getLocatedBlocks(filePath, 0, stat.getLen());
long startTime = System.currentTimeMillis();
// Poll (1s interval, up to 30s) until the restarted datanode's block report
// brings total live replicas up to the codec's data block count.
do {
Thread.sleep(1000);
int totalCount = 0;
namesystem.readLock();
try {
for (LocatedBlock block : blocks.getLocatedBlocks()) {
num = namesystem.countNodes(block.getBlock());
totalCount += num.liveReplicas();
}
if (totalCount == rsCodec.numDataBlocks) {
break; // readUnlock still runs via the finally before the loop exits
} else {
LOG.info("wait for block report, received total replicas: " + totalCount);
}
} finally {
namesystem.readUnlock();
}
} while (System.currentTimeMillis() - startTime < 30000);
// After the block report, no blocks should be missing in either category.
// NOTE(review): if the 30s timeout expires without a full report these
// asserts fail — that is the intended failure mode of this test.
assertEquals(0, namesystem.getRaidMissingBlocksCount());
assertEquals(0, namesystem.getMissingBlocksCount()); // zero non-raid missing block
assertEquals(0, namesystem.getNonCorruptUnderReplicatedBlocks());
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
示例5: metaSave
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas; //导入方法依赖的package包/类
/**
 * Dumps replication metadata to the given writer: blocks waiting for
 * replication (with per-datanode state), pending replications, and blocks
 * queued for deletion. Output format is consumed by operators/tools and is
 * preserved exactly.
 */
void metaSave(PrintWriter out) {
  //
  // Dump contents of neededReplication
  //
  synchronized (neededReplications) {
    out.println("Metasave: Blocks waiting for replication: " +
        neededReplications.size());
    for (Block block : neededReplications) {
      List<DatanodeDescriptor> containingNodes =
          new ArrayList<DatanodeDescriptor>();
      NumberReplicas counts = new NumberReplicas();
      // source node returned is not used
      chooseSourceDatanode(block, containingNodes, counts);
      int usable = counts.liveReplicas() + counts.decommissionedReplicas();
      if (block instanceof BlockInfo) {
        out.print(((BlockInfo) block).getINode().getFullPathName() + ": ");
      }
      // l: == live:, d: == decommissioned c: == corrupt e: == excess
      out.print(block + ((usable > 0) ? "" : " MISSING") +
          " (replicas:" +
          " l: " + counts.liveReplicas() +
          " d: " + counts.decommissionedReplicas() +
          " c: " + counts.corruptReplicas() +
          " e: " + counts.excessReplicas() + ") ");
      Collection<DatanodeDescriptor> corruptNodes =
          corruptReplicas.getNodes(block);
      Iterator<DatanodeDescriptor> nodes = blocksMap.nodeIterator(block);
      while (nodes.hasNext()) {
        DatanodeDescriptor node = nodes.next();
        String state = "";
        if (corruptNodes != null && corruptNodes.contains(node)) {
          state = "(corrupt)";
        } else if (node.isDecommissioned()
            || node.isDecommissionInProgress()) {
          state = "(decommissioned)";
        }
        out.print(" " + node + state + " : ");
      }
      out.println("");
    }
  }
  //
  // Dump blocks from pendingReplication
  //
  pendingReplications.metaSave(out);
  //
  // Dump blocks that are waiting to be deleted
  //
  dumpRecentInvalidateSets(out);
}
示例6: isReplicationInProgress
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas; //导入方法依赖的package包/类
/**
 * Return true if there are any blocks on this node that have not
 * yet reached their replication factor. Otherwise returns false.
 * As a side effect, updates the node's decommissioningStatus counters and
 * re-queues any needed blocks that are absent from both the needed and
 * pending replication queues.
 */
boolean isReplicationInProgress(DatanodeDescriptor srcNode) {
  boolean underReplicationFound = false;
  int underReplicatedBlocks = 0;
  int decommissionOnlyReplicas = 0;
  int underReplicatedInOpenFiles = 0;
  final Iterator<? extends Block> it = srcNode.getBlockIterator();
  while (it.hasNext()) {
    final Block block = it.next();
    INode fileINode = blocksMap.getINode(block);
    if (fileINode == null) {
      continue; // block no longer belongs to any file
    }
    NumberReplicas num = countNodes(block);
    int curReplicas = num.liveReplicas();
    int curExpectedReplicas = getReplication(block);
    if (!isNeededReplication(block, curExpectedReplicas, curReplicas)) {
      continue;
    }
    if (curExpectedReplicas > curReplicas) {
      // Log info about one block for this node which needs replication
      if (!underReplicationFound) {
        underReplicationFound = true;
        logBlockReplicationInfo(block, srcNode, num);
      }
      underReplicatedBlocks++;
      if (curReplicas == 0 && num.decommissionedReplicas() > 0) {
        decommissionOnlyReplicas++;
      }
      if (fileINode.isUnderConstruction()) {
        underReplicatedInOpenFiles++;
      }
    }
    if (!neededReplications.contains(block)
        && pendingReplications.getNumReplicas(block) == 0) {
      // These blocks have been reported from the datanode
      // after the startDecommission method has been executed. These
      // blocks were in flight when the decommissioning was started.
      neededReplications.add(block,
          curReplicas,
          num.decommissionedReplicas(),
          curExpectedReplicas);
    }
  }
  srcNode.decommissioningStatus.set(underReplicatedBlocks,
      decommissionOnlyReplicas,
      underReplicatedInOpenFiles);
  return underReplicationFound;
}