Java NumberReplicas.decommissionedReplicas Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas.decommissionedReplicas. If you are unsure what NumberReplicas.decommissionedReplicas does or how to call it, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas.


Below are 4 code examples of the NumberReplicas.decommissionedReplicas method, sorted by popularity.
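Before the examples, here is a minimal sketch of the typical call pattern. It is an illustration, not code from any of the projects below, and it assumes you already hold a reference to the FSNamesystem and the Block in question, as the test examples on this page do:

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas;

// Count a block's replicas and read off how many reside on decommissioned
// datanodes. countNodes is called under the namesystem read lock, matching
// the locking discipline used in the examples below.
int countDecommissionedReplicas(FSNamesystem namesystem, Block block) {
  namesystem.readLock();
  try {
    NumberReplicas num = namesystem.countNodes(block);
    return num.decommissionedReplicas();
  } finally {
    namesystem.readUnlock();
  }
}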

Example 1: testUnderReplicationWithDecommissionDataNode

import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas; // import the class that declares the method
public void testUnderReplicationWithDecommissionDataNode() throws Exception {
  final Configuration conf = new Configuration();
  final short REPLICATION_FACTOR = (short)1;
  File f = new File(HOST_FILE_PATH);
  if (f.exists()) {
    f.delete();
  }
  f.createNewFile();
  conf.set("dfs.hosts.exclude", HOST_FILE_PATH);
  LOG.info("Start the cluster");
  final MiniDFSCluster cluster = 
    new MiniDFSCluster(conf, REPLICATION_FACTOR, true, null);
  try {
    final FSNamesystem namesystem = cluster.getNameNode().namesystem;
    final FileSystem fs = cluster.getFileSystem();
    DatanodeDescriptor[] datanodes = (DatanodeDescriptor[])
          namesystem.heartbeats.toArray(
              new DatanodeDescriptor[REPLICATION_FACTOR]);
    assertEquals(1, datanodes.length);
    // populate the cluster with a one block file
    final Path FILE_PATH = new Path("/testfile2");
    DFSTestUtil.createFile(fs, FILE_PATH, 1L, REPLICATION_FACTOR, 1L);
    DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
    Block block = DFSTestUtil.getFirstBlock(fs, FILE_PATH);

    // shutdown the datanode
    DataNodeProperties dnprop = shutdownDataNode(cluster, datanodes[0]);
    assertEquals(1, namesystem.getMissingBlocksCount()); // one missing block
    assertEquals(0, namesystem.getNonCorruptUnderReplicatedBlocks());

    // Mark the only datanode for decommissioning
    LOG.info("Decommission the datanode " + dnprop);
    addToExcludeFile(namesystem.getConf(), datanodes);
    namesystem.refreshNodes(namesystem.getConf());      
    
    // bring up the datanode
    cluster.restartDataNode(dnprop);

    // Wait for block report
    LOG.info("wait for its block report to come in");
    NumberReplicas num;
    long startTime = System.currentTimeMillis();
    do {
      namesystem.readLock();
      try {
        num = namesystem.countNodes(block);
      } finally {
        namesystem.readUnlock();
      }
      Thread.sleep(1000);
      LOG.info("live: " + num.liveReplicas()
          + ", decommissioned: " + num.decommissionedReplicas());
    } while (num.decommissionedReplicas() != 1 &&
        System.currentTimeMillis() - startTime < 30000);
    assertEquals("Decommissioned replicas did not reach 1",
        1, num.decommissionedReplicas());
    assertEquals(1, namesystem.getNonCorruptUnderReplicatedBlocks());
    assertEquals(0, namesystem.getMissingBlocksCount());
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 63, Source: TestUnderReplicatedBlocks.java

Example 2: metaSave

import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas; // import the class that declares the method
void metaSave(PrintWriter out) {
  //
  // Dump contents of neededReplication
  //
  synchronized (neededReplications) {
    out.println("Metasave: Blocks waiting for replication: " + 
                neededReplications.size());
    for (Block block : neededReplications) {
      List<DatanodeDescriptor> containingNodes =
                                        new ArrayList<DatanodeDescriptor>();
      NumberReplicas numReplicas = new NumberReplicas();
      // source node returned is not used
      chooseSourceDatanode(block, containingNodes, numReplicas);
      int usableReplicas = numReplicas.liveReplicas() +
                           numReplicas.decommissionedReplicas();
     
      if (block instanceof BlockInfo) {
        String fileName = ((BlockInfo)block).getINode().getFullPathName();
        out.print(fileName + ": ");
      }
      // l: live, d: decommissioned, c: corrupt, e: excess
      out.print(block + ((usableReplicas > 0)? "" : " MISSING") + 
                " (replicas:" +
                " l: " + numReplicas.liveReplicas() +
                " d: " + numReplicas.decommissionedReplicas() +
                " c: " + numReplicas.corruptReplicas() +
                " e: " + numReplicas.excessReplicas() + ") "); 

      Collection<DatanodeDescriptor> corruptNodes = 
                                    corruptReplicas.getNodes(block);
      
      for (Iterator<DatanodeDescriptor> jt = blocksMap.nodeIterator(block);
           jt.hasNext();) {
        DatanodeDescriptor node = jt.next();
        String state = "";
        if (corruptNodes != null && corruptNodes.contains(node)) {
          state = "(corrupt)";
        } else if (node.isDecommissioned() || 
            node.isDecommissionInProgress()) {
          state = "(decommissioned)";
        }          
        out.print(" " + node + state + " : ");
      }
      out.println("");
    }
  }

  //
  // Dump blocks from pendingReplication
  //
  pendingReplications.metaSave(out);

  //
  // Dump blocks that are waiting to be deleted
  //
  dumpRecentInvalidateSets(out);
}
 
Developer ID: cumulusyebl, Project: cumulus, Lines: 58, Source: BlockManager.java
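For reference, a single line of the metasave dump produced by the code above looks roughly like the following; the path, block ID, counts, and datanode address are illustrative, not real output:

    /user/foo/testfile2: blk_3874284995581110238_1002 (replicas: l: 0 d: 1 c: 0 e: 0)  127.0.0.1:50010(decommissioned) : 

Note that a block is flagged MISSING only when its usable replica count, live plus decommissioned, is zero.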

Example 3: isReplicationInProgress

import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas; // import the class that declares the method
/**
 * Return true if there are any blocks on this node that have not
 * yet reached their replication factor. Otherwise returns false.
 */
boolean isReplicationInProgress(DatanodeDescriptor srcNode) {
  boolean status = false;
  int underReplicatedBlocks = 0;
  int decommissionOnlyReplicas = 0;
  int underReplicatedInOpenFiles = 0;
  final Iterator<? extends Block> it = srcNode.getBlockIterator();
  while(it.hasNext()) {
    final Block block = it.next();
    INode fileINode = blocksMap.getINode(block);

    if (fileINode != null) {
      NumberReplicas num = countNodes(block);
      int curReplicas = num.liveReplicas();
      int curExpectedReplicas = getReplication(block);
      if (isNeededReplication(block, curExpectedReplicas, curReplicas)) {
        if (curExpectedReplicas > curReplicas) {
          //Log info about one block for this node which needs replication
          if (!status) {
            status = true;
            logBlockReplicationInfo(block, srcNode, num);
          }
          underReplicatedBlocks++;
          if ((curReplicas == 0) && (num.decommissionedReplicas() > 0)) {
            decommissionOnlyReplicas++;
          }
          if (fileINode.isUnderConstruction()) {
            underReplicatedInOpenFiles++;
          }
        }
        if (!neededReplications.contains(block) &&
          pendingReplications.getNumReplicas(block) == 0) {
          //
          // These blocks have been reported from the datanode
          // after the startDecommission method has been executed. These
          // blocks were in flight when the decommissioning was started.
          //
          neededReplications.add(block,
                                 curReplicas,
                                 num.decommissionedReplicas(),
                                 curExpectedReplicas);
        }
      }
    }
  }
  srcNode.decommissioningStatus.set(underReplicatedBlocks,
      decommissionOnlyReplicas, 
      underReplicatedInOpenFiles);
  return status;
}
 
Developer ID: cumulusyebl, Project: cumulus, Lines: 54, Source: BlockManager.java
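As a hypothetical usage sketch (the monitor method below and the setDecommissioned call are assumptions based on how HDFS decommissioning generally works, not code from the project above), a periodic decommission check could be built on top of this method:

// A node in DECOMMISSION_IN_PROGRESS may be marked DECOMMISSIONED only once
// isReplicationInProgress reports no remaining under-replicated blocks.
void checkDecommissionState(DatanodeDescriptor node) {
  if (node.isDecommissionInProgress() && !isReplicationInProgress(node)) {
    node.setDecommissioned();  // assumed state transition on the node
    LOG.info("Decommission complete for node " + node.getName());
  }
}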

Example 4: testUnderReplicationWithDecommissionDataNode

import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas; // import the class that declares the method
public void testUnderReplicationWithDecommissionDataNode() throws Exception {
  final Configuration conf = new Configuration();
  final short REPLICATION_FACTOR = (short)1;
  File f = new File(HOST_FILE_PATH);
  if (f.exists()) {
    f.delete();
  }
  f.createNewFile();
  conf.set("dfs.hosts.exclude", HOST_FILE_PATH);
  LOG.info("Start the cluster");
  final MiniDFSCluster cluster = 
    new MiniDFSCluster(conf, REPLICATION_FACTOR, true, null);
  try {
    final FSNamesystem namesystem = cluster.getNameNode().namesystem;
    final FileSystem fs = cluster.getFileSystem();
    DatanodeDescriptor[] datanodes = (DatanodeDescriptor[])
          namesystem.heartbeats.toArray(
              new DatanodeDescriptor[REPLICATION_FACTOR]);
    assertEquals(1, datanodes.length);
    // populate the cluster with a one block file
    final Path FILE_PATH = new Path("/testfile2");
    DFSTestUtil.createFile(fs, FILE_PATH, 1L, REPLICATION_FACTOR, 1L);
    DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
    Block block = DFSTestUtil.getFirstBlock(fs, FILE_PATH);

    // shutdown the datanode
    DataNodeProperties dnprop = shutdownDataNode(cluster, datanodes[0]);
    assertEquals(1, namesystem.getMissingBlocksCount()); // one missing block
    assertEquals(0, namesystem.getNonCorruptUnderReplicatedBlocks());

    // Mark the only datanode for decommissioning
    LOG.info("Decommission the datanode " + dnprop);
    addToExcludeFile(namesystem.getConf(), datanodes);
    namesystem.refreshNodes(namesystem.getConf());      
    
    // bring up the datanode
    cluster.restartDataNode(dnprop);

    // Wait for block report
    LOG.info("wait for its block report to come in");
    NumberReplicas num;
    long startTime = System.currentTimeMillis();
    do {
      namesystem.readLock();
      try {
        num = namesystem.countNodes(block);
      } finally {
        namesystem.readUnlock();
      }
      Thread.sleep(1000);
      LOG.info("live: " + num.liveReplicas()
          + ", decommissioned: " + num.decommissionedReplicas());
    } while (num.decommissionedReplicas() != 1 &&
        System.currentTimeMillis() - startTime < 30000);
    assertEquals("Decommissioned replicas did not reach 1",
        1, num.decommissionedReplicas());
    assertEquals(1, namesystem.getNonCorruptUnderReplicatedBlocks());
    assertEquals(0, namesystem.getMissingBlocksCount());
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: iVCE, Project: RDFS, Lines: 62, Source: TestUnderReplicatedBlocks.java


Note: The org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas.decommissionedReplicas method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers, and copyright remains with the original authors. When distributing or using the code, please follow the corresponding project's license. Do not reproduce without permission.