當前位置: 首頁>>代碼示例>>Java>>正文


Java DecommissionManager類代碼示例

本文整理匯總了Java中org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager的典型用法代碼示例。如果您正苦於以下問題:Java DecommissionManager類的具體用法?Java DecommissionManager怎麽用?Java DecommissionManager使用的例子?那麽, 這裏精選的類代碼示例或許可以為您提供幫助。


DecommissionManager類屬於org.apache.hadoop.hdfs.server.blockmanagement包,在下文中一共展示了DecommissionManager類的5個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。

示例1: testNodesPerInterval

import org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager; //導入依賴的package包/類
@Deprecated
@Test(timeout=120000)
public void testNodesPerInterval() throws Exception {
  Configuration tweakedConf = new Configuration(conf);
  // Trace-level output from the decommission manager aids debugging.
  org.apache.log4j.Logger.getLogger(DecommissionManager.class)
      .setLevel(Level.TRACE);
  // Use the deprecated key to cap the scan at one node per interval.
  tweakedConf.setInt("dfs.namenode.decommission.nodes.per.interval", 1);
  // Push the periodic monitor effectively out of the picture so only
  // explicit doDecomCheck() calls drive scanning.
  tweakedConf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
      Integer.MAX_VALUE);
  startCluster(1, 3, tweakedConf);
  final FileSystem fs = cluster.getFileSystem();
  final DatanodeManager dnManager =
      cluster.getNamesystem().getBlockManager().getDatanodeManager();
  final DecommissionManager decomManager = dnManager.getDecomManager();

  // A 3-block file puts one block on each datanode; every check below
  // should therefore scan exactly one node.
  DFSTestUtil.createFile(fs, new Path("/file1"), 64, (short) 3, 0xBAD1DEA);
  int round = 0;
  while (round < 3) {
    doDecomCheck(dnManager, decomManager, 1);
    round++;
  }
}
 
開發者ID:naver,項目名稱:hadoop,代碼行數:26,代碼來源:TestDecommission.java

示例2: setUp

import org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager; //導入依賴的package包/類
@BeforeClass
public static void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
      false);

  // Create empty hosts include/exclude files under the test work dir
  // and wire them into the configuration.
  localFileSys = FileSystem.getLocal(conf);
  Path baseDir = localFileSys.getWorkingDirectory();
  dir = new Path(baseDir, "build/test/data/work-dir/decommission");
  assertTrue(localFileSys.mkdirs(dir));
  excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
  writeConfigFile(localFileSys, excludeFile, null);
  writeConfigFile(localFileSys, includeFile, null);

  // Tighten timers so decommission-related state changes happen quickly
  // enough for the tests to observe.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 
      1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
      4);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, 1);

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
  cluster.waitActive();
  fileSys = cluster.getFileSystem();
  cluster.getNamesystem().getBlockManager().getDatanodeManager()
      .setHeartbeatExpireInterval(3000);
  Logger.getLogger(DecommissionManager.class).setLevel(Level.DEBUG);
}
 
開發者ID:naver,項目名稱:hadoop,代碼行數:35,代碼來源:TestDecommissioningStatus.java

示例3: testDecommissionDeadDN

import org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager; //導入依賴的package包/類
/**
 * Checks that decommissioning a datanode that is already dead marks it
 * DECOMMISSIONED immediately, and that the node becomes usable again
 * after it is restarted and removed from the exclude file.
 */
@Test(timeout=120000)
public void testDecommissionDeadDN() throws Exception {
  Logger decomLog = Logger.getLogger(DecommissionManager.class);
  decomLog.setLevel(Level.DEBUG);

  // Stop the first datanode and wait until the namenode sees it as dead.
  DatanodeID dnID = cluster.getDataNodes().get(0).getDatanodeId();
  String dnName = dnID.getXferAddr();
  DataNodeProperties stoppedDN = cluster.stopDataNode(0);
  DFSTestUtil.waitForDatanodeState(cluster, dnID.getDatanodeUuid(),
      false, 30000);

  // Decommission the dead node; a recheck should flip it to
  // DECOMMISSIONED without waiting on block replication.
  FSNamesystem fsn = cluster.getNamesystem();
  final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
  DatanodeDescriptor dnDescriptor = dm.getDatanode(dnID);
  decommissionNode(fsn, localFileSys, dnName);
  dm.refreshNodes(conf);
  BlockManagerTestUtil.recheckDecommissionState(dm);
  assertTrue(dnDescriptor.isDecommissioned());

  // Bring the node back.
  cluster.restartDataNode(stoppedDN, true);
  cluster.waitActive();

  // Clear the exclude file and refresh so the node leaves the
  // decommissioning list and is available again.
  writeConfigFile(localFileSys, excludeFile, null);
  dm.refreshNodes(conf);
}
 
開發者ID:naver,項目名稱:hadoop,代碼行數:32,代碼來源:TestDecommissioningStatus.java

示例4: testBlocksPerInterval

import org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager; //導入依賴的package包/類
@Test(timeout=120000)
public void testBlocksPerInterval() throws Exception {
  Configuration tweakedConf = new Configuration(conf);
  // Trace-level output from the decommission manager aids debugging.
  org.apache.log4j.Logger.getLogger(DecommissionManager.class)
      .setLevel(Level.TRACE);
  // Cap the per-interval block budget at 3 so each check can only cover
  // a few nodes' worth of blocks.
  tweakedConf.setInt(
      DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY,
      3);
  // Push the periodic monitor effectively out of the picture so only
  // explicit doDecomCheck() calls drive scanning.
  tweakedConf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
      Integer.MAX_VALUE);
  startCluster(1, 3, tweakedConf);
  final FileSystem fs = cluster.getFileSystem();
  final DatanodeManager dnManager =
      cluster.getNamesystem().getBlockManager().getDatanodeManager();
  final DecommissionManager decomManager = dnManager.getDecomManager();

  // One 3-block file: one block per node, so a single check covers all
  // 3 nodes within the block budget.
  DFSTestUtil.createFile(fs, new Path("/file1"), 64, (short) 3, 0xBAD1DEA);
  doDecomCheck(dnManager, decomManager, 3);
  // Two blocks per node now; budget of 3 only reaches two nodes.
  DFSTestUtil.createFile(fs, new Path("/file2"), 64, (short) 3, 0xBAD1DEA);
  doDecomCheck(dnManager, decomManager, 2);
  // Three blocks per node; budget only reaches one node.
  DFSTestUtil.createFile(fs, new Path("/file3"), 64, (short) 3, 0xBAD1DEA);
  doDecomCheck(dnManager, decomManager, 1);
  // Per-node block count exceeds the budget, but at least one node must
  // still be scanned each interval.
  DFSTestUtil.createFile(fs, new Path("/file4"), 64, (short) 3, 0xBAD1DEA);
  doDecomCheck(dnManager, decomManager, 1);
}
 
開發者ID:naver,項目名稱:hadoop,代碼行數:32,代碼來源:TestDecommission.java

示例5: assertTrackedAndPending

import org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager; //導入依賴的package包/類
/**
 * Asserts that the decommission manager currently reports exactly the
 * given numbers of tracked and pending nodes.
 */
private void assertTrackedAndPending(DecommissionManager decomManager,
    int tracked, int pending) {
  final int actualTracked = decomManager.getNumTrackedNodes();
  assertEquals("Unexpected number of tracked nodes", tracked, actualTracked);
  final int actualPending = decomManager.getNumPendingNodes();
  assertEquals("Unexpected number of pending nodes", pending, actualPending);
}
 
開發者ID:naver,項目名稱:hadoop,代碼行數:8,代碼來源:TestDecommission.java


注:本文中的org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager類示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。