This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager. If you are wondering what DecommissionManager does or how to use it, the curated examples below should help.
The DecommissionManager class belongs to the org.apache.hadoop.hdfs.server.blockmanagement package. Five code examples are shown below.
Example 1: testNodesPerInterval
import org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager; // import the required package/class
@Deprecated
@Test(timeout=120000)
public void testNodesPerInterval() throws Exception {
  Configuration newConf = new Configuration(conf);
  org.apache.log4j.Logger.getLogger(DecommissionManager.class)
      .setLevel(Level.TRACE);
  // Set the deprecated configuration key which limits the # of nodes per
  // interval
  newConf.setInt("dfs.namenode.decommission.nodes.per.interval", 1);
  // Disable the normal monitor runs
  newConf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
      Integer.MAX_VALUE);
  startCluster(1, 3, newConf);
  final FileSystem fs = cluster.getFileSystem();
  final DatanodeManager datanodeManager =
      cluster.getNamesystem().getBlockManager().getDatanodeManager();
  final DecommissionManager decomManager = datanodeManager.getDecomManager();
  // Write a 3 block file, so each node has one block. Should scan 1 node
  // each time.
  DFSTestUtil.createFile(fs, new Path("/file1"), 64, (short) 3, 0xBAD1DEA);
  for (int i = 0; i < 3; i++) {
    doDecomCheck(datanodeManager, decomManager, 1);
  }
}
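The doDecomCheck helper used above (and in Example 4) is not reproduced on this page. The following is a plausible sketch based on the Hadoop test suite these examples appear to come from; the decommissionNode and recommissionNode helpers, the AdminStates enum, and DecommissionManager.getNumNodesChecked() are assumptions drawn from that context rather than from this page:
// Sketch of the doDecomCheck helper (assumed): it starts decommissioning
// every datanode, triggers a single monitor pass, and verifies how many
// nodes that one pass actually scanned.
private void doDecomCheck(DatanodeManager datanodeManager,
    DecommissionManager decomManager, int expectedNumCheckedNodes)
    throws IOException, ExecutionException, InterruptedException {
  // Put every datanode in the cluster into DECOMMISSION_INPROGRESS
  ArrayList<DatanodeInfo> decommissionedNodes = Lists.newArrayList();
  for (DataNode d : cluster.getDataNodes()) {
    DatanodeInfo dn = decommissionNode(0, d.getDatanodeUuid(),
        decommissionedNodes, AdminStates.DECOMMISSION_INPROGRESS);
    decommissionedNodes.add(dn);
  }
  // Run one decommission monitor pass and check the scan count
  BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
  assertEquals("Unexpected # of nodes checked", expectedNumCheckedNodes,
      decomManager.getNumNodesChecked());
  // Recommission all nodes so the next check starts from a clean state
  for (DatanodeInfo dn : decommissionedNodes) {
    recommissionNode(0, dn);
  }
}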
Example 2: setUp
import org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager; // import the required package/class
@BeforeClass
public static void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
      false);
  // Set up the hosts/exclude files.
  localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  dir = new Path(workingDir, "build/test/data/work-dir/decommission");
  assertTrue(localFileSys.mkdirs(dir));
  excludeFile = new Path(dir, "exclude");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  Path includeFile = new Path(dir, "include");
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
      1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
      4);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, 1);
  writeConfigFile(localFileSys, excludeFile, null);
  writeConfigFile(localFileSys, includeFile, null);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
      .build();
  cluster.waitActive();
  fileSys = cluster.getFileSystem();
  cluster.getNamesystem().getBlockManager().getDatanodeManager()
      .setHeartbeatExpireInterval(3000);
  Logger.getLogger(DecommissionManager.class).setLevel(Level.DEBUG);
}
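This setup and Example 3 both call a writeConfigFile helper that is not shown on this page. A minimal sketch, assuming it simply rewrites the given hosts file with one entry per line (a null list producing an empty file, as the setUp calls above suggest):
// Sketch of the writeConfigFile helper (assumed): rewrites the given
// include/exclude file with one hostname per line.
private static void writeConfigFile(FileSystem fs, Path name,
    List<String> nodes) throws IOException {
  // Delete any existing file so stale entries do not linger
  if (fs.exists(name)) {
    fs.delete(name, true);
  }
  FSDataOutputStream stm = fs.create(name);
  if (nodes != null) {
    for (String node : nodes) {
      stm.writeBytes(node);
      stm.writeBytes("\n");
    }
  }
  stm.close();
}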
Example 3: testDecommissionDeadDN
import org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager; // import the required package/class
/**
 * Verify the support for decommissioning a datanode that is already dead.
 * Under this scenario the datanode should immediately be marked as
 * DECOMMISSIONED.
 */
@Test(timeout=120000)
public void testDecommissionDeadDN() throws Exception {
  Logger log = Logger.getLogger(DecommissionManager.class);
  log.setLevel(Level.DEBUG);
  DatanodeID dnID = cluster.getDataNodes().get(0).getDatanodeId();
  String dnName = dnID.getXferAddr();
  DataNodeProperties stoppedDN = cluster.stopDataNode(0);
  DFSTestUtil.waitForDatanodeState(cluster, dnID.getDatanodeUuid(),
      false, 30000);
  FSNamesystem fsn = cluster.getNamesystem();
  final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
  DatanodeDescriptor dnDescriptor = dm.getDatanode(dnID);
  decommissionNode(fsn, localFileSys, dnName);
  dm.refreshNodes(conf);
  BlockManagerTestUtil.recheckDecommissionState(dm);
  assertTrue(dnDescriptor.isDecommissioned());
  // Add the node back
  cluster.restartDataNode(stoppedDN, true);
  cluster.waitActive();
  // Call refreshNodes on FSNamesystem with empty exclude file to remove the
  // datanode from decommissioning list and make it available again.
  writeConfigFile(localFileSys, excludeFile, null);
  dm.refreshNodes(conf);
}
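The decommissionNode(FSNamesystem, FileSystem, String) helper matches the call site above, but its body is not shown on this page. A hedged sketch, assuming it merely records the node name in the exclude file, after which the caller applies it via refreshNodes:
// Sketch of the decommissionNode helper (assumed): writes the node name
// into the exclude file; dm.refreshNodes(conf) then picks it up.
private void decommissionNode(FSNamesystem namesystem,
    FileSystem localFileSys, String dnName) throws IOException {
  System.out.println("Decommissioning node: " + dnName);
  // Write the node name into the exclude file read by the NameNode
  ArrayList<String> nodes = new ArrayList<String>();
  nodes.add(dnName);
  writeConfigFile(localFileSys, excludeFile, nodes);
}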
Example 4: testBlocksPerInterval
import org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager; // import the required package/class
@Test(timeout=120000)
public void testBlocksPerInterval() throws Exception {
  Configuration newConf = new Configuration(conf);
  org.apache.log4j.Logger.getLogger(DecommissionManager.class)
      .setLevel(Level.TRACE);
  // Turn the blocks per interval way down
  newConf.setInt(
      DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY,
      3);
  // Disable the normal monitor runs
  newConf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
      Integer.MAX_VALUE);
  startCluster(1, 3, newConf);
  final FileSystem fs = cluster.getFileSystem();
  final DatanodeManager datanodeManager =
      cluster.getNamesystem().getBlockManager().getDatanodeManager();
  final DecommissionManager decomManager = datanodeManager.getDecomManager();
  // Write a 3 block file, so each node has one block. The 3-block budget
  // covers all 3 nodes, so a single pass should scan 3 nodes.
  DFSTestUtil.createFile(fs, new Path("/file1"), 64, (short) 3, 0xBAD1DEA);
  doDecomCheck(datanodeManager, decomManager, 3);
  // Write another file: two blocks per node, so the budget runs out after
  // scanning two nodes.
  DFSTestUtil.createFile(fs, new Path("/file2"), 64, (short) 3, 0xBAD1DEA);
  doDecomCheck(datanodeManager, decomManager, 2);
  // One more file: three blocks per node, should only scan 1
  DFSTestUtil.createFile(fs, new Path("/file3"), 64, (short) 3, 0xBAD1DEA);
  doDecomCheck(datanodeManager, decomManager, 1);
  // Blocks on each DN now exceed the limit, but at least one node is still
  // scanned per interval so decommissioning always makes progress.
  DFSTestUtil.createFile(fs, new Path("/file4"), 64, (short) 3, 0xBAD1DEA);
  doDecomCheck(datanodeManager, decomManager, 1);
}
Example 5: assertTrackedAndPending
import org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager; // import the required package/class
private void assertTrackedAndPending(DecommissionManager decomManager,
    int tracked, int pending) {
  assertEquals("Unexpected number of tracked nodes", tracked,
      decomManager.getNumTrackedNodes());
  assertEquals("Unexpected number of pending nodes", pending,
      decomManager.getNumPendingNodes());
}
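This helper distinguishes nodes the DecommissionManager is actively tracking from nodes still queued as pending. A hypothetical usage sketch (the node counts and the disabled-monitor scenario are illustrative assumptions, not taken from this page):
// Hypothetical usage: with the monitor disabled, three freshly
// decommissioning nodes sit in the pending queue; one monitor pass
// (as triggered in Example 3) then moves them into active tracking.
assertTrackedAndPending(decomManager, 0, 3);
BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
assertTrackedAndPending(decomManager, 3, 0);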