This article collects typical usage examples of the Java method org.apache.hadoop.net.StaticMapping.addNodeToRack. If you are wondering what StaticMapping.addNodeToRack does, how to use it, or where to find examples of it, the curated code samples below may help. You can also read more about the enclosing class, org.apache.hadoop.net.StaticMapping.
The following presents 15 code examples of StaticMapping.addNodeToRack, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
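Before the examples, here is a minimal self-contained sketch of the typical flow: StaticMapping is wired in as the cluster's DNSToSwitchMapping implementation, nodes are registered against rack paths with addNodeToRack, and later topology lookups resolve through the shared static map. The configuration key, host names, and rack paths below are illustrative assumptions rather than values taken from the examples that follow.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.StaticMapping;

public class StaticMappingSketch {
  public static void main(String[] args) {
    // Use StaticMapping for node-to-rack resolution; the older
    // "topology.node.switch.mapping.impl" key matches the examples below.
    Configuration conf = new Configuration();
    conf.setClass("topology.node.switch.mapping.impl",
        StaticMapping.class, DNSToSwitchMapping.class);

    // Register nodes against rack paths; the map is static, so every
    // StaticMapping instance in the JVM sees these entries.
    StaticMapping.addNodeToRack("host1.example.com", "/rack1");
    StaticMapping.addNodeToRack("host2.example.com", "/rack2");

    // Resolve node names back to rack paths.
    DNSToSwitchMapping mapping = new StaticMapping();
    List<String> racks =
        mapping.resolve(Arrays.asList("host1.example.com", "host2.example.com"));
    System.out.println(racks); // expected: [/rack1, /rack2]
  }
}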
Example 1: testPlacementWithLocalRackNodesDecommissioned
import org.apache.hadoop.net.StaticMapping; // import the package/class on which this method depends
/**
 * Verify decommissioned nodes should not be selected.
 */
@Test
public void testPlacementWithLocalRackNodesDecommissioned() throws Exception {
  String clientMachine = "client.foo.com";
  // Map client to RACK3
  String clientRack = "/RACK3";
  StaticMapping.addNodeToRack(clientMachine, clientRack);
  final DatanodeManager dnm = namesystem.getBlockManager().getDatanodeManager();
  DatanodeDescriptor dnd3 = dnm.getDatanode(
      cluster.getDataNodes().get(3).getDatanodeId());
  assertEquals(dnd3.getNetworkLocation(), clientRack);
  dnm.getDecomManager().startDecommission(dnd3);
  try {
    testPlacement(clientMachine, clientRack, false);
  } finally {
    dnm.getDecomManager().stopDecommission(dnd3);
  }
}
Example 2: setUp
import org.apache.hadoop.net.StaticMapping; // import the package/class on which this method depends
@BeforeClass
public static void setUp() throws Exception {
  Configuration conf = new Configuration();
  conf.set("mapreduce.jobtracker.address", "localhost:0");
  conf.set("mapreduce.jobtracker.http.address", "0.0.0.0:0");
  conf.setClass("topology.node.switch.mapping.impl", StaticMapping.class,
      DNSToSwitchMapping.class);
  dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
  mrCluster = new MiniMRCluster(numSlaves, dfsCluster.getFileSystem()
      .getUri().toString(), 1);
  jt = mrCluster.getJobTrackerRunner().getJobTracker();
  // Set up the Topology Information
  for (int i = 0; i < hosts.length; i++) {
    StaticMapping.addNodeToRack(hosts[i], racks[i]);
  }
  for (String s : trackers) {
    FakeObjectUtilities.establishFirstContact(jt, s);
  }
}
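A setup like Example 2 usually needs a matching teardown. The sketch below is an assumed counterpart, not part of the original example: it shuts the mini clusters down and, in Hadoop versions that provide StaticMapping.resetMap(), clears the static rack map so entries do not leak into other tests.

import org.junit.AfterClass; // assumed import for this sketch

@AfterClass
public static void tearDown() throws Exception {
  if (mrCluster != null) {
    mrCluster.shutdown();
  }
  if (dfsCluster != null) {
    dfsCluster.shutdown();
  }
  // StaticMapping keeps node-to-rack entries in a static map; clearing it
  // (where resetMap() is available) avoids leaking topology between tests.
  StaticMapping.resetMap();
}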
Example 3: startTaskTrackers
import org.apache.hadoop.net.StaticMapping; // import the package/class on which this method depends
/**
 * Start simulated task trackers based on topology.
 * @param clusterStory The cluster topology.
 * @param now
 *          time stamp when the simulator is started, {@link SimulatorTaskTracker}s
 *          are started shortly after this time stamp
 */
void startTaskTrackers(ClusterStory clusterStory, long now) {
  /** port assigned to TTs, incremented by 1 for each TT */
  int port = 10000;
  long ms = now + 100;
  for (MachineNode node : clusterStory.getMachines()) {
    String hostname = node.getName();
    RackNode rackNode = node.getRackNode();
    StaticMapping.addNodeToRack(hostname, rackNode.getName());
    String taskTrackerName = "tracker_" + hostname + ":localhost/127.0.0.1:"
        + port;
    port++;
    SimulatorTaskTracker tt = new SimulatorTaskTracker(jt, taskTrackerName,
        hostname, node.getMapSlots(), node.getReduceSlots());
    queue.addAll(tt.init(ms++));
  }
}
Example 4: startTaskTracker
import org.apache.hadoop.net.StaticMapping; // import the package/class on which this method depends
public void startTaskTracker(String host, String rack, int idx, int numDir)
    throws IOException {
  if (rack != null) {
    StaticMapping.addNodeToRack(host, rack);
  }
  if (host != null) {
    NetUtils.addStaticResolution(host, "localhost");
    try {
      InetAddress addr = InetAddress.getByName(host);
      NetUtils.addStaticResolution(addr.getHostAddress(), "localhost");
    } catch (UnknownHostException e) {
      // host could not be resolved; skip the IP-based static resolution
    }
  }
  TaskTrackerRunner taskTracker;
  taskTracker = new TaskTrackerRunner(idx, numDir, host, conf, rjtFailureInjector);
  addTaskTracker(taskTracker);
}
Example 5: startTaskTracker
import org.apache.hadoop.net.StaticMapping; // import the package/class on which this method depends
/**
 * Start the tasktracker.
 */
public void startTaskTracker(String host, String rack, int idx, int numDir)
    throws IOException {
  if (rack != null) {
    StaticMapping.addNodeToRack(host, rack);
  }
  if (host != null) {
    NetUtils.addStaticResolution(host, "localhost");
  }
  TaskTrackerRunner taskTracker;
  taskTracker = new TaskTrackerRunner(idx, numDir, host, conf);
  Thread taskTrackerThread = new Thread(taskTracker);
  taskTrackerList.add(taskTracker);
  taskTrackerThreadList.add(taskTrackerThread);
  taskTrackerThread.start();
  ++numTaskTrackers;
}
Example 6: suite
import org.apache.hadoop.net.StaticMapping; // import the package/class on which this method depends
public static Test suite() {
  TestSetup setup =
      new TestSetup(new TestSuite(TestRackAwareTaskPlacement.class)) {
    protected void setUp() throws Exception {
      JobConf conf = new JobConf();
      conf.set(JTConfig.JT_IPC_ADDRESS, "localhost:0");
      conf.set(JTConfig.JT_HTTP_ADDRESS, "0.0.0.0:0");
      conf.setClass("topology.node.switch.mapping.impl",
          StaticMapping.class, DNSToSwitchMapping.class);
      conf.set(JTConfig.JT_INSTRUMENTATION,
          FakeJobTrackerMetricsInst.class.getName());
      jobTracker = new FakeJobTracker(conf, new FakeClock(), trackers);
      fakeInst = (FakeJobTrackerMetricsInst) jobTracker.getInstrumentation();
      // Set up the Topology Information
      for (int i = 0; i < allHosts.length; i++) {
        StaticMapping.addNodeToRack(allHosts[i], allRacks[i]);
      }
      for (String tracker : trackers) {
        FakeObjectUtilities.establishFirstContact(jobTracker, tracker);
      }
    }
  };
  return setup;
}
Example 7: suite
import org.apache.hadoop.net.StaticMapping; // import the package/class on which this method depends
public static Test suite() {
  TestSetup setup = new TestSetup(new TestSuite(TestJobInProgress.class)) {
    protected void setUp() throws Exception {
      JobConf conf = new JobConf();
      conf.set(JTConfig.JT_IPC_ADDRESS, "localhost:0");
      conf.set(JTConfig.JT_HTTP_ADDRESS, "0.0.0.0:0");
      conf.setClass("topology.node.switch.mapping.impl",
          StaticMapping.class, DNSToSwitchMapping.class);
      jobTracker = new FakeJobTracker(conf, new FakeClock(), trackers);
      // Set up the Topology Information
      for (int i = 0; i < hosts.length; i++) {
        StaticMapping.addNodeToRack(hosts[i], racks[i]);
      }
      for (String s : trackers) {
        FakeObjectUtilities.establishFirstContact(jobTracker, s);
      }
    }
  };
  return setup;
}
Example 8: testLocalRackPlacement
import org.apache.hadoop.net.StaticMapping; // import the package/class on which this method depends
/**
 * Verify rack-local node selection for the rack-local client in case of no
 * local node.
 */
@Test
public void testLocalRackPlacement() throws Exception {
  String clientMachine = "client.foo.com";
  // Map client to RACK2
  String clientRack = "/RACK2";
  StaticMapping.addNodeToRack(clientMachine, clientRack);
  testPlacement(clientMachine, clientRack);
}
Example 9: testLocalRackPlacement
import org.apache.hadoop.net.StaticMapping; // import the package/class on which this method depends
/**
 * Verify rack-local node selection for the rack-local client in case of no
 * local node.
 */
@Test
public void testLocalRackPlacement() throws Exception {
  String clientMachine = "client.foo.com";
  // Map client to RACK2
  String clientRack = "/RACK2";
  StaticMapping.addNodeToRack(clientMachine, clientRack);
  testPlacement(clientMachine, clientRack, true);
}
Example 10: startTaskTracker
import org.apache.hadoop.net.StaticMapping; // import the package/class on which this method depends
/**
 * Start the tasktracker.
 */
public void startTaskTracker(String host, String rack, int idx, int numDir)
    throws IOException {
  if (rack != null) {
    StaticMapping.addNodeToRack(host, rack);
  }
  if (host != null) {
    NetUtils.addStaticResolution(host, "localhost");
  }
  TaskTrackerRunner taskTracker;
  taskTracker = new TaskTrackerRunner(idx, numDir, host, conf);
  addTaskTracker(taskTracker);
}
Example 11: setupHostsFile
import org.apache.hadoop.net.StaticMapping; // import the package/class on which this method depends
/**
 * Helper function to generate consecutive datanode addresses and
 * fill in the hosts files with them.
 * @param baseDirectory Root directory where the hosts file should be.
 * @param racks Racks to assign to the datanodes.
 */
private static List<InetSocketAddress> setupHostsFile(int numberOfDatanodes, Configuration conf,
    File baseDirectory, String[] racks) throws IOException {
  List<InetSocketAddress> datanodeAddresses = generateDatanodeAddresses(numberOfDatanodes);
  conf.set(FSConstants.DFS_HOSTS,
      writeHostsFile(datanodeAddresses, getHostsFile(conf, baseDirectory)));
  if (racks != null) {
    for (int i = 0; i < racks.length; i++) {
      StaticMapping.addNodeToRack(NetUtils.toIpPort(datanodeAddresses.get(i)), racks[i]);
    }
  }
  return datanodeAddresses;
}
Example 12: testStartup
import org.apache.hadoop.net.StaticMapping; // import the package/class on which this method depends
@Test
public void testStartup() throws Exception {
  conf = new Configuration();
  conf.setClass("dfs.block.replicator.classname",
      BlockPlacementPolicyConfigurable.class, BlockPlacementPolicy.class);
  File baseDir = MiniDFSCluster.getBaseDirectory(conf);
  baseDir.mkdirs();
  File hostsFile = new File(baseDir, "hosts");
  FileOutputStream out = new FileOutputStream(hostsFile);
  out.write("h1\n".getBytes());
  out.write("h2\n".getBytes());
  out.write("h3\n".getBytes());
  out.close();
  conf.set("dfs.hosts", hostsFile.getAbsolutePath());
  StaticMapping.addNodeToRack("h1", "/r1");
  StaticMapping.addNodeToRack("h2", "/r2");
  StaticMapping.addNodeToRack("h3", NetworkTopology.DEFAULT_RACK);
  cluster = new MiniDFSCluster(conf, 3, new String[] { "/r1", "/r2",
      NetworkTopology.DEFAULT_RACK }, new String[] { "h1", "h2", "h3" },
      true, false);
  DFSTestUtil util = new DFSTestUtil("/testStartup", 10, 10, 1024);
  util.createFiles(cluster.getFileSystem(), "/");
  util.checkFiles(cluster.getFileSystem(), "/");
  assertEquals(2,
      cluster.getNameNode().getDatanodeReport(DatanodeReportType.LIVE).length);
  cluster.shutdown();
}
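After a test like Example 12 brings the cluster up, the rack assigned to each live datanode can be inspected through the same datanode report the assertion uses. The fragment below is an illustrative follow-up sketch rather than part of the original test; it reuses the example's cluster field and only prints the resolved network locations.

import org.apache.hadoop.hdfs.protocol.DatanodeInfo; // assumed import for this sketch

for (DatanodeInfo dn :
    cluster.getNameNode().getDatanodeReport(DatanodeReportType.LIVE)) {
  // getNetworkLocation() returns the rack path the placement policy sees,
  // e.g. "/r1" for a node registered via StaticMapping.addNodeToRack.
  System.out.println(dn.getHostName() + " -> " + dn.getNetworkLocation());
}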
Example 13: startTaskTracker
import org.apache.hadoop.net.StaticMapping; // import the package/class on which this method depends
public void startTaskTracker(String host, String rack, int idx, int numDir)
    throws IOException {
  if (rack != null) {
    StaticMapping.addNodeToRack(host, rack);
  }
  if (host != null) {
    NetUtils.addStaticResolution(host, "localhost");
  }
  TaskTrackerRunner taskTracker;
  taskTracker = new TaskTrackerRunner(idx, numDir, host, conf);
  addTaskTracker(taskTracker);
}
Example 14: setStaticMapping
import org.apache.hadoop.net.StaticMapping; // import the package/class on which this method depends
static void setStaticMapping(LoggedNetworkTopology topology) {
  for (LoggedNetworkTopology rack : topology.getChildren()) {
    for (LoggedNetworkTopology node : rack.getChildren()) {
      StaticMapping.addNodeToRack(node.getName(),
          new RackNode(rack.getName(), 1).getName());
    }
  }
}
Example 15: run
import org.apache.hadoop.net.StaticMapping; // import the package/class on which this method depends
@Override
public void run() {
  try {
    String dnArg = StartupOption.REGULAR.getName();
    if (startOpt != null && startOpt == StartupOption.ROLLBACK) {
      dnArg = startOpt.getName();
    }
    String[] dnArgs = { dnArg };
    int iN = curDn + i;
    Configuration dnConf = new Configuration(conf);
    if (simulatedCapacities != null) {
      dnConf.setBoolean("dfs.datanode.simulateddatastorage", true);
      dnConf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY,
          simulatedCapacities[i]);
    }
    File dir1 = new File(dataDir, "data" + (2 * iN + 1));
    File dir2 = new File(dataDir, "data" + (2 * iN + 2));
    dir1.mkdirs();
    dir2.mkdirs();
    if (!dir1.isDirectory() || !dir2.isDirectory()) {
      throw new IOException(
          "Mkdirs failed to create directory for DataNode " + iN + ": "
              + dir1 + " or " + dir2);
    }
    dnConf.set("dfs.data.dir", dir1.getPath() + "," + dir2.getPath());
    LOG.info("Starting DataNode " + iN + " with dfs.data.dir: "
        + dnConf.get("dfs.data.dir"));
    if (hosts != null) {
      dnConf.set(FSConstants.SLAVE_HOST_NAME, hosts[i]);
      LOG.info("Starting DataNode " + iN + " with hostname set to: "
          + dnConf.get(FSConstants.SLAVE_HOST_NAME));
    }
    if (racks != null) {
      String name = hosts[i];
      LOG.info("Adding node with hostname : " + name + " to rack "
          + racks[i]);
      StaticMapping.addNodeToRack(name, racks[i]);
    }
    Configuration newconf = new Configuration(dnConf); // save config
    AvatarDataNode dn = instantiateDataNode(dnArgs, dnConf);
    // since HDFS does things based on IP:port, we need to add the mapping
    // for IP:port to rackId as well
    String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();
    if (racks != null) {
      int port = dn.getSelfAddr().getPort();
      System.out.println("Adding node with IP:port : " + ipAddr + ":"
          + port + " to rack " + racks[i]);
      StaticMapping.addNodeToRack(ipAddr + ":" + port, racks[i]);
    }
    dn.runDatanodeDaemon();
    synchronized (dataNodes) {
      dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs));
    }
  } catch (IOException e) {
    LOG.error("Exception when creating datanode", e);
  }
}