This article collects typical usage examples of the Java class org.apache.hadoop.net.StaticMapping. If you are unsure what StaticMapping is for or how to use it, the curated examples below should help.
The StaticMapping class belongs to the org.apache.hadoop.net package. Fifteen code examples are shown below, sorted by popularity.
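Before working through the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the core StaticMapping calls that recur throughout: clearing the shared map, registering host-to-rack mappings, and resolving hostnames. The default-rack value in the final comment is an assumption based on NetworkTopology.DEFAULT_RACK; everything else uses only the StaticMapping API shown in the examples.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.net.StaticMapping;

public class StaticMappingSketch {
  public static void main(String[] args) {
    // StaticMapping keeps a single JVM-wide host->rack map, which is why the
    // test fixtures below call resetMap() before building a cluster.
    StaticMapping.resetMap();
    StaticMapping.addNodeToRack("host0", "/RACK0");
    StaticMapping.addNodeToRack("host1", "/RACK1");

    // resolve() maps hostnames to rack locations; hosts that were never
    // registered fall back to the default rack (assumed "/default-rack").
    List<String> racks = new StaticMapping().resolve(
        Arrays.asList("host0", "host1", "unknown-host"));
    System.out.println(racks); // expected: [/RACK0, /RACK1, /default-rack]
  }
}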
Example 1: setup
import org.apache.hadoop.net.StaticMapping; // import the required package/class
@Before
public void setup() throws IOException {
  StaticMapping.resetMap();
  Configuration conf = new HdfsConfiguration();
  final String[] racks = { "/RACK0", "/RACK0", "/RACK2", "/RACK3", "/RACK2" };
  final String[] hosts = { "/host0", "/host1", "/host2", "/host3", "/host4" };
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).racks(racks)
      .hosts(hosts).build();
  cluster.waitActive();
  nameNodeRpc = cluster.getNameNodeRpc();
  namesystem = cluster.getNamesystem();
  perm = new PermissionStatus("TestDefaultBlockPlacementPolicy", null,
      FsPermission.getDefault());
}
Example 2: testPlacementWithLocalRackNodesDecommissioned
import org.apache.hadoop.net.StaticMapping; // import the required package/class
/**
 * Verify that decommissioned nodes are not selected.
 */
@Test
public void testPlacementWithLocalRackNodesDecommissioned() throws Exception {
  String clientMachine = "client.foo.com";
  // map the client to RACK3
  String clientRack = "/RACK3";
  StaticMapping.addNodeToRack(clientMachine, clientRack);
  final DatanodeManager dnm = namesystem.getBlockManager().getDatanodeManager();
  DatanodeDescriptor dnd3 = dnm.getDatanode(
      cluster.getDataNodes().get(3).getDatanodeId());
  assertEquals(dnd3.getNetworkLocation(), clientRack);
  dnm.getDecomManager().startDecommission(dnd3);
  try {
    testPlacement(clientMachine, clientRack, false);
  } finally {
    dnm.getDecomManager().stopDecommission(dnd3);
  }
}
Example 3: setup
import org.apache.hadoop.net.StaticMapping; // import the required package/class
@Before
public void setup() throws IOException {
  StaticMapping.resetMap();
  conf = new HdfsConfiguration();
  final String[] racks = { "/RACK0", "/RACK0", "/RACK2", "/RACK3", "/RACK2" };
  final String[] hosts = { "/host0", "/host1", "/host2", "/host3", "/host4" };
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,
      DEFAULT_BLOCK_SIZE / 2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).racks(racks)
      .hosts(hosts).build();
  cluster.waitActive();
  nameNodeRpc = cluster.getNameNodeRpc();
  namesystem = cluster.getNamesystem();
  perm = new PermissionStatus("TestDefaultBlockPlacementPolicy", null,
      FsPermission.getDefault());
}
Example 4: run
import org.apache.hadoop.net.StaticMapping; // import the required package/class
/**
 * Create the job tracker and run it.
 */
public void run() {
  try {
    jc = (jc == null) ? createJobConf() : createJobConf(jc);
    String localPath = System.getProperty("test.build.data",
        "build/test/mapred/local");
    File f = new File(localPath).getAbsoluteFile();
    jc.set("mapred.local.dir", f.getAbsolutePath());
    jc.setClass("topology.node.switch.mapping.impl",
        StaticMapping.class, DNSToSwitchMapping.class);
    final String id =
        new SimpleDateFormat("yyyyMMddHHmmssSSS").format(new Date());
    if (ugi == null) {
      ugi = UserGroupInformation.getCurrentUser();
    }
    tracker = ugi.doAs(new PrivilegedExceptionAction<JobTracker>() {
      public JobTracker run() throws InterruptedException, IOException {
        return JobTracker.startTracker(jc, id);
      }
    });
    tracker.offerService();
  } catch (Throwable e) {
    LOG.error("Job tracker crashed", e);
    isActive = false;
  }
}
Example 5: setUp
import org.apache.hadoop.net.StaticMapping; // import the required package/class
@BeforeClass
public static void setUp() throws Exception {
  Configuration conf = new Configuration();
  conf.set("mapreduce.jobtracker.address", "localhost:0");
  conf.set("mapreduce.jobtracker.http.address", "0.0.0.0:0");
  conf.setClass("topology.node.switch.mapping.impl", StaticMapping.class,
      DNSToSwitchMapping.class);
  dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
  mrCluster = new MiniMRCluster(numSlaves, dfsCluster.getFileSystem()
      .getUri().toString(), 1);
  jt = mrCluster.getJobTrackerRunner().getJobTracker();
  // set up the topology information
  for (int i = 0; i < hosts.length; i++) {
    StaticMapping.addNodeToRack(hosts[i], racks[i]);
  }
  for (String s : trackers) {
    FakeObjectUtilities.establishFirstContact(jt, s);
  }
}
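Several of the fixtures in these examples pair conf.setClass("topology.node.switch.mapping.impl", ...) with StaticMapping.addNodeToRack(...). As a hedged sketch of what that configuration key does (the exact call site, and a caching wrapper around the mapper, vary across Hadoop versions), the framework instantiates the configured class reflectively and then calls resolve() on it:

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.ScriptBasedMapping;
import org.apache.hadoop.net.StaticMapping;
import org.apache.hadoop.util.ReflectionUtils;

public class TopologyMappingSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setClass("topology.node.switch.mapping.impl",
        StaticMapping.class, DNSToSwitchMapping.class);

    // Instantiate whatever class the key names, roughly as the framework does;
    // ScriptBasedMapping is Hadoop's default when the key is unset.
    DNSToSwitchMapping mapping = ReflectionUtils.newInstance(
        conf.getClass("topology.node.switch.mapping.impl",
            ScriptBasedMapping.class, DNSToSwitchMapping.class),
        conf);

    StaticMapping.addNodeToRack("host0", "/RACK0");
    System.out.println(mapping.resolve(Arrays.asList("host0")));
  }
}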
Example 6: run
import org.apache.hadoop.net.StaticMapping; // import the required package/class
/**
 * Create the job tracker and run it.
 */
public void run() {
  try {
    jc = (jc == null) ? createJobConf() : createJobConf(jc);
    File f = new File("build/test/mapred/local").getAbsoluteFile();
    jc.set("mapred.local.dir", f.getAbsolutePath());
    jc.setClass("topology.node.switch.mapping.impl",
        StaticMapping.class, DNSToSwitchMapping.class);
    String id =
        new SimpleDateFormat("yyyyMMddHHmmssSSS").format(new Date());
    tracker = JobTracker.startTracker(jc, id);
    tracker.offerService();
  } catch (Throwable e) {
    LOG.error("Job tracker crashed", e);
    isActive = false;
  }
}
Example 7: startTaskTrackers
import org.apache.hadoop.net.StaticMapping; // import the required package/class
/**
 * Start simulated task trackers based on topology.
 * @param clusterStory the cluster topology
 * @param now time stamp when the simulator is started; {@link SimulatorTaskTracker}s
 *            are started shortly after this time stamp
 */
void startTaskTrackers(ClusterStory clusterStory, long now) {
  /** port assigned to TTs, incremented by 1 for each TT */
  int port = 10000;
  long ms = now + 100;
  for (MachineNode node : clusterStory.getMachines()) {
    String hostname = node.getName();
    RackNode rackNode = node.getRackNode();
    StaticMapping.addNodeToRack(hostname, rackNode.getName());
    String taskTrackerName = "tracker_" + hostname + ":localhost/127.0.0.1:"
        + port;
    port++;
    SimulatorTaskTracker tt = new SimulatorTaskTracker(jt, taskTrackerName,
        hostname, node.getMapSlots(), node.getReduceSlots());
    queue.addAll(tt.init(ms++));
  }
}
Example 8: startTaskTracker
import org.apache.hadoop.net.StaticMapping; // import the required package/class
public void startTaskTracker(String host, String rack, int idx, int numDir)
    throws IOException {
  if (rack != null) {
    StaticMapping.addNodeToRack(host, rack);
  }
  if (host != null) {
    NetUtils.addStaticResolution(host, "localhost");
    try {
      InetAddress addr = InetAddress.getByName(host);
      NetUtils.addStaticResolution(addr.getHostAddress(), "localhost");
    } catch (UnknownHostException e) {
      // ignore: the hostname resolution added above is sufficient
    }
  }
  TaskTrackerRunner taskTracker;
  taskTracker = new TaskTrackerRunner(idx, numDir, host, conf, rjtFailureInjector);
  addTaskTracker(taskTracker);
}
Example 9: run
import org.apache.hadoop.net.StaticMapping; // import the required package/class
/**
 * Create the job tracker and run it.
 */
public void run() {
  try {
    jc = (jc == null) ? createJobConf() : createJobConf(jc);
    File f = new File("build/test/mapred/local").getAbsoluteFile();
    jc.set("mapred.local.dir", f.getAbsolutePath());
    jc.setClass("topology.node.switch.mapping.impl",
        StaticMapping.class, DNSToSwitchMapping.class);
    final String id =
        new SimpleDateFormat("yyyyMMddHHmmssSSS").format(new Date());
    if (ugi == null) {
      ugi = UserGroupInformation.getCurrentUser();
    }
    tracker = ugi.doAs(new PrivilegedExceptionAction<JobTracker>() {
      public JobTracker run() throws InterruptedException, IOException {
        return JobTracker.startTracker(jc, id);
      }
    });
    tracker.offerService();
  } catch (Throwable e) {
    LOG.error("Job tracker crashed", e);
    isActive = false;
  }
}
Example 10: configureJobConf
import org.apache.hadoop.net.StaticMapping; // import the required package/class
static void configureJobConf(JobConf conf, String namenode,
    int clusterManagerPort, int proxyJobTrackerPort,
    UnixUserGroupInformation ugi) {
  FileSystem.setDefaultUri(conf, namenode);
  conf.set(CoronaConf.CM_ADDRESS, "localhost:" + clusterManagerPort);
  conf.set(CoronaConf.PROXY_JOB_TRACKER_ADDRESS,
      "localhost:" + proxyJobTrackerPort);
  conf.set("mapred.job.tracker", "corona");
  conf.set("mapred.job.tracker.http.address", "127.0.0.1:0");
  conf.setClass("topology.node.switch.mapping.impl",
      StaticMapping.class, DNSToSwitchMapping.class);
  conf.set("mapred.job.tracker.class", CoronaJobTracker.class.getName());
  if (ugi != null) {
    conf.set("mapred.system.dir", "/mapred/system");
    UnixUserGroupInformation.saveToConf(conf,
        UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
  }
  // for debugging, have all task output sent to the test output
  JobClient.setTaskOutputFilter(conf, JobClient.TaskStatusFilter.ALL);
}
Example 11: startTaskTracker
import org.apache.hadoop.net.StaticMapping; // import the required package/class
/**
 * Start the tasktracker.
 */
public void startTaskTracker(String host, String rack, int idx, int numDir)
    throws IOException {
  if (rack != null) {
    StaticMapping.addNodeToRack(host, rack);
  }
  if (host != null) {
    NetUtils.addStaticResolution(host, "localhost");
  }
  TaskTrackerRunner taskTracker;
  taskTracker = new TaskTrackerRunner(idx, numDir, host, conf);
  Thread taskTrackerThread = new Thread(taskTracker);
  taskTrackerList.add(taskTracker);
  taskTrackerThreadList.add(taskTrackerThread);
  taskTrackerThread.start();
  ++numTaskTrackers;
}
Example 12: suite
import org.apache.hadoop.net.StaticMapping; // import the required package/class
public static Test suite() {
  TestSetup setup =
      new TestSetup(new TestSuite(TestRackAwareTaskPlacement.class)) {
    protected void setUp() throws Exception {
      JobConf conf = new JobConf();
      conf.set(JTConfig.JT_IPC_ADDRESS, "localhost:0");
      conf.set(JTConfig.JT_HTTP_ADDRESS, "0.0.0.0:0");
      conf.setClass("topology.node.switch.mapping.impl",
          StaticMapping.class, DNSToSwitchMapping.class);
      conf.set(JTConfig.JT_INSTRUMENTATION,
          FakeJobTrackerMetricsInst.class.getName());
      jobTracker = new FakeJobTracker(conf, new FakeClock(), trackers);
      fakeInst = (FakeJobTrackerMetricsInst) jobTracker.getInstrumentation();
      // set up the topology information
      for (int i = 0; i < allHosts.length; i++) {
        StaticMapping.addNodeToRack(allHosts[i], allRacks[i]);
      }
      for (String tracker : trackers) {
        FakeObjectUtilities.establishFirstContact(jobTracker, tracker);
      }
    }
  };
  return setup;
}
Example 13: run
import org.apache.hadoop.net.StaticMapping; // import the required package/class
/**
 * Create the job tracker and run it.
 */
public void run() {
  try {
    jc = (jc == null) ? createJobConf() : createJobConf(jc);
    File f = new File("build/test/mapred/local").getAbsoluteFile();
    jc.set(MRConfig.LOCAL_DIR, f.getAbsolutePath());
    jc.setClass("topology.node.switch.mapping.impl",
        StaticMapping.class, DNSToSwitchMapping.class);
    final String id =
        new SimpleDateFormat("yyyyMMddHHmmssSSS").format(new Date());
    if (ugi == null) {
      ugi = UserGroupInformation.getLoginUser();
    }
    tracker = ugi.doAs(new PrivilegedExceptionAction<JobTracker>() {
      public JobTracker run() throws InterruptedException, IOException {
        return JobTracker.startTracker(jc, clock, id);
      }
    });
    tracker.offerService();
  } catch (Throwable e) {
    LOG.error("Job tracker crashed", e);
    isActive = false;
  }
}
Example 14: suite
import org.apache.hadoop.net.StaticMapping; // import the required package/class
public static Test suite() {
  TestSetup setup = new TestSetup(new TestSuite(TestJobInProgress.class)) {
    protected void setUp() throws Exception {
      JobConf conf = new JobConf();
      conf.set(JTConfig.JT_IPC_ADDRESS, "localhost:0");
      conf.set(JTConfig.JT_HTTP_ADDRESS, "0.0.0.0:0");
      conf.setClass("topology.node.switch.mapping.impl",
          StaticMapping.class, DNSToSwitchMapping.class);
      jobTracker = new FakeJobTracker(conf, new FakeClock(), trackers);
      // set up the topology information
      for (int i = 0; i < hosts.length; i++) {
        StaticMapping.addNodeToRack(hosts[i], racks[i]);
      }
      for (String s : trackers) {
        FakeObjectUtilities.establishFirstContact(jobTracker, s);
      }
    }
  };
  return setup;
}
Example 15: createMumakConf
import org.apache.hadoop.net.StaticMapping; // import the required package/class
/**
 * Creates the configuration for the mumak simulation. This is kept modular mostly
 * for testing purposes, so that the standard configuration can be modified before
 * passing it to the init() function.
 * @return the JobConf for the SimulatorJobTracker
 */
JobConf createMumakConf() {
  JobConf jobConf = new JobConf(getConf());
  jobConf.setClass("topology.node.switch.mapping.impl",
      StaticMapping.class, DNSToSwitchMapping.class);
  jobConf.set("fs.default.name", "file:///");
  jobConf.set("mapred.job.tracker", "localhost:8012");
  jobConf.setInt("mapred.jobtracker.job.history.block.size", 512);
  jobConf.setInt("mapred.jobtracker.job.history.buffer.size", 512);
  jobConf.setLong("mapred.tasktracker.expiry.interval", 5000);
  jobConf.setInt("mapred.reduce.copy.backoff", 4);
  jobConf.setLong("mapred.job.reuse.jvm.num.tasks", -1);
  jobConf.setUser("mumak");
  jobConf.set("mapred.system.dir",
      jobConf.get("hadoop.log.dir", "/tmp/hadoop-" + jobConf.getUser())
          + "/mapred/system");
  return jobConf;
}