当前位置: 首页>>代码示例>>Java>>正文


Java StaticMapping类代码示例

本文整理汇总了Java中org.apache.hadoop.net.StaticMapping的典型用法代码示例。如果您正苦于以下问题:Java StaticMapping类的具体用法?Java StaticMapping怎么用?Java StaticMapping使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


StaticMapping类属于org.apache.hadoop.net包,在下文中一共展示了StaticMapping类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: setup

import org.apache.hadoop.net.StaticMapping; //导入依赖的package包/类
/**
 * Per-test fixture: resets the shared static rack map, then starts a
 * 5-datanode MiniDFSCluster with a fixed rack/host topology and captures
 * handles to the NameNode RPC, the namesystem, and a default permission.
 */
@Before
public void setup() throws IOException {
  // Clear any rack mappings left behind by a previous test.
  StaticMapping.resetMap();

  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);

  // Topology: two nodes on /RACK0, two on /RACK2, one on /RACK3.
  final String[] rackLayout = { "/RACK0", "/RACK0", "/RACK2", "/RACK3", "/RACK2" };
  final String[] hostLayout = { "/host0", "/host1", "/host2", "/host3", "/host4" };

  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(5)
      .racks(rackLayout)
      .hosts(hostLayout)
      .build();
  cluster.waitActive();

  nameNodeRpc = cluster.getNameNodeRpc();
  namesystem = cluster.getNamesystem();
  perm = new PermissionStatus("TestDefaultBlockPlacementPolicy", null,
      FsPermission.getDefault());
}
 
开发者ID:naver,项目名称:hadoop,代码行数:18,代码来源:TestDefaultBlockPlacementPolicy.java

示例2: testPlacementWithLocalRackNodesDecommissioned

import org.apache.hadoop.net.StaticMapping; //导入依赖的package包/类
/**
 * Verify decommissioned nodes should not be selected: the datanode sharing
 * the client's rack is put into decommissioning and placement is checked.
 * Decommissioning is always rolled back so later tests see a healthy cluster.
 */
@Test
public void testPlacementWithLocalRackNodesDecommissioned() throws Exception {
  String clientMachine = "client.foo.com";
  String clientRack = "/RACK3";
  // Pin the client host onto RACK3 so the policy treats that rack as local.
  StaticMapping.addNodeToRack(clientMachine, clientRack);

  final DatanodeManager datanodeManager =
      namesystem.getBlockManager().getDatanodeManager();
  DatanodeDescriptor localRackNode = datanodeManager.getDatanode(
      cluster.getDataNodes().get(3).getDatanodeId());
  // Sanity check: datanode #3 really lives on the client's rack.
  assertEquals(localRackNode.getNetworkLocation(), clientRack);

  datanodeManager.getDecomManager().startDecommission(localRackNode);
  try {
    testPlacement(clientMachine, clientRack, false);
  } finally {
    datanodeManager.getDecomManager().stopDecommission(localRackNode);
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:21,代码来源:TestDefaultBlockPlacementPolicy.java

示例3: setup

import org.apache.hadoop.net.StaticMapping; //导入依赖的package包/类
/**
 * Per-test fixture: resets the shared static rack map, then brings up a
 * 5-datanode MiniDFSCluster with a fixed rack/host topology and captures
 * handles to the NameNode RPC, the namesystem, and a default permission.
 */
@Before
public void setup() throws IOException {
  // Clear any rack mappings left behind by a previous test.
  StaticMapping.resetMap();

  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);

  // Topology: two nodes on /RACK0, two on /RACK2, one on /RACK3.
  final String[] rackLayout = { "/RACK0", "/RACK0", "/RACK2", "/RACK3", "/RACK2" };
  final String[] hostLayout = { "/host0", "/host1", "/host2", "/host3", "/host4" };

  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(5)
      .racks(rackLayout)
      .hosts(hostLayout)
      .build();
  cluster.waitActive();

  nameNodeRpc = cluster.getNameNodeRpc();
  namesystem = cluster.getNamesystem();
  perm = new PermissionStatus("TestDefaultBlockPlacementPolicy", null,
      FsPermission.getDefault());
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:19,代码来源:TestDefaultBlockPlacementPolicy.java

示例4: run

import org.apache.hadoop.net.StaticMapping; //导入依赖的package包/类
/**
 * Create the job tracker (running as {@code ugi}, defaulting to the current
 * user) and serve requests until it is shut down or crashes.
 */
public void run() {
  try {
    jc = (jc == null) ? createJobConf() : createJobConf(jc);

    // Point mapred.local.dir at the test build area
    // (overridable via -Dtest.build.data).
    String localPath = System.getProperty("test.build.data",
        "build/test/mapred/local");
    jc.set("mapred.local.dir",
        new File(localPath).getAbsoluteFile().getAbsolutePath());

    // Rack resolution in tests goes through the in-memory StaticMapping.
    jc.setClass("topology.node.switch.mapping.impl",
        StaticMapping.class, DNSToSwitchMapping.class);

    // Timestamp-based tracker identifier, unique per start.
    final String id =
        new SimpleDateFormat("yyyyMMddHHmmssSSS").format(new Date());

    if (ugi == null) {
      ugi = UserGroupInformation.getCurrentUser();
    }
    tracker = ugi.doAs(new PrivilegedExceptionAction<JobTracker>() {
      public JobTracker run() throws InterruptedException, IOException {
        return JobTracker.startTracker(jc, id);
      }
    });
    tracker.offerService(); // blocks while the tracker is live
  } catch (Throwable e) {
    LOG.error("Job tracker crashed", e);
    isActive = false;
  }
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:29,代码来源:MiniMRCluster.java

示例5: setUp

import org.apache.hadoop.net.StaticMapping; //导入依赖的package包/类
/**
 * One-time fixture: starts DFS and MR mini-clusters wired to StaticMapping,
 * registers the test topology, and makes first contact for every tracker.
 */
@BeforeClass
public static void setUp() throws Exception {
  Configuration conf = new Configuration();
  // Bind the JobTracker RPC and HTTP endpoints to ephemeral ports.
  conf.set("mapreduce.jobtracker.address", "localhost:0");
  conf.set("mapreduce.jobtracker.http.address", "0.0.0.0:0");
  conf.setClass("topology.node.switch.mapping.impl", StaticMapping.class,
      DNSToSwitchMapping.class);

  dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
  String namenodeUri = dfsCluster.getFileSystem().getUri().toString();
  mrCluster = new MiniMRCluster(numSlaves, namenodeUri, 1);
  jt = mrCluster.getJobTrackerRunner().getJobTracker();

  // Register the host -> rack topology, then introduce each tracker to the JT.
  for (int idx = 0; idx < hosts.length; idx++) {
    StaticMapping.addNodeToRack(hosts[idx], racks[idx]);
  }
  for (String tracker : trackers) {
    FakeObjectUtilities.establishFirstContact(jt, tracker);
  }
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:20,代码来源:TestJobInProgress.java

示例6: run

import org.apache.hadoop.net.StaticMapping; //导入依赖的package包/类
/**
 * Create the job tracker and serve requests until it is shut down or crashes.
 */
public void run() {
  try {
    jc = (jc == null) ? createJobConf() : createJobConf(jc);

    File localDir = new File("build/test/mapred/local").getAbsoluteFile();
    jc.set("mapred.local.dir", localDir.getAbsolutePath());
    // Rack resolution in tests goes through the in-memory StaticMapping.
    jc.setClass("topology.node.switch.mapping.impl",
        StaticMapping.class, DNSToSwitchMapping.class);

    // Timestamp-based tracker identifier, unique per start.
    String id = new SimpleDateFormat("yyyyMMddHHmmssSSS").format(new Date());
    tracker = JobTracker.startTracker(jc, id);
    tracker.offerService(); // blocks while the tracker is live
  } catch (Throwable e) {
    LOG.error("Job tracker crashed", e);
    isActive = false;
  }
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:20,代码来源:MiniMRCluster.java

示例7: startTaskTrackers

import org.apache.hadoop.net.StaticMapping; //导入依赖的package包/类
/**
 * Start simulated task trackers based on topology.
 * @param clusterStory The cluster topology.
 * @param now time stamp when the simulator is started;
 *    {@link SimulatorTaskTracker}s are started shortly after this time stamp
 */
void startTaskTrackers(ClusterStory clusterStory, long now) {
  // Each TT gets its own port, starting here and incremented by 1 per TT.
  int nextPort = 10000;
  // TT start times are staggered one tick apart, shortly after `now`.
  long startTime = now + 100;

  for (MachineNode machine : clusterStory.getMachines()) {
    String hostname = machine.getName();
    // Record the host's rack so topology-aware code can resolve it.
    StaticMapping.addNodeToRack(hostname, machine.getRackNode().getName());

    String taskTrackerName =
        "tracker_" + hostname + ":localhost/127.0.0.1:" + nextPort++;
    SimulatorTaskTracker tt = new SimulatorTaskTracker(jt, taskTrackerName,
        hostname, machine.getMapSlots(), machine.getReduceSlots());
    queue.addAll(tt.init(startTime++));
  }
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:25,代码来源:SimulatorEngine.java

示例8: startTaskTracker

import org.apache.hadoop.net.StaticMapping; //导入依赖的package包/类
/**
 * Registers the tracker's host in the static rack/DNS maps (when given)
 * and starts a new task tracker runner.
 *
 * @param host   host name for the tracker; may be null to skip resolution setup
 * @param rack   rack for the host; may be null to skip rack registration
 * @param idx    index of this tracker within the cluster
 * @param numDir number of local directories for the tracker
 * @throws IOException if the tracker runner fails to start
 */
public void startTaskTracker(String host, String rack, int idx, int numDir)
    throws IOException {
  if (rack != null) {
    // Record host -> rack so topology-aware code can resolve this tracker.
    StaticMapping.addNodeToRack(host, rack);
  }
  if (host != null) {
    // Make the fake host name resolve to localhost for the test.
    NetUtils.addStaticResolution(host, "localhost");
    try {
      InetAddress addr = InetAddress.getByName(host);
      NetUtils.addStaticResolution(addr.getHostAddress(),"localhost");
    } catch (UnknownHostException e) {
      // Best effort: if the name cannot be resolved, skip the address-based
      // static mapping and continue with tracker startup.
    }
  }
  TaskTrackerRunner taskTracker;
  taskTracker = new TaskTrackerRunner(idx, numDir, host, conf, rjtFailureInjector);
  addTaskTracker(taskTracker);
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:18,代码来源:MiniCoronaCluster.java

示例9: run

import org.apache.hadoop.net.StaticMapping; //导入依赖的package包/类
/**
 * Create the job tracker (running as {@code ugi}, defaulting to the current
 * user) and serve requests until it is shut down or crashes.
 */
public void run() {
  try {
    jc = (jc == null) ? createJobConf() : createJobConf(jc);

    File localDir = new File("build/test/mapred/local").getAbsoluteFile();
    jc.set("mapred.local.dir", localDir.getAbsolutePath());
    // Rack resolution in tests goes through the in-memory StaticMapping.
    jc.setClass("topology.node.switch.mapping.impl",
        StaticMapping.class, DNSToSwitchMapping.class);

    // Timestamp-based tracker identifier, unique per start.
    final String id =
        new SimpleDateFormat("yyyyMMddHHmmssSSS").format(new Date());
    if (ugi == null) {
      ugi = UserGroupInformation.getCurrentUser();
    }
    tracker = ugi.doAs(new PrivilegedExceptionAction<JobTracker>() {
      public JobTracker run() throws InterruptedException, IOException {
        return JobTracker.startTracker(jc, id);
      }
    });
    tracker.offerService(); // blocks while the tracker is live
  } catch (Throwable e) {
    LOG.error("Job tracker crashed", e);
    isActive = false;
  }
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:27,代码来源:MiniMRCluster.java

示例10: configureJobConf

import org.apache.hadoop.net.StaticMapping; //导入依赖的package包/类
/**
 * Populates {@code conf} so jobs submitted with it run against the Corona
 * mini-cluster: default filesystem, cluster-manager and proxy-job-tracker
 * addresses, static rack resolution, and (optionally) the caller's UGI.
 */
static void configureJobConf(JobConf conf, String namenode,
    int clusterManagerPort, int proxyJobTrackerPort,
    UnixUserGroupInformation ugi) {
  FileSystem.setDefaultUri(conf, namenode);

  // Corona endpoints all live on localhost in the mini-cluster.
  conf.set(CoronaConf.CM_ADDRESS, "localhost:" + clusterManagerPort);
  conf.set(CoronaConf.PROXY_JOB_TRACKER_ADDRESS,
      "localhost:" + proxyJobTrackerPort);
  conf.set("mapred.job.tracker", "corona");
  conf.set("mapred.job.tracker.http.address", "127.0.0.1:0");
  conf.set("mapred.job.tracker.class", CoronaJobTracker.class.getName());

  // Rack resolution in tests goes through the in-memory StaticMapping.
  conf.setClass("topology.node.switch.mapping.impl",
      StaticMapping.class, DNSToSwitchMapping.class);

  if (ugi != null) {
    conf.set("mapred.system.dir", "/mapred/system");
    UnixUserGroupInformation.saveToConf(conf,
        UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
  }

  // for debugging have all task output sent to the test output
  JobClient.setTaskOutputFilter(conf, JobClient.TaskStatusFilter.ALL);
}
 
开发者ID:iVCE,项目名称:RDFS,代码行数:23,代码来源:MiniCoronaCluster.java

示例11: startTaskTracker

import org.apache.hadoop.net.StaticMapping; //导入依赖的package包/类
/**
 * Start the tasktracker: registers the host's rack and DNS mappings when
 * given, then launches the tracker on its own thread and counts it.
 */
public void startTaskTracker(String host, String rack, int idx, int numDir) 
throws IOException {
  if (rack != null) {
    // Record host -> rack so topology-aware code can resolve this tracker.
    StaticMapping.addNodeToRack(host, rack);
  }
  if (host != null) {
    // Make the fake host name resolve to localhost for the test.
    NetUtils.addStaticResolution(host, "localhost");
  }

  TaskTrackerRunner runner = new TaskTrackerRunner(idx, numDir, host, conf);
  Thread runnerThread = new Thread(runner);
  taskTrackerList.add(runner);
  taskTrackerThreadList.add(runnerThread);
  runnerThread.start();
  ++numTaskTrackers;
}
 
开发者ID:thisisvoa,项目名称:hadoop-0.20,代码行数:21,代码来源:MiniMRCluster.java

示例12: suite

import org.apache.hadoop.net.StaticMapping; //导入依赖的package包/类
/**
 * Wraps the suite in a TestSetup that builds a FakeJobTracker with a static
 * topology and introduces every tracker to it before the tests run.
 */
public static Test suite() {
  return new TestSetup(new TestSuite(TestRackAwareTaskPlacement.class)) {
    protected void setUp() throws Exception {
      JobConf conf = new JobConf();
      // Ephemeral ports for the JT endpoints.
      conf.set(JTConfig.JT_IPC_ADDRESS, "localhost:0");
      conf.set(JTConfig.JT_HTTP_ADDRESS, "0.0.0.0:0");
      conf.setClass("topology.node.switch.mapping.impl",
          StaticMapping.class, DNSToSwitchMapping.class);
      conf.set(JTConfig.JT_INSTRUMENTATION,
          FakeJobTrackerMetricsInst.class.getName());

      jobTracker = new FakeJobTracker(conf, new FakeClock(), trackers);
      fakeInst = (FakeJobTrackerMetricsInst) jobTracker.getInstrumentation();

      // Register the host -> rack topology, then make first contact.
      for (int idx = 0; idx < allHosts.length; idx++) {
        StaticMapping.addNodeToRack(allHosts[idx], allRacks[idx]);
      }
      for (String tracker : trackers) {
        FakeObjectUtilities.establishFirstContact(jobTracker, tracker);
      }
    }
  };
}
 
开发者ID:rekhajoshm,项目名称:mapreduce-fork,代码行数:25,代码来源:TestRackAwareTaskPlacement.java

示例13: run

import org.apache.hadoop.net.StaticMapping; //导入依赖的package包/类
/**
 * Create the job tracker and run it.
 */
public void run() {
  try {
    jc = (jc == null) ? createJobConf() : createJobConf(jc);
    File f = new File("build/test/mapred/local").getAbsoluteFile();
    jc.set(MRConfig.LOCAL_DIR, f.getAbsolutePath());
    jc.setClass("topology.node.switch.mapping.impl", 
        StaticMapping.class, DNSToSwitchMapping.class);
    final String id = 
      new SimpleDateFormat("yyyyMMddHHmmssSSS").format(new Date());
    if (ugi == null) {
      ugi = UserGroupInformation.getLoginUser();
    }
    tracker = ugi.doAs(new PrivilegedExceptionAction<JobTracker>() {
      public JobTracker run() throws InterruptedException, IOException {
        return JobTracker.startTracker(jc, clock, id);
      }
    });
    tracker.offerService();
  } catch (Throwable e) {
    LOG.error("Job tracker crashed", e);
    isActive = false;
  }
}
 
开发者ID:rekhajoshm,项目名称:mapreduce-fork,代码行数:27,代码来源:MiniMRCluster.java

示例14: suite

import org.apache.hadoop.net.StaticMapping; //导入依赖的package包/类
/**
 * Wraps the suite in a TestSetup that builds a FakeJobTracker with a static
 * topology and introduces every tracker to it before the tests run.
 */
public static Test suite() {
  return new TestSetup(new TestSuite(TestJobInProgress.class)) {
    protected void setUp() throws Exception {
      JobConf conf = new JobConf();
      // Ephemeral ports for the JT endpoints.
      conf.set(JTConfig.JT_IPC_ADDRESS, "localhost:0");
      conf.set(JTConfig.JT_HTTP_ADDRESS, "0.0.0.0:0");
      conf.setClass("topology.node.switch.mapping.impl",
          StaticMapping.class, DNSToSwitchMapping.class);

      jobTracker = new FakeJobTracker(conf, new FakeClock(), trackers);

      // Register the host -> rack topology, then make first contact.
      for (int idx = 0; idx < hosts.length; idx++) {
        StaticMapping.addNodeToRack(hosts[idx], racks[idx]);
      }
      for (String tracker : trackers) {
        FakeObjectUtilities.establishFirstContact(jobTracker, tracker);
      }
    }
  };
}
 
开发者ID:rekhajoshm,项目名称:mapreduce-fork,代码行数:21,代码来源:TestJobInProgress.java

示例15: createMumakConf

import org.apache.hadoop.net.StaticMapping; //导入依赖的package包/类
/**
 * Creates the configuration for mumak simulation. This is kept modular
 * mostly for testing purposes, so the standard configuration can be
 * modified before passing it to the init() function.
 * @return JobConf: the configuration for the SimulatorJobTracker
 */
JobConf createMumakConf() {
  JobConf jobConf = new JobConf(getConf());

  // Rack resolution in the simulator goes through the in-memory StaticMapping.
  jobConf.setClass("topology.node.switch.mapping.impl",
      StaticMapping.class, DNSToSwitchMapping.class);

  // Local filesystem and a fixed simulated job tracker address.
  jobConf.set("fs.default.name", "file:///");
  jobConf.set("mapred.job.tracker", "localhost:8012");

  jobConf.setInt("mapred.jobtracker.job.history.block.size", 512);
  jobConf.setInt("mapred.jobtracker.job.history.buffer.size", 512);
  jobConf.setLong("mapred.tasktracker.expiry.interval", 5000);
  jobConf.setInt("mapred.reduce.copy.backoff", 4);
  jobConf.setLong("mapred.job.reuse.jvm.num.tasks", -1);

  jobConf.setUser("mumak");
  jobConf.set("mapred.system.dir", 
      jobConf.get("hadoop.log.dir", "/tmp/hadoop-"+jobConf.getUser()) + "/mapred/system");

  return jobConf;
}
 
开发者ID:rekhajoshm,项目名称:mapreduce-fork,代码行数:25,代码来源:SimulatorEngine.java


注:本文中的org.apache.hadoop.net.StaticMapping类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。