Java JobTrackerStatus Class Code Examples

This article collects typical usages of the Java class org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus. If you are wondering what the JobTrackerStatus class does, or how and when to use it, the curated class code examples below should help.


The JobTrackerStatus class belongs to the org.apache.hadoop.mapreduce.Cluster package. Twelve code examples of the class are shown below, sorted by popularity by default.
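
Before the per-project examples, here is a minimal, self-contained sketch of the most common way client code consumes JobTrackerStatus: polling JobClient.getClusterStatus() until the JobTracker reports RUNNING. This is the same pattern that Example 5 below wraps into a test utility. The class name JobTrackerStatusCheck and the bare JobConf are illustrative assumptions, and the sketch presumes a reachable MR1 (JobTracker-based) cluster configuration.

import java.io.IOException;

import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus;

public class JobTrackerStatusCheck {
  public static void main(String[] args) throws IOException, InterruptedException {
    // Assumes the classpath/configuration points at a running MR1 cluster (hypothetical setup).
    JobClient jobClient = new JobClient(new JobConf());
    ClusterStatus status = jobClient.getClusterStatus();
    // Poll until the JobTracker leaves INITIALIZING and reports RUNNING.
    while (status.getJobTrackerStatus() != JobTrackerStatus.RUNNING) {
      Thread.sleep(100); // brief back-off before asking again
      status = jobClient.getClusterStatus();
    }
    System.out.println("JobTracker status: " + status.getJobTrackerStatus());
  }
}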

Example 1: ClusterStatus

import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; // import the required package/class
ClusterStatus(int trackers, int blacklists, long ttExpiryInterval, 
    int maps, int reduces, int maxMaps, int maxReduces, 
    JobTrackerStatus status, int numDecommissionedNodes,
    long used_memory, long max_memory) {
  numActiveTrackers = trackers;
  numBlacklistedTrackers = blacklists;
  this.numExcludedNodes = numDecommissionedNodes;
  this.ttExpiryInterval = ttExpiryInterval;
  map_tasks = maps;
  reduce_tasks = reduces;
  max_map_tasks = maxMaps;
  max_reduce_tasks = maxReduces;
  this.status = status;
  this.used_memory = used_memory;
  this.max_memory = max_memory;
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 17, Source: ClusterStatus.java

Example 2: getClusterStatus

import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; // import the required package/class
public synchronized ClusterStatus getClusterStatus(boolean detailed) {
  synchronized (taskTrackers) {
    if (detailed) {
      List<List<String>> trackerNames = taskTrackerNames();
      return new ClusterStatus(trackerNames.get(0),
          trackerNames.get(1),
          TASKTRACKER_EXPIRY_INTERVAL,
          totalMaps,
          totalReduces,
          totalMapTaskCapacity,
          totalReduceTaskCapacity, 
          JobTrackerStatus.valueOf(state.name()), getExcludedNodes().size()
          );
    } else {
      return new ClusterStatus(taskTrackers.size() - 
          getBlacklistedTrackerCount(),
          getBlacklistedTrackerCount(),
          TASKTRACKER_EXPIRY_INTERVAL,
          totalMaps,
          totalReduces,
          totalMapTaskCapacity,
          totalReduceTaskCapacity, 
          JobTrackerStatus.valueOf(state.name()), getExcludedNodes().size());
    }
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 27, Source: JobTracker.java

Example 3: testClientFailover

import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; // import the required package/class
@Test(timeout=60000)
public void testClientFailover() throws Exception {
  LOG.info("Running testClientFailover");
  startCluster();

  // Test with client. c.f. HATestUtil.setFailoverConfigurations
  JobClient jc = new JobClient(conf);
  assertEquals("client sees jt running", JobTrackerStatus.RUNNING,
      jc.getClusterStatus().getJobTrackerStatus());

  // failover to jt2
  FailoverController fc = new FailoverController(conf, 
      RequestSource.REQUEST_BY_USER);
  fc.failover(target1, target2, false, false);
  
  cluster.waitActive();
  
  assertEquals("jt2 running", JobTrackerStatus.RUNNING,
      jt2.getJobTracker().getClusterStatus().getJobTrackerStatus());
  assertNull("jt1 not running", jt1.getJobTracker());
  
  assertEquals("client still sees jt running", JobTrackerStatus.RUNNING,
      jc.getClusterStatus().getJobTrackerStatus());
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 25, Source: TestHAStateTransitions.java

Example 4: readFields

import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; // import the required package/class
public void readFields(DataInput in) throws IOException {
  numActiveTrackers = in.readInt();
  int numTrackerNames = in.readInt();
  if (numTrackerNames > 0) {
    for (int i = 0; i < numTrackerNames; i++) {
      String name = Text.readString(in);
      activeTrackers.add(name);
    }
  }
  numBlacklistedTrackers = in.readInt();
  int blackListTrackerInfoSize = in.readInt();
  if(blackListTrackerInfoSize > 0) {
    for (int i = 0; i < blackListTrackerInfoSize; i++) {
      BlackListInfo info = new BlackListInfo();
      info.readFields(in);
      blacklistedTrackersInfo.add(info);
    }
  }
  numExcludedNodes = in.readInt();
  ttExpiryInterval = in.readLong();
  map_tasks = in.readInt();
  reduce_tasks = in.readInt();
  max_map_tasks = in.readInt();
  max_reduce_tasks = in.readInt();
  status = WritableUtils.readEnum(in, JobTrackerStatus.class);
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 27, Source: ClusterStatus.java

Example 5: waitForJobTracker

import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; // import the required package/class
/**
 * Wait for the jobtracker to be RUNNING.
 */
static void waitForJobTracker(JobClient jobClient) {
  while (true) {
    try {
      ClusterStatus status = jobClient.getClusterStatus();
      while (status.getJobTrackerStatus() != JobTrackerStatus.RUNNING) {
        waitFor(100);
        status = jobClient.getClusterStatus();
      }
      break; // means that the jt is ready
    } catch (IOException ioe) {}
  }
}
 
Developer: naver, Project: hadoop, Lines: 16, Source: UtilsForTests.java

Example 6: ClusterStatus

import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; // import the required package/class
/**
 * Construct a new cluster status.
 * 
 * @param trackers no. of tasktrackers in the cluster
 * @param blacklists no of blacklisted task trackers in the cluster
 * @param ttExpiryInterval the tasktracker expiry interval
 * @param maps no. of currently running map-tasks in the cluster
 * @param reduces no. of currently running reduce-tasks in the cluster
 * @param maxMaps the maximum no. of map tasks in the cluster
 * @param maxReduces the maximum no. of reduce tasks in the cluster
 * @param status the {@link JobTrackerStatus} of the <code>JobTracker</code>
 * @param numDecommissionedNodes number of decommission trackers
 * @param numGrayListedTrackers number of graylisted trackers
 */
ClusterStatus(int trackers, int blacklists, long ttExpiryInterval, int maps,
    int reduces, int maxMaps, int maxReduces, JobTrackerStatus status,
    int numDecommissionedNodes, int numGrayListedTrackers) {
  numActiveTrackers = trackers;
  numBlacklistedTrackers = blacklists;
  this.numExcludedNodes = numDecommissionedNodes;
  this.ttExpiryInterval = ttExpiryInterval;
  map_tasks = maps;
  reduce_tasks = reduces;
  max_map_tasks = maxMaps;
  max_reduce_tasks = maxReduces;
  this.status = status;
  this.grayListedTrackers = numGrayListedTrackers;
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: ClusterStatus.java

Example 7: readFields

import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; // import the required package/class
public void readFields(DataInput in) throws IOException {
  numActiveTrackers = in.readInt();
  int numTrackerNames = in.readInt();
  if (numTrackerNames > 0) {
    for (int i = 0; i < numTrackerNames; i++) {
      String name = StringInterner.weakIntern(Text.readString(in));
      activeTrackers.add(name);
    }
  }
  numBlacklistedTrackers = in.readInt();
  int blackListTrackerInfoSize = in.readInt();
  if(blackListTrackerInfoSize > 0) {
    for (int i = 0; i < blackListTrackerInfoSize; i++) {
      BlackListInfo info = new BlackListInfo();
      info.readFields(in);
      blacklistedTrackersInfo.add(info);
    }
  }
  numExcludedNodes = in.readInt();
  ttExpiryInterval = in.readLong();
  map_tasks = in.readInt();
  reduce_tasks = in.readInt();
  max_map_tasks = in.readInt();
  max_reduce_tasks = in.readInt();
  status = WritableUtils.readEnum(in, JobTrackerStatus.class);
  grayListedTrackers = in.readInt();
}
 
Developer: naver, Project: hadoop, Lines: 28, Source: ClusterStatus.java

Example 8: waitActive

import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; // import the required package/class
public void waitActive() throws IOException {
  while (true) {
    for (JobTrackerHADaemon jtHaDaemon : jtHaDaemonList) {
      JobTracker jt = jtHaDaemon.getJobTracker();
      if (jt != null) {
        if (jt.getClusterStatus().getJobTrackerStatus() == JobTrackerStatus.RUNNING) {
          return;
        }
      }
    }
    try {
      Thread.sleep(1000);
    } catch (InterruptedException ie) {}
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 16, Source: MiniMRHACluster.java

Example 9: getClusterStatus

import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; // import the required package/class
@Override
public ClusterStatus getClusterStatus() {
  int numTrackers = trackers.size();
  return new ClusterStatus(numTrackers, 0, 
                           JobTracker.TASKTRACKER_EXPIRY_INTERVAL,
                           maps, reduces,
                           numTrackers * maxMapTasksPerTracker,
                           numTrackers * maxReduceTasksPerTracker,
                           JobTrackerStatus.RUNNING);
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 11, Source: TestJobQueueTaskScheduler.java

Example 10: getClusterStatus

import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; // import the required package/class
public ClusterStatus getClusterStatus() {
  int numTrackers = trackers.size();
  return new ClusterStatus(numTrackers, 0, 
                           JobTracker.TASKTRACKER_EXPIRY_INTERVAL,
                           maps, reduces,
                           numTrackers * maxMapTasksPerTracker,
                           numTrackers * maxReduceTasksPerTracker,
                           JobTrackerStatus.RUNNING);
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 10, Source: TestParallelInitialization.java

Example 11: tearDown

import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; // import the required package/class
@After
public void tearDown() {
  ClusterStatus status = mr.getJobTrackerRunner().getJobTracker()
      .getClusterStatus(false);
  if (status.getJobTrackerStatus() == JobTrackerStatus.RUNNING) {
    mr.shutdown();
  }
  if (dfs != null) {
    dfs.shutdown();
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 12, Source: TestRecoveryManager.java

Example 12: getClusterStatus

import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; // import the required package/class
public ClusterStatus getClusterStatus() {
  int numTrackers = trackers.size();
  return new ClusterStatus(numTrackers, maps, reduces,
      numTrackers * maxMapTasksPerTracker,
      numTrackers * maxReduceTasksPerTracker,
      JobTrackerStatus.RUNNING);
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 8, Source: TestCapacityScheduler.java


Note: The org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce this article without permission.