

Java ClusterStatus.getTaskTrackers Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapred.ClusterStatus.getTaskTrackers. If you are wondering what this method does or how to call it, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.mapred.ClusterStatus.


Six code examples of ClusterStatus.getTaskTrackers are shown below, ordered roughly by popularity.
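Before diving into the full examples, here is a minimal sketch of the common call pattern. It is illustrative only, not taken from any of the projects below, and assumes a reachable JobTracker configured via the classic mapred API:

import java.io.IOException;
import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class TaskTrackerCount {
  public static void main(String[] args) throws IOException {
    // Connect to the JobTracker named in the default configuration.
    JobClient client = new JobClient(new JobConf());
    ClusterStatus status = client.getClusterStatus();
    // getTaskTrackers() reports how many task trackers are currently in the cluster.
    System.out.println("Task trackers: " + status.getTaskTrackers());
  }
}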

Example 1: getSplits

import org.apache.hadoop.mapred.ClusterStatus; // import the class that provides the method
@Override
public List<InputSplit> getSplits(JobContext jobCtxt) throws IOException {
  final JobClient client =
    new JobClient(new JobConf(jobCtxt.getConfiguration()));
  ClusterStatus stat = client.getClusterStatus(true);
  final long toGen =
    jobCtxt.getConfiguration().getLong(GRIDMIX_GEN_BYTES, -1);
  if (toGen < 0) {
    throw new IOException("Invalid/missing generation bytes: " + toGen);
  }
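  // One split per active tracker, each covering an equal share of the bytes to generate.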
  final int nTrackers = stat.getTaskTrackers();
  final long bytesPerTracker = toGen / nTrackers;
  final ArrayList<InputSplit> splits = new ArrayList<InputSplit>(nTrackers);
  final Pattern trackerPattern = Pattern.compile("tracker_([^:]*):.*");
  final Matcher m = trackerPattern.matcher("");
  for (String tracker : stat.getActiveTrackerNames()) {
    m.reset(tracker);
    if (!m.find()) {
      System.err.println("Skipping node: " + tracker);
      continue;
    }
    final String name = m.group(1);
    splits.add(new GenSplit(bytesPerTracker, new String[] { name }));
  }
  return splits;
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: GenerateData.java
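A caveat worth noting: the snippet divides toGen by stat.getTaskTrackers() without checking for zero, so on a cluster reporting no task trackers it would throw ArithmeticException. A defensive variant (an addition of this article, not part of the Hadoop source) could guard the division:

final int nTrackers = stat.getTaskTrackers();
if (nTrackers == 0) {
  // Hypothetical guard: fail fast instead of dividing by zero below.
  throw new IOException("No task trackers available; cannot generate data");
}
final long bytesPerTracker = toGen / nTrackers;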

Example 2: getSplits

import org.apache.hadoop.mapred.ClusterStatus; // import the class that provides the method
@Override
public List<InputSplit> getSplits(JobContext jobCtxt) throws IOException {
  final JobConf jobConf = new JobConf(jobCtxt.getConfiguration());
  final JobClient client = new JobClient(jobConf);
  ClusterStatus stat = client.getClusterStatus(true);
  int numTrackers = stat.getTaskTrackers();
  final int fileCount = jobConf.getInt(GRIDMIX_DISTCACHE_FILE_COUNT, -1);

  // Total size of distributed cache files to be generated
  final long totalSize = jobConf.getLong(GRIDMIX_DISTCACHE_BYTE_COUNT, -1);
  // Get the path of the special file
  String distCacheFileList = jobConf.get(GRIDMIX_DISTCACHE_FILE_LIST);
  if (fileCount < 0 || totalSize < 0 || distCacheFileList == null) {
    throw new RuntimeException("Invalid metadata: #files (" + fileCount
        + "), total_size (" + totalSize + "), filelisturi ("
        + distCacheFileList + ")");
  }

  Path sequenceFile = new Path(distCacheFileList);
  FileSystem fs = sequenceFile.getFileSystem(jobConf);
  FileStatus srcst = fs.getFileStatus(sequenceFile);
  // Consider the number of TTs * mapSlotsPerTracker as number of mappers.
  int numMapSlotsPerTracker = jobConf.getInt(TTConfig.TT_MAP_SLOTS, 2);
  int numSplits = numTrackers * numMapSlotsPerTracker;

  List<InputSplit> splits = new ArrayList<InputSplit>(numSplits);
  LongWritable key = new LongWritable();
  BytesWritable value = new BytesWritable();

  // Average size of data to be generated by each map task
  final long targetSize = Math.max(totalSize / numSplits,
                            DistributedCacheEmulator.AVG_BYTES_PER_MAP);
  long splitStartPosition = 0L;
  long splitEndPosition = 0L;
  long acc = 0L;
  long bytesRemaining = srcst.getLen();
  SequenceFile.Reader reader = null;
  try {
    reader = new SequenceFile.Reader(fs, sequenceFile, jobConf);
    while (reader.next(key, value)) {

      // If adding this file would put this split past the target size,
      // cut the last split and put this file in the next split.
      if (acc + key.get() > targetSize && acc != 0) {
        long splitSize = splitEndPosition - splitStartPosition;
        splits.add(new FileSplit(
            sequenceFile, splitStartPosition, splitSize, (String[])null));
        bytesRemaining -= splitSize;
        splitStartPosition = splitEndPosition;
        acc = 0L;
      }
      acc += key.get();
      splitEndPosition = reader.getPosition();
    }
  } finally {
    if (reader != null) {
      reader.close();
    }
  }
  if (bytesRemaining != 0) {
    splits.add(new FileSplit(
        sequenceFile, splitStartPosition, bytesRemaining, (String[])null));
  }

  return splits;
}
 
Developer: naver, Project: hadoop, Lines: 67, Source: GenerateDistCacheData.java
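As a worked example of the sizing logic: with 10 task trackers and the default of 2 map slots per tracker used above, numSplits is 20, so generating 10 GiB of distributed-cache data targets roughly 512 MiB per split; if totalSize / numSplits falls below DistributedCacheEmulator.AVG_BYTES_PER_MAP, that constant acts as the floor instead.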

Example 3: update

import org.apache.hadoop.mapred.ClusterStatus; // import the class that provides the method
/**
 * STRESS mode: invoked when the StatsCollector delivers fresh cluster
 * metrics. Updates the current loadStatus with the JobTracker's latest load.
 *
 * @param item the latest cluster statistics
 */
@Override
public void update(Statistics.ClusterStats item) {
  ClusterStatus clusterStatus = item.getStatus();
  try {
    // update the max cluster map/reduce task capacity
    loadStatus.updateMapCapacity(clusterStatus.getMaxMapTasks());
    
    loadStatus.updateReduceCapacity(clusterStatus.getMaxReduceTasks());
    
    int numTrackers = clusterStatus.getTaskTrackers();
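    // Job-submission head-room: the target job count (ratio * trackers) minus jobs already running.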
    int jobLoad = 
      (int) (maxJobTrackerRatio * numTrackers) - item.getNumRunningJob();
    loadStatus.updateJobLoad(jobLoad);
  } catch (Exception e) {
    LOG.error("Couldn't get the new Status", e);
  }
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: StressJobFactory.java

Example 4: generateSummaryTable

import org.apache.hadoop.mapred.ClusterStatus; // import the class that provides the method
/**
 * Generates an XML-formatted block that summarizes the state of the JobTracker.
 */
public void generateSummaryTable(JspWriter out,
                                 JobTracker tracker) throws IOException {
  ClusterStatus status = tracker.getClusterStatus();
  int maxMapTasks = status.getMaxMapTasks();
  int maxReduceTasks = status.getMaxReduceTasks();
  int numTaskTrackers = status.getTaskTrackers();
  String tasksPerNodeStr;
  if (numTaskTrackers > 0) {
    double tasksPerNodePct = (double) (maxMapTasks + maxReduceTasks) / (double) numTaskTrackers;
    tasksPerNodeStr = percentFormat.format(tasksPerNodePct);
  } else {
    tasksPerNodeStr = "-";
  }
  out.print("<maps>" + status.getMapTasks() + "</maps>\n" +
          "<reduces>" + status.getReduceTasks() + "</reduces>\n" +
          "<total_submissions>" + tracker.getTotalSubmissions() + "</total_submissions>\n" +
          "<nodes>" + status.getTaskTrackers() + "</nodes>\n" +
          "<map_task_capacity>" + status.getMaxMapTasks() + "</map_task_capacity>\n" +
          "<reduce_task_capacity>" + status.getMaxReduceTasks() + "</reduce_task_capacity>\n" +
          "<avg_tasks_per_node>" + tasksPerNodeStr + "</avg_tasks_per_node>\n");
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 25, Source: JobTrackerJspHelper.java
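To make the arithmetic concrete: with getMaxMapTasks() = 20, getMaxReduceTasks() = 10, and 10 task trackers, <avg_tasks_per_node> is (20 + 10) / 10 = 3, rendered through percentFormat; when no trackers have registered, "-" is emitted to avoid dividing by zero.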

Example 5: analyzeHadoopCluster

import org.apache.hadoop.mapred.ClusterStatus; // import the class that provides the method
/**
 * Analyzes properties of the Hadoop cluster and configuration.
 */
private static void analyzeHadoopCluster() {
  try {
    JobConf job = ConfigurationManager.getCachedJobConf();
    JobClient client = new JobClient(job);
    ClusterStatus stat = client.getClusterStatus();
    if (stat != null) { // if in cluster mode
      // analyze cluster status
      _remotePar = stat.getTaskTrackers();
      _remoteParMap = stat.getMaxMapTasks();
      _remoteParReduce = stat.getMaxReduceTasks();

      // analyze pure configuration properties
      analyzeHadoopConfiguration();
    }
  }
  catch (IOException e) {
    throw new RuntimeException("Unable to analyze infrastructure.", e);
  }
}
 
Developer: apache, Project: systemml, Lines: 23, Source: InfrastructureAnalyzer.java

Example 6: getNumberTaskTrackers

import org.apache.hadoop.mapred.ClusterStatus; // import the class that provides the method
public static int getNumberTaskTrackers(Configuration conf) throws IOException {
  /* XXX hack to get the ClusterStatus. To use JobClient it seems I need to
   * wrap the Configuration with the deprecated JobConf.
   * Is there a better way?
   */
  ClusterStatus status = (new JobClient(new JobConf(conf))).getClusterStatus();
  return status.getTaskTrackers();
}
 
Developer: ilveroluca, Project: seal, Lines: 10, Source: ClusterUtils.java
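A brief usage sketch for the helper above (a hypothetical call site, assuming the enclosing ClusterUtils class from the source file):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;

public class ClusterUtilsDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Ask the JobTracker how many task trackers are registered.
    int nTrackers = ClusterUtils.getNumberTaskTrackers(conf);
    System.out.println("Task trackers: " + nTrackers);
  }
}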


Note: the org.apache.hadoop.mapred.ClusterStatus.getTaskTrackers examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from projects contributed by the open-source community; copyright remains with the original authors, and each project's license governs use and redistribution of its code. Do not republish without permission.