本文整理汇总了Java中org.apache.hadoop.mapred.ClusterStatus.getMaxMapTasks方法的典型用法代码示例。如果您正苦于以下问题:Java ClusterStatus.getMaxMapTasks方法的具体用法?Java ClusterStatus.getMaxMapTasks怎么用?Java ClusterStatus.getMaxMapTasks使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.mapred.ClusterStatus
的用法示例。
在下文中一共展示了ClusterStatus.getMaxMapTasks方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: generateSummaryTable
import org.apache.hadoop.mapred.ClusterStatus; //导入方法依赖的package包/类
/**
* Generates an XML-formatted block that summarizes the state of the JobTracker.
*/
/**
 * Writes an XML-formatted summary of the JobTracker's state to the given
 * writer: running map/reduce task counts, total submissions, registered
 * node count, slot capacities, and the average task slots per node.
 *
 * @param out     destination for the XML fragment
 * @param tracker JobTracker whose cluster status is summarized
 * @throws IOException if writing to {@code out} fails
 */
public void generateSummaryTable(JspWriter out,
    JobTracker tracker) throws IOException {
  ClusterStatus status = tracker.getClusterStatus();
  int mapSlotCapacity = status.getMaxMapTasks();
  int reduceSlotCapacity = status.getMaxReduceTasks();
  int trackerCount = status.getTaskTrackers();

  // Average task slots per node; "-" when no trackers have registered yet.
  String slotsPerNode;
  if (trackerCount == 0) {
    slotsPerNode = "-";
  } else {
    double avg = (mapSlotCapacity + reduceSlotCapacity) / (double) trackerCount;
    slotsPerNode = percentFormat.format(avg);
  }

  out.print("<maps>" + status.getMapTasks() + "</maps>\n"
      + "<reduces>" + status.getReduceTasks() + "</reduces>\n"
      + "<total_submissions>" + tracker.getTotalSubmissions() + "</total_submissions>\n"
      + "<nodes>" + trackerCount + "</nodes>\n"
      + "<map_task_capacity>" + mapSlotCapacity + "</map_task_capacity>\n"
      + "<reduce_task_capacity>" + reduceSlotCapacity + "</reduce_task_capacity>\n"
      + "<avg_tasks_per_node>" + slotsPerNode + "</avg_tasks_per_node>\n");
}
示例2: analyzeHadoopCluster
import org.apache.hadoop.mapred.ClusterStatus; //导入方法依赖的package包/类
/**
* Analyzes properties of hadoop cluster and configuration.
*/
/**
 * Inspects the Hadoop cluster (when one is reachable) and caches its
 * parallelism characteristics in the corresponding static fields, then
 * analyzes the pure configuration properties.
 *
 * @throws RuntimeException wrapping any IOException raised while
 *         querying the cluster status
 */
private static void analyzeHadoopCluster() {
  try {
    JobConf conf = ConfigurationManager.getCachedJobConf();
    ClusterStatus clusterStatus = new JobClient(conf).getClusterStatus();
    // A null status means we are not running in cluster mode.
    if (clusterStatus == null) {
      return;
    }
    _remotePar = clusterStatus.getTaskTrackers();
    _remoteParMap = clusterStatus.getMaxMapTasks();
    _remoteParReduce = clusterStatus.getMaxReduceTasks();
    analyzeHadoopConfiguration();
  }
  catch (IOException e) {
    throw new RuntimeException("Unable to analyze infrastructure.",e);
  }
}
示例3: getSuggestedNumberOfSplits
import org.apache.hadoop.mapred.ClusterStatus; //导入方法依赖的package包/类
/**
* Gets the number of input splits. First, tries the corresponding property,
* then falls back to the number of available slots.
*
* @param context job context
* @return number of input splits
*/
/**
 * Determines the suggested number of input splits. Order of precedence:
 * the explicit {@code inputNumberOfSplitsProperty} setting, a fixed
 * default for hServer jobs, the number of idle map slots in the cluster,
 * and finally {@code DEFAULT_NUMBER_OF_SPLITS}.
 *
 * @param context job context
 * @return suggested number of input splits (always positive)
 * @throws IOException declared for caller compatibility
 */
private int getSuggestedNumberOfSplits(JobContext context) throws IOException {
    Configuration conf = context.getConfiguration();
    int numberOfSplits = conf.getInt(inputNumberOfSplitsProperty, -1);
    if (numberOfSplits > 0) return numberOfSplits;
    if (HServerParameters.isHServerJob(conf)) { //We are running a hServer job, not a Hadoop job
        return HSERVER_JOB_DEFAULT_NUMBER_OF_SPLITS;
    }
    try {
        // Wrap rather than cast: JobContext.getConfiguration() is not
        // guaranteed to return a JobConf, so the previous blind
        // (JobConf) cast could throw ClassCastException.
        ClusterStatus status = new JobClient(new JobConf(conf)).getClusterStatus();
        numberOfSplits = status.getMaxMapTasks() - status.getMapTasks();
        if (numberOfSplits > 0) return numberOfSplits;
    } catch (Throwable ignored) {
        // Best effort: the cluster may be unreachable; fall back to default.
    }
    return DEFAULT_NUMBER_OF_SPLITS;
}
示例4: getNumAvailableMaps
import org.apache.hadoop.mapred.ClusterStatus; //导入方法依赖的package包/类
/**
 * Computes how many map slots are currently free in the cluster:
 * the total map slot capacity minus the maps already running.
 *
 * @return number of currently available map slots
 * @throws IOException if the cluster status cannot be fetched
 */
public static final int getNumAvailableMaps () throws IOException {
    ClusterStatus status =
        new JobClient(new JobConf(Utils.class)).getClusterStatus();
    return status.getMaxMapTasks() - status.getMapTasks();
}
示例5: getNumSlotsForStore
import org.apache.hadoop.mapred.ClusterStatus; //导入方法依赖的package包/类
/**
 * Returns the number of task slots allocated to the store: the map slot
 * capacity when the store runs on the map side, otherwise the reduce
 * slot capacity.
 *
 * @return slot capacity on the side where the store executes
 * @throws IOException if the cluster status cannot be obtained
 */
public int getNumSlotsForStore() throws IOException {
    ClusterStatus status = getClusterStatus();
    if (isStoreInMapSide()) {
        return status.getMaxMapTasks();
    }
    return status.getMaxReduceTasks();
}
示例6: getMaxNumMaps
import org.apache.hadoop.mapred.ClusterStatus; //导入方法依赖的package包/类
public static final int getMaxNumMaps () throws IOException {
JobConf job = new JobConf(Utils.class);
JobClient client = new JobClient(job);
ClusterStatus cluster = client.getClusterStatus();
return cluster.getMaxMapTasks();
}