本文整理汇总了Java中org.apache.hadoop.mapreduce.ClusterMetrics.getMapSlotCapacity方法的典型用法代码示例。如果您正苦于以下问题:Java ClusterMetrics.getMapSlotCapacity方法的具体用法?Java ClusterMetrics.getMapSlotCapacity怎么用?Java ClusterMetrics.getMapSlotCapacity使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.mapreduce.ClusterMetrics
的用法示例。
在下文中一共展示了ClusterMetrics.getMapSlotCapacity方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: chooseMachine
import org.apache.hadoop.mapreduce.ClusterMetrics; //导入方法依赖的package包/类
/**
* Choose a Machine in runtime according to the cluster status.
*/
/**
 * Choose a Machine at runtime according to the current cluster status.
 * Polls the cluster every 2 seconds until either the map side or the
 * reduce side has at least {@code parts} free slots, preferring the
 * reduce side when both qualify.
 *
 * @param conf job configuration; {@code N_PARTS} gives the number of
 *             parts (slots) required, defaulting to Integer.MAX_VALUE
 * @return the chosen {@link Machine} (ReduceSide favored over MapSide)
 * @throws IOException if the polling thread is interrupted while waiting
 */
private Machine chooseMachine(Configuration conf) throws IOException {
  final int parts = conf.getInt(N_PARTS, Integer.MAX_VALUE);
  try {
    // Busy-wait loop: sleep 2s between cluster-status polls.
    for(;; Thread.sleep(2000)) {
      // Get a fresh snapshot of cluster slot usage.
      final ClusterMetrics status = cluster.getClusterStatus();
      final int m =
          status.getMapSlotCapacity() - status.getOccupiedMapSlots();
      final int r =
          status.getReduceSlotCapacity() - status.getOccupiedReduceSlots();
      if (m >= parts || r >= parts) {
        // Favor the ReduceSide machine when both sides have capacity.
        final Machine value = r >= parts?
            ReduceSide.INSTANCE: MapSide.INSTANCE;
        Util.out.println(" " + this + " is " + value + " (m=" + m + ", r=" + r + ")");
        return value;
      }
    }
  } catch (InterruptedException e) {
    // Restore the interrupt status before translating to IOException,
    // so callers further up the stack can still observe the interrupt.
    Thread.currentThread().interrupt();
    throw new IOException(e);
  }
}
示例2: getClusterStatus
import org.apache.hadoop.mapreduce.ClusterMetrics; //导入方法依赖的package包/类
/**
* Get status information about the Map-Reduce cluster.
*
* @return the status information about the Map-Reduce cluster as an object
* of {@link ClusterStatus}.
* @throws IOException
*/
/**
 * Get status information about the Map-Reduce cluster.
 *
 * <p>Adapts the newer {@link ClusterMetrics} API plus a few
 * {@code cluster}-level queries into the legacy {@link ClusterStatus}
 * value object expected by older callers.
 *
 * @return the status information about the Map-Reduce cluster as an object
 *         of {@link ClusterStatus}.
 * @throws IOException if the underlying cluster query is interrupted
 */
public ClusterStatus getClusterStatus() throws IOException {
  try {
    ClusterMetrics metrics = cluster.getClusterStatus();
    return new ClusterStatus(metrics.getTaskTrackerCount(),
        metrics.getBlackListedTaskTrackerCount(), cluster.getTaskTrackerExpiryInterval(),
        metrics.getOccupiedMapSlots(),
        metrics.getOccupiedReduceSlots(), metrics.getMapSlotCapacity(),
        metrics.getReduceSlotCapacity(),
        cluster.getJobTrackerStatus(),
        metrics.getDecommissionedTaskTrackerCount());
  } catch (InterruptedException ie) {
    // Preserve the thread's interrupt status before wrapping; silently
    // clearing it would hide the interruption from upstream code.
    Thread.currentThread().interrupt();
    throw new IOException(ie);
  }
}
示例3: isOverloaded
import org.apache.hadoop.mapreduce.ClusterMetrics; //导入方法依赖的package包/类
/**
* We try to use some light-weight mechanism to determine cluster load.
* @return Whether, from job client perspective, the cluster is overloaded.
*/
/**
 * We try to use some light-weight mechanism to determine cluster load.
 *
 * <p>The cluster is considered overloaded when either (a) the number of
 * running jobs reaches the number of task trackers, or (b) the estimated
 * count of incomplete (pending + running) map tasks exceeds
 * {@code OVERLAOD_MAPTASK_MAPSLOT_RATIO} times the map slot capacity.
 *
 * @param now current simulation timestamp, used only for log output
 * @return Whether, from job client perspective, the cluster is overloaded.
 * @throws IOException if the metrics/status query is interrupted
 */
private boolean isOverloaded(long now) throws IOException {
  try {
    ClusterMetrics clusterMetrics = jobTracker.getClusterMetrics();
    // If there are more jobs than number of task trackers, we assume the
    // cluster is overloaded. This is to bound the memory usage of the
    // simulator job tracker, in situations where we have jobs with small
    // number of map tasks and large number of reduce tasks.
    if (runningJobs.size() >= clusterMetrics.getTaskTrackerCount()) {
      System.out.printf("%d Overloaded is %s: " +
          "#runningJobs >= taskTrackerCount (%d >= %d)\n",
          now, Boolean.TRUE.toString(),
          runningJobs.size(), clusterMetrics.getTaskTrackerCount());
      return true;
    }
    float incompleteMapTasks = 0; // include pending & running map tasks.
    for (Map.Entry<JobID, JobSketchInfo> entry : runningJobs.entrySet()) {
      org.apache.hadoop.mapreduce.JobStatus jobStatus = jobTracker
          .getJobStatus(entry.getKey());
      // Remaining fraction of each job's maps, weighted by its map count.
      incompleteMapTasks += (1 - Math.min(jobStatus.getMapProgress(), 1.0))
          * entry.getValue().numMaps;
    }
    boolean overloaded = incompleteMapTasks >
        OVERLAOD_MAPTASK_MAPSLOT_RATIO * clusterMetrics.getMapSlotCapacity();
    String relOp = (overloaded) ? ">" : "<=";
    System.out.printf("%d Overloaded is %s: "
        + "incompleteMapTasks %s %.1f*mapSlotCapacity (%.1f %s %.1f*%d)\n",
        now, Boolean.toString(overloaded), relOp, OVERLAOD_MAPTASK_MAPSLOT_RATIO,
        incompleteMapTasks, relOp, OVERLAOD_MAPTASK_MAPSLOT_RATIO,
        clusterMetrics.getMapSlotCapacity());
    return overloaded;
  } catch (InterruptedException e) {
    // Re-assert the interrupt flag so the interruption is not lost when
    // we translate it to an IOException for the caller.
    Thread.currentThread().interrupt();
    throw new IOException("InterruptedException", e);
  }
}