本文整理汇总了Java中org.apache.hadoop.mapreduce.ClusterMetrics类的典型用法代码示例。如果您正苦于以下问题:Java ClusterMetrics类的具体用法?Java ClusterMetrics怎么用?Java ClusterMetrics使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
ClusterMetrics类属于org.apache.hadoop.mapreduce包,在下文中一共展示了ClusterMetrics类的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getClusterStatus
import org.apache.hadoop.mapreduce.ClusterMetrics; //导入依赖的package包/类
/**
* Get status information about the Map-Reduce cluster.
*
* @return the status information about the Map-Reduce cluster as an object
* of {@link ClusterStatus}.
* @throws IOException
*/
/**
 * Get status information about the Map-Reduce cluster.
 *
 * @return the status information about the Map-Reduce cluster as an object
 *         of {@link ClusterStatus}.
 * @throws IOException if fetching the status fails, or if the calling
 *         thread is interrupted while waiting for the RPC.
 */
public ClusterStatus getClusterStatus() throws IOException {
  try {
    // Execute as the client user so the call carries the right credentials.
    return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
      public ClusterStatus run() throws IOException, InterruptedException {
        // Translate the new-API ClusterMetrics into the old-API ClusterStatus.
        ClusterMetrics metrics = cluster.getClusterStatus();
        return new ClusterStatus(metrics.getTaskTrackerCount(), metrics
          .getBlackListedTaskTrackerCount(), cluster
          .getTaskTrackerExpiryInterval(), metrics.getOccupiedMapSlots(),
          metrics.getOccupiedReduceSlots(), metrics.getMapSlotCapacity(),
          metrics.getReduceSlotCapacity(), cluster.getJobTrackerStatus(),
          metrics.getDecommissionedTaskTrackerCount(), metrics
          .getGrayListedTaskTrackerCount());
      }
    });
  } catch (InterruptedException ie) {
    // Restore the interrupt status before converting to IOException so
    // callers further up the stack can still observe the interruption.
    Thread.currentThread().interrupt();
    throw new IOException(ie);
  }
}
示例2: chooseMachine
import org.apache.hadoop.mapreduce.ClusterMetrics; //导入依赖的package包/类
/**
* Choose a Machine in runtime according to the cluster status.
*/
/**
 * Choose a Machine at runtime according to the cluster status.
 * Polls the cluster every two seconds until at least {@code parts} free
 * map or reduce slots are available, favoring the reduce side when both
 * sides qualify.
 *
 * @param conf job configuration; N_PARTS bounds the slots required.
 * @return the selected {@link Machine} (map-side or reduce-side).
 * @throws IOException if polling fails or the thread is interrupted.
 */
private Machine chooseMachine(Configuration conf) throws IOException {
  final int parts = conf.getInt(N_PARTS, Integer.MAX_VALUE);
  try {
    for(;; Thread.sleep(2000)) {
      //get cluster status
      final ClusterMetrics status = cluster.getClusterStatus();
      // Free slots on each side = capacity minus currently occupied.
      final int m =
          status.getMapSlotCapacity() - status.getOccupiedMapSlots();
      final int r =
          status.getReduceSlotCapacity() - status.getOccupiedReduceSlots();
      if (m >= parts || r >= parts) {
        //favor ReduceSide machine
        final Machine value = r >= parts?
            ReduceSide.INSTANCE: MapSide.INSTANCE;
        Util.out.println(" " + this + " is " + value + " (m=" + m + ", r=" + r + ")");
        return value;
      }
    }
  } catch (InterruptedException e) {
    // Preserve the interrupt status for callers before rethrowing as IOException.
    Thread.currentThread().interrupt();
    throw new IOException(e);
  }
}
示例3: getSummary
import org.apache.hadoop.mapreduce.ClusterMetrics; //导入依赖的package包/类
/**
 * Build a summary of cluster state: node counts, slot capacity and usage,
 * and total job submissions.
 *
 * @return an {@link InfoMap} with "nodes", "alive", "blacklisted",
 *         "slots" (nested map) and "jobs" entries.
 */
InfoMap getSummary() {
  final ClusterMetrics metrics = getClusterMetrics();
  InfoMap map = new InfoMap();
  // "nodes" counts every tracker: alive plus blacklisted.
  map.put("nodes", metrics.getTaskTrackerCount()
      + getBlacklistedTrackerCount());
  map.put("alive", metrics.getTaskTrackerCount());
  map.put("blacklisted", getBlacklistedTrackerCount());
  // Plain instance instead of double-brace initialization: the anonymous
  // subclass would pin a hidden reference to the enclosing instance.
  InfoMap slots = new InfoMap();
  slots.put("map_slots", metrics.getMapSlotCapacity());
  slots.put("map_slots_used", metrics.getOccupiedMapSlots());
  slots.put("reduce_slots", metrics.getReduceSlotCapacity());
  slots.put("reduce_slots_used", metrics.getOccupiedReduceSlots());
  map.put("slots", slots);
  map.put("jobs", metrics.getTotalJobSubmissions());
  return map;
}
示例4: getSummary
import org.apache.hadoop.mapreduce.ClusterMetrics; //导入依赖的package包/类
/**
 * Build a summary of cluster state: node counts (including graylisted),
 * slot capacity and usage, and total job submissions.
 *
 * @return an {@link InfoMap} with "nodes", "alive", "blacklisted",
 *         "graylisted", "slots" (nested map) and "jobs" entries.
 */
InfoMap getSummary() {
  final ClusterMetrics metrics = getClusterMetrics();
  InfoMap map = new InfoMap();
  // "nodes" counts every tracker: alive plus blacklisted.
  map.put("nodes", metrics.getTaskTrackerCount()
      + getBlacklistedTrackerCount());
  map.put("alive", metrics.getTaskTrackerCount());
  map.put("blacklisted", getBlacklistedTrackerCount());
  map.put("graylisted", getGraylistedTrackerCount());
  // Plain instance instead of double-brace initialization: the anonymous
  // subclass would pin a hidden reference to the enclosing instance.
  InfoMap slots = new InfoMap();
  slots.put("map_slots", metrics.getMapSlotCapacity());
  slots.put("map_slots_used", metrics.getOccupiedMapSlots());
  slots.put("reduce_slots", metrics.getReduceSlotCapacity());
  slots.put("reduce_slots_used", metrics.getOccupiedReduceSlots());
  map.put("slots", slots);
  map.put("jobs", metrics.getTotalJobSubmissions());
  return map;
}
示例5: testTrackerReservationWithJobBlackListedTracker
import org.apache.hadoop.mapreduce.ClusterMetrics; //导入依赖的package包/类
/**
* Test case to check task tracker reservation for a job which
* has a job blacklisted tracker.
* <ol>
* <li>Run a job which fails on one of the tracker.</li>
* <li>Check if the job succeeds and has no reservation.</li>
* </ol>
*
* @throws Exception
*/
/**
 * Test case to check task tracker reservation for a job which
 * has a job blacklisted tracker.
 * <ol>
 * <li>Run a job which fails on one of the trackers.</li>
 * <li>Check if the job succeeds and has no reservation.</li>
 * </ol>
 *
 * @throws Exception if the fake job cannot be run.
 */
public void testTrackerReservationWithJobBlackListedTracker() throws Exception {
  FakeJobInProgress job = TestTaskTrackerBlacklisting.runBlackListingJob(
      jobTracker, trackers);
  assertEquals("Job has no blacklisted trackers", 1, job
      .getBlackListedTrackers().size());
  assertTrue("Tracker 1 not blacklisted for the job", job
      .getBlackListedTrackers().contains(
          JobInProgress.convertTrackerNameToHostName(trackers[0])));
  // Expected value first: JUnit's contract is assertEquals(msg, expected, actual).
  // The original passed them swapped, yielding misleading failure output.
  assertEquals("Job did not complete successfully", JobStatus.SUCCEEDED,
      job.getStatus().getRunState());
  assertEquals("Reservation for the job not released: Maps",
      0, job.getNumReservedTaskTrackersForMaps());
  assertEquals("Reservation for the job not released : Reduces",
      0, job.getNumReservedTaskTrackersForReduces());
  // After the job finishes, no slots should remain reserved cluster-wide.
  ClusterMetrics metrics = jobTracker.getClusterMetrics();
  assertEquals("reserved map slots do not match",
      0, metrics.getReservedMapSlots());
  assertEquals("reserved reduce slots do not match",
      0, metrics.getReservedReduceSlots());
}
示例6: getClusterMetrics
import org.apache.hadoop.mapreduce.ClusterMetrics; //导入依赖的package包/类
/**
 * Map the YARN cluster metrics onto the legacy {@link ClusterMetrics}
 * shape. Only the node-manager-derived fields carry real data; the
 * remaining fields are filled with hard-coded placeholder values.
 *
 * @return a {@link ClusterMetrics} view of the YARN cluster.
 * @throws IOException if the YARN metrics cannot be retrieved.
 * @throws InterruptedException if the calling thread is interrupted.
 */
public ClusterMetrics getClusterMetrics() throws IOException,
    InterruptedException {
  try {
    final YarnClusterMetrics yarnMetrics = client.getYarnClusterMetrics();
    final int nodeManagers = yarnMetrics.getNumNodeManagers();
    // Hard-coded 1s are placeholders; capacities are scaled from the
    // node-manager count (x10 map, x2 reduce).
    return new ClusterMetrics(1, 1, 1, 1, 1, 1,
        nodeManagers * 10,
        nodeManagers * 2, 1,
        nodeManagers, 0, 0);
  } catch (YarnException e) {
    throw new IOException(e);
  }
}