This page collects typical usage examples of the Java type org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus. If you are unsure what JobTrackerStatus is, what it does, or how to use it, the selected examples below should help.
JobTrackerStatus is an enum nested in the org.apache.hadoop.mapreduce.Cluster class. Twelve code examples are shown below, sorted by popularity by default.
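Before the examples, here is a minimal, self-contained sketch of the pattern most of the snippets below rely on: asking a classic (MR1) cluster whether its JobTracker is up. The class name JobTrackerStatusProbe is made up for illustration, and the sketch assumes mapred-site.xml is on the classpath and a JobTracker is reachable.

import java.io.IOException;
import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus;

public class JobTrackerStatusProbe {
  public static void main(String[] args) throws IOException {
    JobConf conf = new JobConf();           // loads cluster settings from the classpath
    JobClient client = new JobClient(conf);
    ClusterStatus status = client.getClusterStatus();
    // JobTrackerStatus has exactly two constants: INITIALIZING and RUNNING
    if (status.getJobTrackerStatus() == JobTrackerStatus.RUNNING) {
      System.out.println("JobTracker is up");
    } else {
      System.out.println("JobTracker is still initializing");
    }
  }
}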
Example 1: ClusterStatus
import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; // import the required package/class
ClusterStatus(int trackers, int blacklists, long ttExpiryInterval,
              int maps, int reduces, int maxMaps, int maxReduces,
              JobTrackerStatus status, int numDecommissionedNodes,
              long used_memory, long max_memory) {
  numActiveTrackers = trackers;
  numBlacklistedTrackers = blacklists;
  this.numExcludedNodes = numDecommissionedNodes;
  this.ttExpiryInterval = ttExpiryInterval;
  map_tasks = maps;
  reduce_tasks = reduces;
  max_map_tasks = maxMaps;
  max_reduce_tasks = maxReduces;
  this.status = status;
  this.used_memory = used_memory;
  this.max_memory = max_memory;
}
Example 2: getClusterStatus
import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; // import the required package/class
public synchronized ClusterStatus getClusterStatus(boolean detailed) {
  synchronized (taskTrackers) {
    if (detailed) {
      List<List<String>> trackerNames = taskTrackerNames();
      return new ClusterStatus(trackerNames.get(0),
          trackerNames.get(1),
          TASKTRACKER_EXPIRY_INTERVAL,
          totalMaps,
          totalReduces,
          totalMapTaskCapacity,
          totalReduceTaskCapacity,
          JobTrackerStatus.valueOf(state.name()),
          getExcludedNodes().size());
    } else {
      return new ClusterStatus(taskTrackers.size() -
          getBlacklistedTrackerCount(),
          getBlacklistedTrackerCount(),
          TASKTRACKER_EXPIRY_INTERVAL,
          totalMaps,
          totalReduces,
          totalMapTaskCapacity,
          totalReduceTaskCapacity,
          JobTrackerStatus.valueOf(state.name()),
          getExcludedNodes().size());
    }
  }
}
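The detailed flag above decides whether the returned ClusterStatus carries per-tracker names or only aggregate counts. On the client side the same switch is exposed through JobClient.getClusterStatus(boolean); a short hedged sketch of calling it (assumes a reachable MR1 cluster, as in the probe above):

JobClient jc = new JobClient(new JobConf());
ClusterStatus detailed = jc.getClusterStatus(true); // true: include tracker names
System.out.println("jobtracker status: " + detailed.getJobTrackerStatus());
for (String tracker : detailed.getActiveTrackerNames()) {
  System.out.println("active tracker: " + tracker);
}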
Example 3: testClientFailover
import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; // import the required package/class
@Test(timeout=60000)
public void testClientFailover() throws Exception {
  LOG.info("Running testClientFailover");
  startCluster();
  // Test with client. c.f. HATestUtil.setFailoverConfigurations
  JobClient jc = new JobClient(conf);
  assertEquals("client sees jt running", JobTrackerStatus.RUNNING,
      jc.getClusterStatus().getJobTrackerStatus());
  // failover to jt2
  FailoverController fc = new FailoverController(conf,
      RequestSource.REQUEST_BY_USER);
  fc.failover(target1, target2, false, false);
  cluster.waitActive();
  assertEquals("jt2 running", JobTrackerStatus.RUNNING,
      jt2.getJobTracker().getClusterStatus().getJobTrackerStatus());
  assertNull("jt1 not running", jt1.getJobTracker());
  assertEquals("client still sees jt running", JobTrackerStatus.RUNNING,
      jc.getClusterStatus().getJobTrackerStatus());
}
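A note on the failover call in this test: the two boolean arguments to FailoverController.failover are forceFence and forceActive; with both false, the controller attempts a graceful failover, checking the target's health before making it active. The test then verifies that JobTrackerStatus.RUNNING is observable both directly on jt2 and through the unchanged JobClient, which is the point of client-side failover.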
Example 4: readFields
import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; // import the required package/class
public void readFields(DataInput in) throws IOException {
  numActiveTrackers = in.readInt();
  int numTrackerNames = in.readInt();
  if (numTrackerNames > 0) {
    for (int i = 0; i < numTrackerNames; i++) {
      String name = Text.readString(in);
      activeTrackers.add(name);
    }
  }
  numBlacklistedTrackers = in.readInt();
  int blackListTrackerInfoSize = in.readInt();
  if (blackListTrackerInfoSize > 0) {
    for (int i = 0; i < blackListTrackerInfoSize; i++) {
      BlackListInfo info = new BlackListInfo();
      info.readFields(in);
      blacklistedTrackersInfo.add(info);
    }
  }
  numExcludedNodes = in.readInt();
  ttExpiryInterval = in.readLong();
  map_tasks = in.readInt();
  reduce_tasks = in.readInt();
  max_map_tasks = in.readInt();
  max_reduce_tasks = in.readInt();
  status = WritableUtils.readEnum(in, JobTrackerStatus.class);
}
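Every readFields implementation must mirror a write method that emits the same fields in the same order. The Hadoop source is not reproduced here, but the read order above implies a counterpart roughly like the following sketch (inferred, not copied):

public void write(DataOutput out) throws IOException {
  out.writeInt(numActiveTrackers);
  out.writeInt(activeTrackers.size());
  for (String tracker : activeTrackers) {
    Text.writeString(out, tracker);
  }
  out.writeInt(numBlacklistedTrackers);
  out.writeInt(blacklistedTrackersInfo.size());
  for (BlackListInfo info : blacklistedTrackersInfo) {
    info.write(out);
  }
  out.writeInt(numExcludedNodes);
  out.writeLong(ttExpiryInterval);
  out.writeInt(map_tasks);
  out.writeInt(reduce_tasks);
  out.writeInt(max_map_tasks);
  out.writeInt(max_reduce_tasks);
  WritableUtils.writeEnum(out, status); // symmetric with readEnum above
}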
Example 5: waitForJobTracker
import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; // import the required package/class
/**
 * Wait for the jobtracker to be RUNNING.
 */
static void waitForJobTracker(JobClient jobClient) {
  while (true) {
    try {
      ClusterStatus status = jobClient.getClusterStatus();
      while (status.getJobTrackerStatus() != JobTrackerStatus.RUNNING) {
        waitFor(100);
        status = jobClient.getClusterStatus();
      }
      break; // the jobtracker is ready
    } catch (IOException ioe) {
      // the jobtracker may not be reachable yet; retry
    }
  }
}
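The waitFor(100) helper is not shown in the snippet; a minimal sketch of what such a helper presumably looks like (the name comes from the example, the body is an assumption):

private static void waitFor(int millis) {
  try {
    Thread.sleep(millis);
  } catch (InterruptedException ie) {
    Thread.currentThread().interrupt(); // preserve the interrupt flag for callers
  }
}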
Example 6: ClusterStatus
import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; // import the required package/class
/**
 * Construct a new cluster status.
 *
 * @param trackers no. of tasktrackers in the cluster
 * @param blacklists no. of blacklisted tasktrackers in the cluster
 * @param ttExpiryInterval the tasktracker expiry interval
 * @param maps no. of currently running map tasks in the cluster
 * @param reduces no. of currently running reduce tasks in the cluster
 * @param maxMaps the maximum no. of map tasks in the cluster
 * @param maxReduces the maximum no. of reduce tasks in the cluster
 * @param status the {@link JobTrackerStatus} of the <code>JobTracker</code>
 * @param numDecommissionedNodes number of decommissioned trackers
 * @param numGrayListedTrackers number of graylisted trackers
 */
ClusterStatus(int trackers, int blacklists, long ttExpiryInterval, int maps,
    int reduces, int maxMaps, int maxReduces, JobTrackerStatus status,
    int numDecommissionedNodes, int numGrayListedTrackers) {
  numActiveTrackers = trackers;
  numBlacklistedTrackers = blacklists;
  this.numExcludedNodes = numDecommissionedNodes;
  this.ttExpiryInterval = ttExpiryInterval;
  map_tasks = maps;
  reduce_tasks = reduces;
  max_map_tasks = maxMaps;
  max_reduce_tasks = maxReduces;
  this.status = status;
  this.grayListedTrackers = numGrayListedTrackers;
}
Example 7: readFields
import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; // import the required package/class
public void readFields(DataInput in) throws IOException {
  numActiveTrackers = in.readInt();
  int numTrackerNames = in.readInt();
  if (numTrackerNames > 0) {
    for (int i = 0; i < numTrackerNames; i++) {
      // unlike Example 4, tracker names are weakly interned to cut
      // duplicate-string memory use
      String name = StringInterner.weakIntern(Text.readString(in));
      activeTrackers.add(name);
    }
  }
  numBlacklistedTrackers = in.readInt();
  int blackListTrackerInfoSize = in.readInt();
  if (blackListTrackerInfoSize > 0) {
    for (int i = 0; i < blackListTrackerInfoSize; i++) {
      BlackListInfo info = new BlackListInfo();
      info.readFields(in);
      blacklistedTrackersInfo.add(info);
    }
  }
  numExcludedNodes = in.readInt();
  ttExpiryInterval = in.readLong();
  map_tasks = in.readInt();
  reduce_tasks = in.readInt();
  max_map_tasks = in.readInt();
  max_reduce_tasks = in.readInt();
  status = WritableUtils.readEnum(in, JobTrackerStatus.class);
  grayListedTrackers = in.readInt();
}
Example 8: waitActive
import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; // import the required package/class
public void waitActive() throws IOException {
  while (true) {
    for (JobTrackerHADaemon jtHaDaemon : jtHaDaemonList) {
      JobTracker jt = jtHaDaemon.getJobTracker();
      if (jt != null &&
          jt.getClusterStatus().getJobTrackerStatus() == JobTrackerStatus.RUNNING) {
        return;
      }
    }
    try {
      Thread.sleep(1000);
    } catch (InterruptedException ie) {
      // ignored; keep polling until an active JobTracker shows up
    }
  }
}
Example 9: getClusterStatus
import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; // import the required package/class
@Override
public ClusterStatus getClusterStatus() {
  int numTrackers = trackers.size();
  return new ClusterStatus(numTrackers, 0,
      JobTracker.TASKTRACKER_EXPIRY_INTERVAL,
      maps, reduces,
      numTrackers * maxMapTasksPerTracker,
      numTrackers * maxReduceTasksPerTracker,
      JobTrackerStatus.RUNNING);
}
Example 10: getClusterStatus
import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; // import the required package/class
public ClusterStatus getClusterStatus() {
  int numTrackers = trackers.size();
  return new ClusterStatus(numTrackers, 0,
      JobTracker.TASKTRACKER_EXPIRY_INTERVAL,
      maps, reduces,
      numTrackers * maxMapTasksPerTracker,
      numTrackers * maxReduceTasksPerTracker,
      JobTrackerStatus.RUNNING);
}
Example 11: tearDown
import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; // import the required package/class
@After
public void tearDown() {
  ClusterStatus status = mr.getJobTrackerRunner().getJobTracker()
      .getClusterStatus(false);
  if (status.getJobTrackerStatus() == JobTrackerStatus.RUNNING) {
    mr.shutdown();
  }
  if (dfs != null) {
    dfs.shutdown();
  }
}
Example 12: getClusterStatus
import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; // import the required package/class
public ClusterStatus getClusterStatus() {
  int numTrackers = trackers.size();
  return new ClusterStatus(numTrackers, maps, reduces,
      numTrackers * maxMapTasksPerTracker,
      numTrackers * maxReduceTasksPerTracker,
      JobTrackerStatus.RUNNING);
}
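Finally, since JobTrackerStatus is a plain two-constant enum (INITIALIZING and RUNNING), it works naturally in a switch statement. A small illustrative fragment, reusing the jobClient from the sketch at the top of this page:

switch (jobClient.getClusterStatus().getJobTrackerStatus()) {
  case INITIALIZING:
    System.out.println("JobTracker is starting up; try again shortly");
    break;
  case RUNNING:
    System.out.println("JobTracker is ready to accept jobs");
    break;
}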