This page compiles typical usage examples of the Java class org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker. If you are wondering what the TaskTracker class does or how it is used in practice, the selected code samples below may help.
The TaskTracker class belongs to the org.apache.hadoop.mapreduce.server.jobtracker package. Twelve code examples of the TaskTracker class are shown below, roughly ordered by popularity.
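Before the individual examples, here is a minimal self-contained sketch of the pattern that recurs in the test code below (see Examples 7 and 10): constructing a TaskTracker by name and attaching a TaskTrackerStatus. The class name TaskTrackerSketch, the method name, and the trailing slot counts (2, 2, standing in for maxMapTasksPerTracker / maxReduceTasksPerTracker) are illustrative placeholders, not part of the Hadoop API.

import java.util.ArrayList;

import org.apache.hadoop.mapred.TaskStatus;
import org.apache.hadoop.mapred.TaskTrackerStatus;
import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker;

public class TaskTrackerSketch {
  // Build a tracker named "tt1" and attach a status, mirroring Examples 7 and 10.
  public static TaskTracker newTrackerWithStatus() {
    TaskTracker tt = new TaskTracker("tt1");
    tt.setStatus(new TaskTrackerStatus("tt1", "http", "tt1.host", 1,
        new ArrayList<TaskStatus>(), 0, 0, 2, 2));
    return tt;
  }
}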
Example 1: activeTaskTrackers
import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; // import the required package/class
/**
* Get the active task tracker statuses in the cluster
*
* @return {@link Collection} of active {@link TaskTrackerStatus}
*/
// This method is synchronized to make sure that the locking order
// "taskTrackers lock followed by faultyTrackers.potentiallyFaultyTrackers
// lock" is under JobTracker lock to avoid deadlocks.
synchronized public Collection<TaskTrackerStatus> activeTaskTrackers() {
  Collection<TaskTrackerStatus> activeTrackers =
    new ArrayList<TaskTrackerStatus>();
  synchronized (taskTrackers) {
    for (TaskTracker tt : taskTrackers.values()) {
      TaskTrackerStatus status = tt.getStatus();
      if (!faultyTrackers.isBlacklisted(status.getHost())) {
        activeTrackers.add(status);
      }
    }
  }
  return activeTrackers;
}
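For context, a minimal sketch of how the returned collection might be consumed. It assumes the method above lives on org.apache.hadoop.mapred.JobTracker (as the surrounding comments suggest) and that the caller already holds a reference to it; the class name ActiveTrackerReport is a placeholder.

import java.util.Collection;

import org.apache.hadoop.mapred.JobTracker;
import org.apache.hadoop.mapred.TaskTrackerStatus;

public class ActiveTrackerReport {
  // Print the name and host of every tracker that is not blacklisted.
  static void printActiveTrackers(JobTracker jobTracker) {
    Collection<TaskTrackerStatus> active = jobTracker.activeTaskTrackers();
    for (TaskTrackerStatus status : active) {
      System.out.println(status.getTrackerName() + " on " + status.getHost());
    }
  }
}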
Example 2: taskTrackerNames
import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; // import the required package/class
/**
* Get the active and blacklisted task tracker names in the cluster. The first
* element in the returned list contains the list of active tracker names.
* The second element in the returned list contains the list of blacklisted
* tracker names.
*/
// This method is synchronized to make sure that the locking order
// "taskTrackers lock followed by faultyTrackers.potentiallyFaultyTrackers
// lock" is under JobTracker lock to avoid deadlocks.
synchronized public List<List<String>> taskTrackerNames() {
  List<String> activeTrackers = new ArrayList<String>();
  List<String> blacklistedTrackers = new ArrayList<String>();
  synchronized (taskTrackers) {
    for (TaskTracker tt : taskTrackers.values()) {
      TaskTrackerStatus status = tt.getStatus();
      if (!faultyTrackers.isBlacklisted(status.getHost())) {
        activeTrackers.add(status.getTrackerName());
      } else {
        blacklistedTrackers.add(status.getTrackerName());
      }
    }
  }
  List<List<String>> result = new ArrayList<List<String>>(2);
  result.add(activeTrackers);
  result.add(blacklistedTrackers);
  return result;
}
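The two-element structure described in the Javadoc can be unpacked as in the following sketch; again, the JobTracker reference and the class name TrackerNameReport are assumptions made for illustration.

import java.util.List;

import org.apache.hadoop.mapred.JobTracker;

public class TrackerNameReport {
  // Element 0 holds the active tracker names, element 1 the blacklisted ones.
  static void printTrackerNames(JobTracker jobTracker) {
    List<List<String>> names = jobTracker.taskTrackerNames();
    List<String> active = names.get(0);
    List<String> blacklisted = names.get(1);
    System.out.println("Active trackers: " + active);
    System.out.println("Blacklisted trackers: " + blacklisted);
  }
}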
Example 3: blacklistedTaskTrackers
import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; // import the required package/class
/**
* Get the blacklisted task tracker statuses in the cluster
*
* @return {@link Collection} of blacklisted {@link TaskTrackerStatus}
*/
// This method is synchronized to make sure that the locking order
// "taskTrackers lock followed by faultyTrackers.potentiallyFaultyTrackers
// lock" is under JobTracker lock to avoid deadlocks.
synchronized public Collection<TaskTrackerStatus> blacklistedTaskTrackers() {
  Collection<TaskTrackerStatus> blacklistedTrackers =
    new ArrayList<TaskTrackerStatus>();
  synchronized (taskTrackers) {
    for (TaskTracker tt : taskTrackers.values()) {
      TaskTrackerStatus status = tt.getStatus();
      if (faultyTrackers.isBlacklisted(status.getHost())) {
        blacklistedTrackers.add(status);
      }
    }
  }
  return blacklistedTrackers;
}
Example 4: addNewTracker
import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; // import the required package/class
/**
 * Adds a new node to the jobtracker. It involves adding it to the expiry
 * thread and adding it for resolution.
 *
 * Assumes JobTracker, taskTrackers and trackerExpiryQueue are locked on entry.
 *
 * @param taskTracker the task tracker to add
 */
private void addNewTracker(TaskTracker taskTracker) {
  TaskTrackerStatus status = taskTracker.getStatus();
  trackerExpiryQueue.add(status);
  // Register the tracker if it is not registered yet
  String hostname = status.getHost();
  if (getNode(status.getTrackerName()) == null) {
    // Resolve the network location inline
    resolveAndAddToTopology(hostname);
  }
  // Add it to the set of trackers per host
  Set<TaskTracker> trackers = hostnameToTaskTracker.get(hostname);
  if (trackers == null) {
    trackers = Collections.synchronizedSet(new HashSet<TaskTracker>());
    hostnameToTaskTracker.put(hostname, trackers);
  }
  statistics.taskTrackerAdded(status.getTrackerName());
  getInstrumentation().addTrackers(1);
  LOG.info("Adding tracker " + status.getTrackerName() + " to host "
      + hostname);
  trackers.add(taskTracker);
}
Example 5: refreshHosts
import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; // import the required package/class
private synchronized void refreshHosts() throws IOException {
  // Reread the config to get the mapred.hosts and mapred.hosts.exclude filenames,
  // update the file names, and refresh the internal includes and excludes lists.
  LOG.info("Refreshing hosts information");
  Configuration conf = new Configuration();
  hostsReader.updateFileNames(conf.get("mapred.hosts", ""),
                              conf.get("mapred.hosts.exclude", ""));
  hostsReader.refresh();
  Set<String> excludeSet = new HashSet<String>();
  for (Map.Entry<String, TaskTracker> eSet : taskTrackers.entrySet()) {
    String trackerName = eSet.getKey();
    TaskTrackerStatus status = eSet.getValue().getStatus();
    // Exclude the tracker if it is not in the hosts list,
    // or if it is in the excluded hosts list.
    if (!inHostsList(status) || inExcludedHostsList(status)) {
      excludeSet.add(status.getHost()); // add to rejected trackers
    }
  }
  decommissionNodes(excludeSet);
}
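refreshHosts() reads the mapred.hosts and mapred.hosts.exclude keys from a freshly created Configuration, so on a live cluster those keys would normally be set in the site configuration files on the JobTracker's classpath. The sketch below only illustrates the key names; the file paths and the class name HostsConfigSketch are placeholders.

import org.apache.hadoop.conf.Configuration;

public class HostsConfigSketch {
  // Illustrates the two keys read by refreshHosts(); the paths are placeholders.
  static Configuration hostsConfiguration() {
    Configuration conf = new Configuration();
    conf.set("mapred.hosts", "/path/to/hosts.include");
    conf.set("mapred.hosts.exclude", "/path/to/hosts.exclude");
    return conf;
  }
}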
Example 6: decommissionNodes
import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; // import the required package/class
synchronized void decommissionNodes(Set<String> hosts)
    throws IOException {
  LOG.info("Decommissioning " + hosts.size() + " nodes");
  // create a list of tracker hostnames
  synchronized (taskTrackers) {
    synchronized (trackerExpiryQueue) {
      int trackersDecommissioned = 0;
      for (String host : hosts) {
        LOG.info("Decommissioning host " + host);
        Set<TaskTracker> trackers = hostnameToTaskTracker.remove(host);
        if (trackers != null) {
          for (TaskTracker tracker : trackers) {
            LOG.info("Decommission: Losing tracker " + tracker.getTrackerName() +
                " on host " + host);
            removeTracker(tracker);
          }
          trackersDecommissioned += trackers.size();
        }
        LOG.info("Host " + host + " is ready for decommissioning");
      }
      getInstrumentation().setDecommissionedTrackers(trackersDecommissioned);
    }
  }
}
Example 7: FakeTaskTrackerManager
import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; // import the required package/class
public FakeTaskTrackerManager() {
  JobConf conf = new JobConf();
  queueManager = new QueueManager(conf);
  TaskTracker tt1 = new TaskTracker("tt1");
  tt1.setStatus(new TaskTrackerStatus("tt1", "http", "tt1.host", 1,
      new ArrayList<TaskStatus>(), 0, 0,
      maxMapTasksPerTracker, maxReduceTasksPerTracker));
  trackers.put("tt1", tt1);
  TaskTracker tt2 = new TaskTracker("tt2");
  tt2.setStatus(new TaskTrackerStatus("tt2", "http", "tt2.host", 2,
      new ArrayList<TaskStatus>(), 0, 0,
      maxMapTasksPerTracker, maxReduceTasksPerTracker));
  trackers.put("tt2", tt2);
}
Example 8: assignTasks
import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; // import the required package/class
@Override
public List<Task> assignTasks(TaskTracker tt) {
  if (unreserveSlots) {
    tt.unreserveSlots(TaskType.MAP, fakeJob);
    tt.unreserveSlots(TaskType.REDUCE, fakeJob);
  } else {
    int currCount = 1;
    if (reservedCounts.containsKey(tt)) {
      currCount = reservedCounts.get(tt) + 1;
    }
    reservedCounts.put(tt, currCount);
    tt.reserveSlots(TaskType.MAP, fakeJob, currCount);
    tt.reserveSlots(TaskType.REDUCE, fakeJob, currCount);
  }
  return new ArrayList<Task>();
}
Example 9: testDefaultResourceValues
import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; // import the required package/class
/**
* Test that verifies default values are configured and reported correctly.
*
* @throws Exception
*/
public void testDefaultResourceValues()
    throws Exception {
  JobConf conf = new JobConf();
  try {
    // Memory values are disabled by default.
    conf.setClass(
        org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,
        DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    setUpCluster(conf);
    JobConf jobConf = miniMRCluster.createJobConf();
    jobConf.setClass(
        org.apache.hadoop.mapred.TaskTracker.TT_RESOURCE_CALCULATOR_PLUGIN,
        DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
    runSleepJob(jobConf);
    verifyTestResults();
  } finally {
    tearDownCluster();
  }
}
Example 10: FakeTaskTrackerManager
import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; // import the required package/class
public FakeTaskTrackerManager() {
  TaskTracker tt1 = new TaskTracker("tt1");
  tt1.setStatus(new TaskTrackerStatus("tt1", "http", "tt1.host", 1,
      new ArrayList<TaskStatus>(), 0, 0,
      maxMapTasksPerTracker,
      maxReduceTasksPerTracker));
  trackers.put("tt1", tt1);
  TaskTracker tt2 = new TaskTracker("tt2");
  tt2.setStatus(new TaskTrackerStatus("tt2", "http", "tt2.host", 2,
      new ArrayList<TaskStatus>(), 0, 0,
      maxMapTasksPerTracker,
      maxReduceTasksPerTracker));
  trackers.put("tt2", tt2);
}
Example 11: addNewTracker
import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; // import the required package/class
/**
 * Adds a new node to the jobtracker. It involves adding it to the expiry
 * thread and adding it for resolution.
 *
 * Assumes JobTracker, taskTrackers and trackerExpiryQueue are locked on entry.
 *
 * @param taskTracker the task tracker to add
 */
void addNewTracker(TaskTracker taskTracker) {
  TaskTrackerStatus status = taskTracker.getStatus();
  trackerExpiryQueue.add(status);
  // Register the tracker if it is not registered yet
  String hostname = status.getHost();
  if (getNode(status.getTrackerName()) == null) {
    // Resolve the network location inline
    resolveAndAddToTopology(hostname);
  }
  // Add it to the set of trackers per host
  Set<TaskTracker> trackers = hostnameToTaskTracker.get(hostname);
  if (trackers == null) {
    trackers = Collections.synchronizedSet(new HashSet<TaskTracker>());
    hostnameToTaskTracker.put(hostname, trackers);
  }
  statistics.taskTrackerAdded(status.getTrackerName());
  getInstrumentation().addTrackers(1);
  LOG.info("Adding tracker " + status.getTrackerName() + " to host "
      + hostname);
  trackers.add(taskTracker);
}
Example 12: refreshHosts
import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; // import the required package/class
private synchronized void refreshHosts() throws IOException {
  // Reread the config to get the mapred.hosts and mapred.hosts.exclude filenames,
  // update the file names, and refresh the internal includes and excludes lists.
  LOG.info("Refreshing hosts information");
  Configuration conf = new Configuration();
  hostsReader.updateFileNames(conf.get("mapred.hosts", ""),
                              conf.get("mapred.hosts.exclude", ""));
  hostsReader.refresh();
  Set<String> excludeSet = new HashSet<String>();
  for (Map.Entry<String, TaskTracker> eSet : taskTrackers.entrySet()) {
    String trackerName = eSet.getKey();
    TaskTrackerStatus status = eSet.getValue().getStatus();
    // Exclude the tracker if it is not in the hosts list,
    // or if it is in the excluded hosts list.
    if (!inHostsList(status) || inExcludedHostsList(status)) {
      excludeSet.add(status.getHost()); // add to rejected trackers
    }
  }
  decommissionNodes(excludeSet);
  int totalExcluded = hostsReader.getExcludedHosts().size();
  getInstrumentation().setDecommissionedTrackers(totalExcluded);
}