This article collects typical usage examples of the Java method org.apache.hadoop.tools.rumen.JobStory.getName. If you are unsure what JobStory.getName does, how to use it, or what working code looks like, the selected examples below may help. You can also learn more about the enclosing class, org.apache.hadoop.tools.rumen.JobStory.
The following shows 2 code examples of the JobStory.getName method.
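Before the examples, here is a minimal sketch (not taken from the examples below) of how JobStory.getName is typically consumed: a Rumen JobStoryProducer such as ZombieJobProducer hands out JobStory objects parsed from a job trace, and getName() returns the job name recorded in that trace. The trace path "trace.json" and the null cluster-topology argument are illustrative assumptions, not values from the original examples.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.rumen.JobStory;
import org.apache.hadoop.tools.rumen.JobStoryProducer;
import org.apache.hadoop.tools.rumen.ZombieJobProducer;

public class PrintJobNames {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // "trace.json" is a placeholder Rumen job trace; passing null for the
    // ZombieCluster skips topology adjustment (an assumption for this sketch)
    JobStoryProducer producer =
        new ZombieJobProducer(new Path("trace.json"), null, conf);
    JobStory job;
    while ((job = producer.getNextJob()) != null) {
      // getName() returns the job name recorded in the trace,
      // e.g. "WordCount" in Example 1 below
      System.out.println(job.getName() + ":" + job.getUser() + ":"
          + job.getQueueName());
    }
    producer.close();
  }
}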
Example 1: verifyWordCountJobStory
import org.apache.hadoop.tools.rumen.JobStory; // import for the package/class this method depends on
/**
 * Verifies that the given {@code JobStory} corresponds to the checked-in
 * WordCount {@code JobStory}. The verification is effected via JUnit
 * assertions.
 *
 * @param js the candidate JobStory.
 */
private void verifyWordCountJobStory(JobStory js) {
  assertNotNull("Null JobStory", js);
  String expectedJobStory = "WordCount:johndoe:default:1285322645148:3:1";
  String actualJobStory = js.getName() + ":" + js.getUser() + ":"
      + js.getQueueName() + ":" + js.getSubmissionTime() + ":"
      + js.getNumberMaps() + ":" + js.getNumberReduces();
  assertEquals("Unexpected JobStory", expectedJobStory, actualJobStory);
}
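The expected string packs six JobStory getters into one colon-separated value, name:user:queue:submissionTime:maps:reduces, so a mismatch in any of those fields of the checked-in WordCount trace shows up directly in the assertion message.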
Example 2: SimulatorJobInProgress
import org.apache.hadoop.tools.rumen.JobStory; // import for the package/class this method depends on
@SuppressWarnings("deprecation")
public SimulatorJobInProgress(JobID jobid, JobTracker jobtracker,
    JobConf default_conf, JobStory jobStory) {
  super(jobid, jobStory.getJobConf(), jobtracker);
  // jobSetupCleanupNeeded is set to false in the parent constructor,
  // though the default is true
  restartCount = 0;
  jobSetupCleanupNeeded = false;

  this.memoryPerMap = conf.getMemoryForMapTask();
  this.memoryPerReduce = conf.getMemoryForReduceTask();
  this.maxTaskFailuresPerTracker = conf.getMaxTaskFailuresPerTracker();

  this.jobId = jobid;
  String url = "http://" + jobtracker.getJobTrackerMachine() + ":"
      + jobtracker.getInfoPort() + "/jobdetails.jsp?jobid=" + jobid;
  this.jobtracker = jobtracker;
  this.conf = jobStory.getJobConf();
  this.priority = conf.getJobPriority();
  Path jobDir = jobtracker.getSystemDirectoryForJob(jobid);
  this.jobFile = new Path(jobDir, "job.xml");
  this.status = new JobStatus(jobid, 0.0f, 0.0f, 0.0f, 0.0f, JobStatus.PREP,
      priority, conf.getUser());
  // JobStory.getName() supplies the human-readable job name for the profile
  this.profile = new JobProfile(jobStory.getUser(), jobid,
      this.jobFile.toString(), url, jobStory.getName(), conf.getQueueName());
  this.startTime = JobTracker.getClock().getTime();
  status.setStartTime(startTime);
  this.resourceEstimator = new ResourceEstimator(this);

  this.numMapTasks = jobStory.getNumberMaps();
  this.numReduceTasks = jobStory.getNumberReduces();
  this.taskCompletionEvents = new ArrayList<TaskCompletionEvent>(
      numMapTasks + numReduceTasks + 10);

  this.mapFailuresPercent = conf.getMaxMapTaskFailuresPercent();
  this.reduceFailuresPercent = conf.getMaxReduceTaskFailuresPercent();

  MetricsContext metricsContext = MetricsUtil.getContext("mapred");
  this.jobMetrics = MetricsUtil.createRecord(metricsContext, "job");
  this.jobMetrics.setTag("user", conf.getUser());
  this.jobMetrics.setTag("sessionId", conf.getSessionId());
  this.jobMetrics.setTag("jobName", conf.getJobName());
  this.jobMetrics.setTag("jobId", jobid.toString());

  this.maxLevel = jobtracker.getNumTaskCacheLevels();
  this.anyCacheLevel = this.maxLevel + 1;
  this.nonLocalMaps = new LinkedList<TaskInProgress>();
  this.nonLocalRunningMaps = new LinkedHashSet<TaskInProgress>();
  this.runningMapCache = new IdentityHashMap<Node, Set<TaskInProgress>>();
  this.nonRunningReduces = new LinkedList<TaskInProgress>();
  this.runningReduces = new LinkedHashSet<TaskInProgress>();

  this.slowTaskThreshold = Math.max(0.0f, conf.getFloat(
      "mapred.speculative.execution.slowTaskThreshold", 1.0f));
  this.speculativeCap = conf.getFloat(
      "mapred.speculative.execution.speculativeCap", 0.1f);
  this.slowNodeThreshold = conf.getFloat(
      "mapred.speculative.execution.slowNodeThreshold", 1.0f);

  this.jobStory = jobStory;
  // this.jobHistory = this.jobtracker.getJobHistory();
}
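In this constructor, the JobStory supplies the entire static description of the simulated job: getJobConf() provides the configuration passed to the superclass and stored in this.conf, getName() and getUser() populate the JobProfile behind the jobdetails.jsp URL, and getNumberMaps()/getNumberReduces() size the task-completion bookkeeping.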