本文整理汇总了Java中org.apache.hadoop.tools.rumen.JobStory类的典型用法代码示例。如果您正苦于以下问题:Java JobStory类的具体用法?Java JobStory怎么用?Java JobStory使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
JobStory类属于org.apache.hadoop.tools.rumen包,在下文中一共展示了JobStory类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: generateJobStats
import org.apache.hadoop.tools.rumen.JobStory; //导入依赖的package包/类
/**
 * Builds a {@link JobStats} snapshot for a job and its trace description.
 *
 * @param job     the submitted job
 * @param jobdesc the Rumen trace entry for the job; may be null for jobs
 *                that are not being simulated
 * @return stats carrying the map/reduce counts (-1 when unknown)
 * @throws IllegalArgumentException if a simulated job (seq id &gt;= 0) has
 *                                  no trace entry
 */
public static JobStats generateJobStats(Job job, JobStory jobdesc) {
  final int seqId = GridmixJob.getJobSeqId(job);
  // A job selected for simulation must carry a trace description.
  if (jobdesc == null && seqId >= 0) {
    throw new IllegalArgumentException("JobStory not available for job "
        + job.getJobID());
  }

  int numMaps = -1;
  int numReduces = -1;
  if (jobdesc != null) {
    // ZombieJob guarantees non-negative task counts here.
    numMaps = jobdesc.getNumberMaps();
    numReduces = jobdesc.getNumberReduces();
  }
  return new JobStats(numMaps, numReduces, job);
}
示例2: testCompareGridmixJob
import org.apache.hadoop.tools.rumen.JobStory; //导入依赖的package包/类
/**
 * Checks equals()/compareTo() of GridmixJob: jobs with the same submission
 * time and sequence id are equal, and a smaller sequence id orders first.
 */
@Test (timeout=30000)
public void testCompareGridmixJob() throws Exception {
  final Configuration conf = new Configuration();
  final Path outputRoot = new Path("target");

  // Stub just enough of the trace entry for LoadJob construction.
  final JobStory jobDesc = mock(JobStory.class);
  when(jobDesc.getName()).thenReturn("JobName");
  when(jobDesc.getJobConf()).thenReturn(new JobConf(conf));

  final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  final GridmixJob seqZeroA = new LoadJob(conf, 1000L, jobDesc, outputRoot, ugi, 0);
  final GridmixJob seqZeroB = new LoadJob(conf, 1000L, jobDesc, outputRoot, ugi, 0);
  final GridmixJob seqOneA = new LoadJob(conf, 1000L, jobDesc, outputRoot, ugi, 1);
  final GridmixJob seqOneB = new LoadJob(conf, 1000L, jobDesc, outputRoot, ugi, 1);

  // Same submission time and sequence id => equal and ordered the same.
  assertTrue(seqZeroA.equals(seqZeroB));
  assertEquals(0, seqZeroA.compareTo(seqZeroB));
  // Only the sequence id differs between these two.
  assertFalse(seqZeroA.equals(seqOneA));
  // Ordering is driven by id and submissionMillis.
  assertEquals(-1, seqZeroA.compareTo(seqOneA));
  assertEquals(-1, seqZeroA.compareTo(seqOneB));
}
示例3: testMapTasksOnlySleepJobs
import org.apache.hadoop.tools.rumen.JobStory; //导入依赖的package包/类
/**
 * Runs every generated sleep job in map-only mode and verifies that no
 * reduce tasks are scheduled for any of them.
 */
@Test (timeout=600000)
public void testMapTasksOnlySleepJobs() throws Exception {
  final Configuration conf = GridmixTestUtils.mrvl.getConfig();
  final DebugJobProducer producer = new DebugJobProducer(5, conf);
  conf.setBoolean(SleepJob.SLEEPJOB_MAPTASK_ONLY, true);
  final UserGroupInformation ugi = UserGroupInformation.getLoginUser();

  int seq = 1;
  for (JobStory story = producer.getNextJob(); story != null;
       story = producer.getNextJob()) {
    final GridmixJob gridmixJob = JobCreator.SLEEPJOB.createGridmixJob(
        conf, 0, story, new Path("ignored"), ugi, seq++);
    gridmixJob.buildSplits(null);
    final Job job = gridmixJob.call();
    // Map-only mode must leave no reducers behind.
    assertEquals(0, job.getNumReduceTasks());
  }
  producer.close();
  // All five produced jobs were consumed (seq incremented five times).
  assertEquals(6, seq);
}
示例4: testRandomLocation
import org.apache.hadoop.tools.rumen.JobStory; //导入依赖的package包/类
/**
 * Submits {@code njobs} sleep jobs configured with a fixed number of fake
 * task locations and asserts every generated split reports exactly that
 * many locations.
 *
 * @param locations expected number of (fake) hosts per split
 * @param njobs     number of debug jobs to generate
 * @param ugi       identity used to submit the jobs
 */
private void testRandomLocation(int locations, int njobs,
    UserGroupInformation ugi) throws Exception {
  final Configuration producerConf = new Configuration();
  final DebugJobProducer producer = new DebugJobProducer(njobs, producerConf);
  final Configuration jobConf = GridmixTestUtils.mrvl.getConfig();
  jobConf.setInt(JobCreator.SLEEPJOB_RANDOM_LOCATIONS, locations);

  int seq = 1;
  for (JobStory story = producer.getNextJob(); story != null;
       story = producer.getNextJob()) {
    final GridmixJob gridmixJob = JobCreator.SLEEPJOB.createGridmixJob(
        jobConf, 0, story, new Path("ignored"), ugi, seq++);
    gridmixJob.buildSplits(null);
    final List<InputSplit> splits =
        new SleepJob.SleepInputFormat().getSplits(gridmixJob.getJob());
    for (InputSplit split : splits) {
      // Each split must advertise the configured number of fake hosts.
      assertEquals(locations, split.getLocations().length);
    }
  }
  producer.close();
}
示例5: testMapTasksOnlySleepJobs
import org.apache.hadoop.tools.rumen.JobStory; //导入依赖的package包/类
/**
 * Exercises map-only sleep jobs: with SLEEPJOB_MAPTASK_ONLY set, every
 * generated job must end up with zero reduce tasks.
 */
@Test
public void testMapTasksOnlySleepJobs() throws Exception {
  final Configuration conf = GridmixTestUtils.mrvl.getConfig();
  final DebugJobProducer producer = new DebugJobProducer(5, conf);
  conf.setBoolean(SleepJob.SLEEPJOB_MAPTASK_ONLY, true);
  final UserGroupInformation ugi = UserGroupInformation.getLoginUser();

  int seq = 1;
  JobStory story = producer.getNextJob();
  while (story != null) {
    final GridmixJob gridmixJob = JobCreator.SLEEPJOB.createGridmixJob(
        conf, 0, story, new Path("ignored"), ugi, seq++);
    gridmixJob.buildSplits(null);
    // A map-only sleep job must request zero reducers.
    assertEquals(0, gridmixJob.call().getNumReduceTasks());
    story = producer.getNextJob();
  }
  producer.close();
  // Five jobs consumed => seq advanced from 1 to 6.
  assertEquals(6, seq);
}
示例6: addJobStats
import org.apache.hadoop.tools.rumen.JobStory; //导入依赖的package包/类
/**
 * Registers a {@link JobStats} entry for the given job, keyed by its
 * Gridmix sequence id. Jobs without a valid sequence id are ignored.
 *
 * @param job     the submitted job
 * @param jobdesc trace description supplying the map-task count
 * @throws IllegalArgumentException if no trace description is available
 */
public void addJobStats(Job job, JobStory jobdesc) {
  final int seq = GridmixJob.getJobSeqId(job);
  // Only jobs that are part of the simulation carry a non-negative id.
  if (seq < 0) {
    LOG.info("Not tracking job " + job.getJobName()
        + " as seq id is less than zero: " + seq);
    return;
  }
  if (jobdesc == null) {
    throw new IllegalArgumentException(
        " JobStory not available for job " + job.getJobName());
  }
  final int maps = jobdesc.getNumberMaps();
  jobMaps.put(seq, new JobStats(maps, job));
}
示例7: testRandomLocation
import org.apache.hadoop.tools.rumen.JobStory; //导入依赖的package包/类
/**
 * Submits {@code njobs} sleep jobs with a configured count of fake task
 * locations and checks that every input split reports that many locations.
 *
 * @param locations expected number of fake hosts per split
 * @param njobs     number of debug jobs to generate
 * @param ugi       identity used to submit the jobs
 */
private void testRandomLocation(int locations, int njobs, UserGroupInformation ugi) throws Exception {
  final Configuration baseConf = new Configuration();
  baseConf.setInt(JobCreator.SLEEPJOB_RANDOM_LOCATIONS, locations);
  final DebugJobProducer producer = new DebugJobProducer(njobs, baseConf);
  final JobConf clusterConf =
      GridmixTestUtils.mrCluster.createJobConf(new JobConf(baseConf));

  int seq = 1;
  for (JobStory story = producer.getNextJob(); story != null;
       story = producer.getNextJob()) {
    final GridmixJob gridmixJob = JobCreator.SLEEPJOB.createGridmixJob(
        clusterConf, 0, story, new Path("ignored"), ugi, seq++);
    gridmixJob.buildSplits(null);
    final List<InputSplit> splits =
        new SleepJob.SleepInputFormat().getSplits(gridmixJob.getJob());
    for (InputSplit split : splits) {
      // Each split must advertise the configured number of fake hosts.
      assertEquals(locations, split.getLocations().length);
    }
  }
}
示例8: testMapTasksOnlySleepJobs
import org.apache.hadoop.tools.rumen.JobStory; //导入依赖的package包/类
/**
 * With SLEEPJOB_MAPTASK_ONLY enabled, every generated sleep job must be
 * submitted with zero reduce tasks.
 */
@Test
public void testMapTasksOnlySleepJobs()
    throws Exception {
  final Configuration baseConf = new Configuration();
  baseConf.setBoolean(SleepJob.SLEEPJOB_MAPTASK_ONLY, true);
  final DebugJobProducer producer = new DebugJobProducer(5, baseConf);
  final JobConf clusterConf =
      GridmixTestUtils.mrCluster.createJobConf(new JobConf(baseConf));
  final UserGroupInformation ugi = UserGroupInformation.getLoginUser();

  int seq = 1;
  for (JobStory story = producer.getNextJob(); story != null;
       story = producer.getNextJob()) {
    final GridmixJob gridmixJob = JobCreator.SLEEPJOB.createGridmixJob(
        clusterConf, 0, story, new Path("ignored"), ugi, seq++);
    gridmixJob.buildSplits(null);
    final Job job = gridmixJob.call();
    // Map-only mode must leave no reducers.
    assertEquals(0, job.getNumReduceTasks());
  }
}
示例9: submitJob
import org.apache.hadoop.tools.rumen.JobStory; //导入依赖的package包/类
/**
 * Accepts a simulated job submission: looks up the job's story in the
 * {@link SimulatorJobCache}, advances the simulated clock to the job's
 * submission time, and registers a new in-progress job. Resubmitting an
 * already-known job id is a no-op that returns the current status.
 *
 * @param jobId id of the job to submit
 * @return the status of the (possibly pre-existing) job
 * @throws IllegalArgumentException if the job is unknown to the cache
 */
@Override
public synchronized JobStatus submitJob(JobID jobId) throws IOException {
  final boolean debug = LOG.isDebugEnabled();
  if (debug) {
    LOG.debug("submitJob for jobname = " + jobId);
  }
  // Idempotent: a job that is already running just reports its status.
  if (jobs.containsKey(jobId)) {
    if (debug) {
      LOG.debug("Job '" + jobId.getId() + "' already present ");
    }
    return jobs.get(jobId).getStatus();
  }
  final JobStory jobStory = SimulatorJobCache.get(jobId);
  if (jobStory == null) {
    throw new IllegalArgumentException("Job not found in SimulatorJobCache: "+jobId);
  }
  // Move the simulated clock forward before registering the job.
  validateAndSetClock(jobStory.getSubmissionTime());
  final SimulatorJobInProgress job =
      new SimulatorJobInProgress(jobId, this, this.conf, jobStory);
  return addJob(jobId, job);
}
示例10: getNextJobFiltered
import org.apache.hadoop.tools.rumen.JobStory; //导入依赖的package包/类
/**
 * Returns the next usable job from the trace, skipping entries that are
 * not worth simulating: killed jobs (filtered to ease debugging) and jobs
 * with no maps or no logged maps.
 *
 * @return the next acceptable job, or null when the trace is exhausted
 * @throws IOException on trace read failure
 */
private JobStory getNextJobFiltered() throws IOException {
  for (ZombieJob candidate = producer.getNextJob(); candidate != null;
       candidate = producer.getNextJob()) {
    final boolean killed =
        candidate.getOutcome() == Pre21JobHistoryConstants.Values.KILLED;
    // Jobs with no (logged) map tasks carry no useful workload.
    if (!killed && candidate.getNumberMaps() != 0
        && candidate.getNumLoggedMaps() != 0) {
      return candidate;
    }
  }
  return null;
}
示例11: buildDistCacheFilesList
import org.apache.hadoop.tools.rumen.JobStory; //导入依赖的package包/类
/**
 * Scans the whole trace, recording every distributed cache file referenced
 * by a successfully completed job, then writes the de-duplicated list to a
 * special file.
 *
 * @param jsp job story producer for the trace
 * @return exit code of the list-writing step
 * @throws IOException on trace read or list write failure
 */
private int buildDistCacheFilesList(JobStoryProducer jsp) throws IOException {
  for (JobStory story = jsp.getNextJob(); story != null;
       story = jsp.getNextJob()) {
    // Only successful jobs with a sane submission time contribute files.
    final boolean succeeded =
        story.getOutcome() == Pre21JobHistoryConstants.Values.SUCCESS;
    if (succeeded && story.getSubmissionTime() >= 0) {
      updateHDFSDistCacheFilesList(story);
    }
  }
  jsp.close();
  return writeDistCacheFilesList();
}
示例12: getNextJobFromTrace
import org.apache.hadoop.tools.rumen.JobStory; //导入依赖的package包/类
/**
 * Pulls the next job from the trace producer, counting every job read.
 *
 * @return the next trace job, or null when the trace is exhausted
 * @throws IOException on trace read failure
 */
private JobStory getNextJobFromTrace() throws IOException {
  final JobStory next = jobProducer.getNextJob();
  if (next != null) {
    numJobsInTrace++;
  }
  return next;
}
示例13: SleepJob
import org.apache.hadoop.tools.rumen.JobStory; //导入依赖的package包/类
/**
 * Builds a sleep job that mimics the timing of a trace job.
 *
 * @param conf             job configuration (also supplies sleep caps and
 *                         the map-only flag)
 * @param submissionMillis simulated submission time
 * @param jobdesc          trace description of the job being mimicked
 * @param outRoot          output root path
 * @param ugi              identity used to run the job
 * @param seq              Gridmix sequence id
 * @param numLocations     number of fake split locations (0 disables)
 * @param hosts            candidate host names for fake locations
 * @throws IOException propagated from the superclass constructor
 */
public SleepJob(Configuration conf, long submissionMillis, JobStory jobdesc,
    Path outRoot, UserGroupInformation ugi, int seq, int numLocations,
    String[] hosts) throws IOException {
  super(conf, submissionMillis, jobdesc, outRoot, ugi, seq);
  this.fakeLocations = numLocations;
  // Defensive copy: the caller may mutate its array afterwards.
  this.hosts = hosts.clone();
  // Only build a host selector when fake locations were requested.
  if (fakeLocations > 0) {
    this.selector = new Selector(hosts.length,
        (float) fakeLocations / hosts.length, rand.get());
  } else {
    this.selector = null;
  }
  this.mapTasksOnly = conf.getBoolean(SLEEPJOB_MAPTASK_ONLY, false);
  mapMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_MAP_TIME, Long.MAX_VALUE);
  reduceMaxSleepTime = conf.getLong(GRIDMIX_SLEEP_MAX_REDUCE_TIME,
      Long.MAX_VALUE);
}
示例14: DebugJobProducer
import org.apache.hadoop.tools.rumen.JobStory; //导入依赖的package包/类
/**
 * Creates a producer that will emit {@code numJobs} mock jobs built from
 * the given configuration.
 *
 * @param numJobs number of mock jobs to hand out before returning null
 * @param conf    configuration passed to each generated MockJob
 */
public DebugJobProducer(int numJobs, Configuration conf) {
  super();
  // Reset shared MockJob state so runs are independent.
  MockJob.reset();
  this.conf = conf;
  this.numJobs = new AtomicInteger(numJobs);
  this.submitted = new ArrayList<JobStory>();
}
示例15: getNextJob
import org.apache.hadoop.tools.rumen.JobStory; //导入依赖的package包/类
/**
 * Hands out mock jobs until the configured budget is exhausted.
 *
 * @return a fresh MockJob, or null once all jobs have been produced
 */
@Override
public JobStory getNextJob() throws IOException {
  // getAndDecrement both checks and consumes the remaining budget.
  if (numJobs.getAndDecrement() <= 0) {
    return null;
  }
  final MockJob job = new MockJob(conf);
  submitted.add(job);
  return job;
}