本文整理汇总了Java中org.apache.hadoop.tools.rumen.ZombieJobProducer类的典型用法代码示例。如果您正苦于以下问题:Java ZombieJobProducer类的具体用法?Java ZombieJobProducer怎么用?Java ZombieJobProducer使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
ZombieJobProducer类属于org.apache.hadoop.tools.rumen包,在下文中一共展示了ZombieJobProducer类的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testSerialReaderThread
import org.apache.hadoop.tools.rumen.ZombieJobProducer; //导入依赖的package包/类
// Verifies the serial reader thread's start-latch contract: no jobs may be
// submitted before the latch is released, and exactly the two jobs recorded
// in wordcount2.json are replayed afterwards.
@Test (timeout=120000)
public void testSerialReaderThread() throws Exception {
  Configuration configuration = new Configuration();
  // Trace file with a couple of recorded wordcount jobs.
  File traceFile = new File("src" + File.separator + "test"
      + File.separator + "resources" + File.separator + "data"
      + File.separator + "wordcount2.json");
  JobStoryProducer storyProducer = new ZombieJobProducer(
      new Path(traceFile.getAbsolutePath()), null, configuration);
  CountDownLatch startSignal = new CountDownLatch(1);
  UserResolver userResolver = new SubmitterUserResolver();
  FakeJobSubmitter fakeSubmitter = new FakeJobSubmitter();
  File workDir = new File("target" + File.separator
      + this.getClass().getName());
  if (!workDir.exists()) {
    Assert.assertTrue(workDir.mkdirs());
  }
  Path workPath = new Path(workDir.getAbsolutePath());
  SerialJobFactory factory = new SerialJobFactory(fakeSubmitter,
      storyProducer, workPath, configuration, startSignal, userResolver);
  factory.setDistCacheEmulator(
      new DistributedCacheEmulator(configuration, workPath));
  Thread readerThread = factory.createReaderThread();
  readerThread.start();
  Thread.sleep(1000);
  // Reader thread must still be blocked on the start latch.
  assertEquals(0, fakeSubmitter.getJobs().size());
  // Release the latch, then pump updates until the reader terminates.
  startSignal.countDown();
  while (readerThread.isAlive()) {
    Thread.sleep(1000);
    factory.update(null);
  }
  // Both trace jobs should have reached the submitter.
  assertEquals(2, fakeSubmitter.getJobs().size());
}
示例2: createJobFactory
import org.apache.hadoop.tools.rumen.ZombieJobProducer; //导入依赖的package包/类
/**
 * Builds a {@code JobFactory} governed by the STRESS submission policy,
 * replaying jobs from the given trace.
 *
 * @param submitter component that receives the deserialized jobs
 * @param traceIn location of the job trace to replay
 * @param scratchDir directory for simulated-job output
 * @param conf configuration handed to the policy and to every job
 * @param startFlag latch gating the start of the pipeline
 * @param resolver user resolver passed through to the factory
 * @throws IOException if the trace stream cannot be opened
 */
protected JobFactory createJobFactory(
    JobSubmitter submitter, String traceIn, Path scratchDir, Configuration conf,
    CountDownLatch startFlag, UserResolver resolver)
    throws IOException {
  // Resolve the STRESS policy, then let it assemble the factory around a
  // Rumen producer reading the trace stream.
  GridmixJobSubmissionPolicy policy = GridmixJobSubmissionPolicy.getPolicy(
      conf, GridmixJobSubmissionPolicy.STRESS);
  ZombieJobProducer producer =
      new ZombieJobProducer(createInputStream(traceIn), null);
  return policy.createJobFactory(
      submitter, producer, scratchDir, conf, startFlag, resolver);
}
示例3: SortedZombieJobProducer
import org.apache.hadoop.tools.rumen.ZombieJobProducer; //导入依赖的package包/类
/**
 * Creates a producer that reads jobs from the trace at {@code path} via a
 * delegate {@link ZombieJobProducer} and keeps up to {@code bufferSize}
 * jobs buffered (presumably for sorted emission, per the class name —
 * confirm against {@code initBuffer()}).
 *
 * @param path location of the Rumen job trace
 * @param cluster cluster topology forwarded to the delegate producer
 * @param conf configuration used to open the trace
 * @param bufferSize maximum number of jobs held in the buffer
 * @throws IOException if the trace cannot be opened or read
 */
public SortedZombieJobProducer(Path path, ZombieCluster cluster,
Configuration conf, int bufferSize)
throws IOException {
producer = new ZombieJobProducer(path, cluster, conf);
jobBufferSize = bufferSize;
initBuffer(); // pre-fill the buffer before the first job is requested
}
示例4: getNumberJobs
import org.apache.hadoop.tools.rumen.ZombieJobProducer; //导入依赖的package包/类
/**
 * Counts the jobs recorded in the Rumen trace at {@code inputFile}.
 *
 * @param inputFile path to the job trace
 * @param conf configuration used to open the trace
 * @return the number of jobs in the trace
 * @throws IOException if the trace cannot be read
 */
private int getNumberJobs(Path inputFile, Configuration conf)
    throws IOException {
  ZombieJobProducer producer = new ZombieJobProducer(inputFile, null, conf);
  try {
    int count = 0;
    // Drain the producer; only the number of jobs matters here.
    for (; producer.getNextJob() != null; count++) {
    }
    return count;
  } finally {
    producer.close(); // always release the trace reader
  }
}
示例5: testSerialReaderThread
import org.apache.hadoop.tools.rumen.ZombieJobProducer; //导入依赖的package包/类
// Verifies the serial reader thread's start-latch contract: no jobs are
// submitted while the latch is held, and exactly the two jobs recorded in
// wordcount2.json are replayed once it is released.
@Test (timeout=40000)
public void testSerialReaderThread() throws Exception {
Configuration conf = new Configuration();
File fin = new File("src" + File.separator + "test" + File.separator
+ "resources" + File.separator + "data" + File.separator
+ "wordcount2.json");
// read couple jobs from wordcount2.json
JobStoryProducer jobProducer = new ZombieJobProducer(new Path(
fin.getAbsolutePath()), null, conf);
// Latch that gates the reader thread; released below once we have
// asserted that nothing was submitted prematurely.
CountDownLatch startFlag = new CountDownLatch(1);
UserResolver resolver = new SubmitterUserResolver();
FakeJobSubmitter submitter = new FakeJobSubmitter();
// Per-test working directory under target/.
File ws = new File("target" + File.separator + this.getClass().getName());
if (!ws.exists()) {
Assert.assertTrue(ws.mkdirs());
}
SerialJobFactory jobFactory = new SerialJobFactory(submitter, jobProducer,
new Path(ws.getAbsolutePath()), conf, startFlag, resolver);
Path ioPath = new Path(ws.getAbsolutePath());
jobFactory.setDistCacheEmulator(new DistributedCacheEmulator(conf, ioPath));
Thread test = jobFactory.createReaderThread();
test.start();
Thread.sleep(1000);
// SerialReaderThread waits startFlag
assertEquals(0, submitter.getJobs().size());
// start!
startFlag.countDown();
// Pump factory updates until the reader thread finishes replaying the
// trace.
while (test.isAlive()) {
Thread.sleep(1000);
jobFactory.update(null);
}
// submitter was called twice
assertEquals(2, submitter.getJobs().size());
}
示例6: buildJobStories
import org.apache.hadoop.tools.rumen.ZombieJobProducer; //导入依赖的package包/类
/**
 * Reads every job from the Rumen trace at {@code path} and indexes the
 * jobs by their job id.
 *
 * @return a map from job id to job, or {@code null} if the trace holds no
 *         jobs (the null-for-empty contract of the original is preserved
 *         because callers may rely on it)
 * @throws IOException if the trace cannot be read
 */
private Map<JobID, ZombieJob> buildJobStories() throws IOException {
  ZombieJobProducer producer = new ZombieJobProducer(path, null, conf);
  Map<JobID, ZombieJob> stories = new HashMap<JobID, ZombieJob>();
  try {
    for (ZombieJob job = producer.getNextJob(); job != null;
        job = producer.getNextJob()) {
      stories.put(job.getJobID(), job);
    }
  } finally {
    // Fix: the original never closed the producer, leaking the trace
    // reader on every call (and on partial-read failures).
    producer.close();
  }
  return stories.isEmpty() ? null : stories;
}
示例7: SimulatorJobStoryProducer
import org.apache.hadoop.tools.rumen.ZombieJobProducer; //导入依赖的package包/类
/**
 * Creates a job-story producer backed by a seeded
 * {@link ZombieJobProducer} reading the trace at {@code path}.
 *
 * @param path location of the Rumen job trace
 * @param cluster cluster topology forwarded to the delegate producer
 * @param firstJobStartTime simulated start time recorded for later use
 *        (how it offsets job times depends on the rest of this class)
 * @param conf configuration used to open the trace
 * @param seed random seed forwarded to the delegate producer
 * @throws IOException if the trace cannot be opened or read
 */
public SimulatorJobStoryProducer(Path path, ZombieCluster cluster,
long firstJobStartTime, Configuration conf, long seed) throws IOException {
producer = new ZombieJobProducer(path, cluster, conf, seed);
this.firstJobStartTime = firstJobStartTime;
}
示例8: createJobStoryProducer
import org.apache.hadoop.tools.rumen.ZombieJobProducer; //导入依赖的package包/类
/**
 * Builds the {@code JobStoryProducer} for the given trace. The special
 * trace name {@code "-"} selects the standard input stream; any other
 * value is interpreted as a file-system path.
 *
 * @param traceIn the trace location ({@code "-"} for stdin)
 * @param conf the configuration used to open a file-based trace
 * @return a Rumen-backed job story producer
 * @throws IOException if the trace cannot be opened
 */
protected JobStoryProducer createJobStoryProducer(String traceIn,
    Configuration conf) throws IOException {
  final boolean readFromStdin = "-".equals(traceIn);
  return readFromStdin
      ? new ZombieJobProducer(System.in, null)
      : new ZombieJobProducer(new Path(traceIn), null, conf);
}
示例9: JobFactory
import org.apache.hadoop.tools.rumen.ZombieJobProducer; //导入依赖的package包/类
/**
 * Creating a new instance does not start the thread.
 * @param submitter Component to which deserialized jobs are passed
 * @param jobTrace Stream of job traces with which to construct a
 * {@link org.apache.hadoop.tools.rumen.ZombieJobProducer}
 * @param scratch Directory into which to write output from simulated jobs
 * @param conf Config passed to all jobs to be submitted
 * @param startFlag Latch released from main to start pipeline
 * @param userResolver User resolver forwarded unchanged to the delegate
 * constructor
 * @throws java.io.IOException propagated from constructing the
 * {@code ZombieJobProducer} over the trace stream
 */
public JobFactory(JobSubmitter submitter, InputStream jobTrace,
Path scratch, Configuration conf, CountDownLatch startFlag,
UserResolver userResolver) throws IOException {
// Delegate after wrapping the raw trace stream in a Rumen producer.
this(submitter, new ZombieJobProducer(jobTrace, null), scratch, conf,
startFlag, userResolver);
}
示例10: JobFactory
import org.apache.hadoop.tools.rumen.ZombieJobProducer; //导入依赖的package包/类
/**
 * Creating a new instance does not start the thread.
 * @param submitter Component to which deserialized jobs are passed
 * @param jobTrace Stream of job traces with which to construct a
 * {@link org.apache.hadoop.tools.rumen.ZombieJobProducer}
 * @param scratch Directory into which to write output from simulated jobs
 * @param conf Config passed to all jobs to be submitted
 * @param startFlag Latch released from main to start pipeline
 * @throws IOException propagated from constructing the
 * {@code ZombieJobProducer} over the trace stream
 */
public JobFactory(JobSubmitter submitter, InputStream jobTrace,
Path scratch, Configuration conf, CountDownLatch startFlag)
throws IOException {
// Delegate after wrapping the raw trace stream in a Rumen producer.
this(submitter, new ZombieJobProducer(jobTrace, null), scratch, conf,
startFlag);
}