本文整理汇总了Java中org.apache.hadoop.mapred.LocalJobRunner类的典型用法代码示例。如果您正苦于以下问题:Java LocalJobRunner类的具体用法?Java LocalJobRunner怎么用?Java LocalJobRunner使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
LocalJobRunner类属于org.apache.hadoop.mapred包,在下文中一共展示了LocalJobRunner类的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testInvalidMultiMapParallelism
import org.apache.hadoop.mapred.LocalJobRunner; //导入依赖的package包/类
/**
 * Run a test with a misconfigured number of mappers.
 * Expect failure.
 */
@Test
public void testInvalidMultiMapParallelism() throws Exception {
  Job job = Job.getInstance();
  Path input = createMultiMapsInput();
  Path output = getOutputPath();

  // Remove any output left behind by a previous run.
  FileSystem localFs = FileSystem.getLocal(new Configuration());
  if (localFs.exists(output)) {
    localFs.delete(output, true);
  }

  job.setMapperClass(StressMapper.class);
  job.setReducerClass(CountingReducer.class);
  job.setNumReduceTasks(1);
  // A negative max-running-maps value is invalid and must cause the job to fail.
  LocalJobRunner.setLocalMaxRunningMaps(job, -6);
  FileInputFormat.addInputPath(job, input);
  FileOutputFormat.setOutputPath(job, output);

  boolean success = job.waitForCompletion(true);
  assertFalse("Job succeeded somehow", success);
}
示例2: testClusterWithLocalClientProvider
import org.apache.hadoop.mapred.LocalJobRunner; //导入依赖的package包/类
@Test
public void testClusterWithLocalClientProvider() throws Exception {
Configuration conf = new Configuration();
// An unrecognized framework name must be rejected when the Cluster is built.
try {
conf.set(MRConfig.FRAMEWORK_NAME, "incorrect");
new Cluster(conf);
fail("Cluster should not be initialized with incorrect framework name");
} catch (IOException e) {
// Expected: no ClientProtocolProvider serves the "incorrect" framework name.
}
// With the "local" framework, the cluster's client must be a LocalJobRunner.
conf.set(MRConfig.FRAMEWORK_NAME, "local");
Cluster cluster = new Cluster(conf);
assertTrue(cluster.getClient() instanceof LocalJobRunner);
cluster.close();
}
示例3: testInvalidMultiMapParallelism
import org.apache.hadoop.mapred.LocalJobRunner; //导入依赖的package包/类
/**
 * Run a test with a misconfigured number of mappers.
 * Expect failure.
 */
@Test
public void testInvalidMultiMapParallelism() throws Exception {
  // The Job() constructor is deprecated; use the Job.getInstance() factory.
  Job job = Job.getInstance();
  Path inputPath = createMultiMapsInput();
  Path outputPath = getOutputPath();

  // Remove any output left behind by a previous run.
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.getLocal(conf);
  if (fs.exists(outputPath)) {
    fs.delete(outputPath, true);
  }

  job.setMapperClass(StressMapper.class);
  job.setReducerClass(CountingReducer.class);
  job.setNumReduceTasks(1);
  // A negative max-running-maps value is invalid and must cause the job to fail.
  LocalJobRunner.setLocalMaxRunningMaps(job, -6);
  FileInputFormat.addInputPath(job, inputPath);
  FileOutputFormat.setOutputPath(job, outputPath);

  boolean success = job.waitForCompletion(true);
  assertFalse("Job succeeded somehow", success);
}
示例4: testClusterWithLocalClientProvider
import org.apache.hadoop.mapred.LocalJobRunner; //导入依赖的package包/类
@Test
public void testClusterWithLocalClientProvider() throws Exception {
  Configuration localConf = new Configuration();
  localConf.set(MRConfig.FRAMEWORK_NAME, "local");
  Cluster localCluster = new Cluster(localConf);
  // The "local" framework must resolve to the in-process LocalJobRunner client.
  assertTrue(localCluster.getClient() instanceof LocalJobRunner);
  localCluster.close();
}
示例5: doMultiReducerTest
import org.apache.hadoop.mapred.LocalJobRunner; //导入依赖的package包/类
/**
 * Run a test which creates a SequenceMapper / IdentityReducer
 * job over a set of generated number files.
 */
private void doMultiReducerTest(int numMaps, int numReduces,
    int parallelMaps, int parallelReduces) throws Exception {
  Path numberDir = getNumberDirPath();
  Path outputDir = getOutputPath();

  // Clear data from any previous tests.
  FileSystem localFs = FileSystem.getLocal(new Configuration());
  if (localFs.exists(outputDir)) {
    localFs.delete(outputDir, true);
  }
  if (localFs.exists(numberDir)) {
    localFs.delete(numberDir, true);
  }

  // Generate one number file (100 entries each) per requested map task.
  for (int mapIdx = 0; mapIdx < numMaps; mapIdx++) {
    makeNumberFile(mapIdx, 100);
  }

  Job job = Job.getInstance();
  job.setNumReduceTasks(numReduces);
  job.setMapperClass(SequenceMapper.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(NullWritable.class);
  FileInputFormat.addInputPath(job, numberDir);
  FileOutputFormat.setOutputPath(job, outputDir);

  // Cap how many map and reduce tasks the LocalJobRunner executes concurrently.
  LocalJobRunner.setLocalMaxRunningMaps(job, parallelMaps);
  LocalJobRunner.setLocalMaxRunningReduces(job, parallelReduces);

  boolean result = job.waitForCompletion(true);
  assertTrue("Job failed!!", result);
  verifyNumberJob(numMaps);
}
示例6: testMultiMaps
import org.apache.hadoop.mapred.LocalJobRunner; //导入依赖的package包/类
/**
 * Run a test with several mappers in parallel, operating at different
 * speeds. Verify that the correct amount of output is created.
 */
@Test
public void testMultiMaps() throws Exception {
  // The Job() constructor is deprecated; use the Job.getInstance() factory.
  Job job = Job.getInstance();
  Path inputPath = createMultiMapsInput();
  Path outputPath = getOutputPath();

  // Remove any output left behind by a previous run.
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.getLocal(conf);
  if (fs.exists(outputPath)) {
    fs.delete(outputPath, true);
  }

  job.setMapperClass(StressMapper.class);
  job.setReducerClass(CountingReducer.class);
  job.setNumReduceTasks(1);
  // Allow up to 6 map tasks to run concurrently in the LocalJobRunner.
  LocalJobRunner.setLocalMaxRunningMaps(job, 6);
  job.getConfiguration().set("io.sort.record.pct", "0.50");
  job.getConfiguration().set("io.sort.mb", "25");
  FileInputFormat.addInputPath(job, inputPath);
  FileOutputFormat.setOutputPath(job, outputPath);

  job.waitForCompletion(true);
  verifyOutput(outputPath);
}
示例7: testMultiMaps
import org.apache.hadoop.mapred.LocalJobRunner; //导入依赖的package包/类
/**
 * Run a test with several mappers in parallel, operating at different
 * speeds. Verify that the correct amount of output is created.
 */
@Test
public void testMultiMaps() throws Exception {
  Path inputPath = createMultiMapsInput();
  Path outputPath = getOutputPath();

  Configuration conf = new Configuration();
  // Run tasks concurrently (not sequentially) with debug logging enabled.
  conf.setBoolean("mapred.localrunner.sequential", false);
  conf.setBoolean("mapred.localrunner.debug", true);
  // Allow up to 6 tasks to run at once in the LocalJobRunner.
  conf.setInt(LocalJobRunner.LOCAL_RUNNER_SLOTS, 6);
  conf.set(JobConf.MAPRED_TASK_JAVA_OPTS, "-DtestProperty=testValue");

  // The Job(Configuration) constructor is deprecated; use the factory method.
  Job job = Job.getInstance(conf);
  job.setMapperClass(StressMapper.class);
  job.setReducerClass(CountingReducer.class);
  job.setNumReduceTasks(1);
  job.getConfiguration().set("io.sort.record.pct", "0.50");
  job.getConfiguration().set("io.sort.mb", "25");
  FileInputFormat.addInputPath(job, inputPath);
  FileOutputFormat.setOutputPath(job, outputPath);

  // Remove any output left behind by a previous run.
  FileSystem fs = FileSystem.getLocal(conf);
  if (fs.exists(outputPath)) {
    fs.delete(outputPath, true);
  }

  job.waitForCompletion(true);

  // Expect 6 successful task-completion events (5 maps + 1 reduce).
  TaskCompletionEvent[] taskCompletionEvents = job.getTaskCompletionEvents(0);
  assertTrue(taskCompletionEvents.length == 6);
  for (int i = 0; i < 6; i++) {
    assertTrue(taskCompletionEvents[i].getTaskStatus()
        == TaskCompletionEvent.Status.SUCCEEDED);
  }
  verifyOutput(outputPath);
}
示例8: testMultiMaps
import org.apache.hadoop.mapred.LocalJobRunner; //导入依赖的package包/类
/**
 * Run a test with several mappers in parallel, operating at different
 * speeds. Verify that the correct amount of output is created.
 */
@Test
public void testMultiMaps() throws Exception {
  Path inputPath = createMultiMapsInput();
  Path outputPath = getOutputPath();

  Configuration conf = new Configuration();
  // Run tasks concurrently (not sequentially) with debug logging enabled.
  conf.setBoolean("mapred.localrunner.sequential", false);
  conf.setBoolean("mapred.localrunner.debug", true);
  // Allow up to 6 tasks to run at once in the LocalJobRunner.
  conf.setInt(LocalJobRunner.LOCAL_RUNNER_SLOTS, 6);
  conf.set(JobConf.MAPRED_TASK_JAVA_OPTS, "-DtestProperty=testValue");

  // The Job(Configuration) constructor is deprecated; use the factory method.
  Job job = Job.getInstance(conf);
  job.setMapperClass(StressMapper.class);
  job.setReducerClass(CountingReducer.class);
  job.setNumReduceTasks(1);
  job.getConfiguration().set("io.sort.record.pct", "0.50");
  job.getConfiguration().set("io.sort.mb", "25");
  FileInputFormat.addInputPath(job, inputPath);
  FileOutputFormat.setOutputPath(job, outputPath);

  // Remove any output left behind by a previous run.
  FileSystem fs = FileSystem.getLocal(conf);
  if (fs.exists(outputPath)) {
    fs.delete(outputPath, true);
  }

  job.waitForCompletion(true);
  verifyOutput(outputPath);
}
示例9: close
import org.apache.hadoop.mapred.LocalJobRunner; //导入依赖的package包/类
/**
 * Close the <code>Cluster</code>.
 */
public synchronized void close() throws IOException {
  if (client instanceof LocalJobRunner) {
    // In-process client: there is no RPC proxy to tear down.
    return;
  }
  RPC.stopProxy(client);
}