

Java MapReduceTestUtil.readOutput Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapreduce.MapReduceTestUtil.readOutput. If you are wondering what MapReduceTestUtil.readOutput does, how to call it, or what real-world usages look like, the curated code examples below may help. You can also explore further examples of the containing class, org.apache.hadoop.mapreduce.MapReduceTestUtil.


The following shows 12 code examples of MapReduceTestUtil.readOutput, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java examples. A minimal sketch of the typical call pattern is given just below, before the examples.
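Before the examples, here is a minimal sketch of the typical call pattern. It is not taken from any of the projects below; the class name, output path, and expected string are hypothetical, and it assumes a job has already written its part-* files under the output directory. readOutput concatenates those part files into a single String, which a test can then compare against an expected value.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import static org.junit.Assert.assertEquals;

public class ReadOutputSketch {
  // Reads the concatenated contents of the part-* files under outDir and
  // asserts that they match the expected text. The path and expected value
  // here are placeholders for illustration only.
  public static void verifyJobOutput(Configuration conf) throws IOException {
    Path outDir = new Path("/tmp/example-job-output");  // hypothetical output directory
    String outdata = MapReduceTestUtil.readOutput(outDir, conf);
    assertEquals("hello\t2\nworld\t1\n", outdata);
  }
}

The same call also accepts a JobConf, since JobConf extends Configuration; that is why the old-API examples below pass their JobConf directly as the second argument.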

Example 1: launchWordCount

import org.apache.hadoop.mapreduce.MapReduceTestUtil; // import the package/class this method depends on
public static TestResult launchWordCount(JobConf conf,
                                         Path inDir,
                                         Path outDir,
                                         String input,
                                         int numMaps,
                                         int numReduces,
                                         String sysDir) throws IOException {
  FileSystem inFs = inDir.getFileSystem(conf);
  FileSystem outFs = outDir.getFileSystem(conf);
  outFs.delete(outDir, true);
  if (!inFs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  {
    DataOutputStream file = inFs.create(new Path(inDir, "part-0"));
    file.writeBytes(input);
    file.close();
  }
  conf.setJobName("wordcount");
  conf.setInputFormat(TextInputFormat.class);
  
  // the keys are words (strings)
  conf.setOutputKeyClass(Text.class);
  // the values are counts (ints)
  conf.setOutputValueClass(IntWritable.class);
  
  conf.setMapperClass(WordCount.MapClass.class);        
  conf.setCombinerClass(WordCount.Reduce.class);
  conf.setReducerClass(WordCount.Reduce.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setNumMapTasks(numMaps);
  conf.setNumReduceTasks(numReduces);
  conf.set(JTConfig.JT_SYSTEM_DIR, "/tmp/subru/mapred/system");
  JobClient jobClient = new JobClient(conf);
  RunningJob job = jobClient.runJob(conf);
  // Checking that the Job Client system dir is not used
  assertFalse(FileSystem.get(conf).exists(
    new Path(conf.get(JTConfig.JT_SYSTEM_DIR)))); 
  // Check if the Job Tracker system dir is propagated to the client
  assertFalse(sysDir.contains("/tmp/subru/mapred/system"));
  assertTrue(sysDir.contains("custom"));
  return new TestResult(job, MapReduceTestUtil.readOutput(outDir, conf));
}
 
Developer: naver | Project: hadoop | Lines: 45 | Source file: TestJobSysDirWithDFS.java

Example 2: runStreamJobAndValidateEnv

import org.apache.hadoop.mapreduce.MapReduceTestUtil; // import the package/class this method depends on
/**
 * Runs the streaming job and validates the output.
 * @throws IOException
 */
private void runStreamJobAndValidateEnv() throws IOException {
  int returnStatus = -1;
  boolean mayExit = false;
  StreamJob job = new StreamJob(genArgs(), mayExit);
  returnStatus = job.go();
  assertEquals("StreamJob failed.", 0, returnStatus);
  
  // validate environment variables set for the child (script) of the Java process
  String env = MapReduceTestUtil.readOutput(outputPath, mr.createJobConf());
  long logSize = USERLOG_LIMIT_KB * 1024;
  assertTrue("environment set for child is wrong", env.contains("INFO,CLA")
             && env.contains("-Dyarn.app.container.log.dir=")
             && env.contains("-Dyarn.app.container.log.filesize=" + logSize)
             && env.contains("-Dlog4j.configuration="));
}
 
Developer: ict-carch | Project: hadoop-plus | Lines: 20 | Source file: TestStreamingTaskLog.java

Example 3: runStreamJobAndValidateEnv

import org.apache.hadoop.mapreduce.MapReduceTestUtil; // import the package/class this method depends on
/**
 * Runs the streaming job and validates the output.
 * @throws IOException
 */
private void runStreamJobAndValidateEnv() throws IOException {
  int returnStatus = -1;
  boolean mayExit = false;
  StreamJob job = new StreamJob(genArgs(), mayExit);
  returnStatus = job.go();
  assertEquals("StreamJob failed.", 0, returnStatus);
  
  // validate environment variables set for the child (script) of the Java process
  String env = MapReduceTestUtil.readOutput(outputPath, mr.createJobConf());
  long logSize = USERLOG_LIMIT_KB * 1024;
  assertTrue("environment set for child is wrong", env.contains("INFO,TLA")
             && env.contains("-Dhadoop.tasklog.taskid=attempt_")
             && env.contains("-Dhadoop.tasklog.totalLogFileSize=" + logSize)
             && env.contains("-Dhadoop.tasklog.iscleanup=false"));
}
 
Developer: Nextzero | Project: hadoop-2.6.0-cdh5.4.3 | Lines: 20 | Source file: TestStreamingTaskLog.java

Example 4: launch

import org.apache.hadoop.mapreduce.MapReduceTestUtil; // import the package/class this method depends on
public static void launch() throws Exception {
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  int numOfInputLines = 10;

  Path outDir = new Path(testDir, "output_for_field_selection_test");
  Path inDir = new Path(testDir, "input_for_field_selection_test");

  StringBuffer inputData = new StringBuffer();
  StringBuffer expectedOutput = new StringBuffer();
  constructInputOutputData(inputData, expectedOutput, numOfInputLines);
  
  conf.set(FieldSelectionHelper.DATA_FIELD_SEPERATOR, "-");
  conf.set(FieldSelectionHelper.MAP_OUTPUT_KEY_VALUE_SPEC, "6,5,1-3:0-");
  conf.set(
    FieldSelectionHelper.REDUCE_OUTPUT_KEY_VALUE_SPEC, ":4,3,2,1,0,0-");
  Job job = MapReduceTestUtil.createJob(conf, inDir, outDir,
    1, 1, inputData.toString());
  job.setMapperClass(FieldSelectionMapper.class);
  job.setReducerClass(FieldSelectionReducer.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);
  job.setNumReduceTasks(1);

  job.waitForCompletion(true);
  assertTrue("Job Failed!", job.isSuccessful());

  //
  // Finally, we compare the reconstructed answer key with the
  // original one.  Remember, we need to ignore zero-count items
  // in the original key.
  //
  String outdata = MapReduceTestUtil.readOutput(outDir, conf);
  assertEquals("Outputs doesnt match.",expectedOutput.toString(), outdata);
  fs.delete(outDir, true);
}
 
Developer: naver | Project: hadoop | Lines: 37 | Source file: TestMRFieldSelection.java

Example 5: runCustomFormats

import org.apache.hadoop.mapreduce.MapReduceTestUtil; // import the package/class this method depends on
private void runCustomFormats(MiniMRCluster mr) throws IOException {
  JobConf job = mr.createJobConf();
  FileSystem fileSys = FileSystem.get(job);
  Path testDir = new Path(TEST_ROOT_DIR + "/test_mini_mr_local");
  Path outDir = new Path(testDir, "out");
  System.out.println("testDir= " + testDir);
  fileSys.delete(testDir, true);
  
  job.setInputFormat(MyInputFormat.class);
  job.setOutputFormat(MyOutputFormat.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(IntWritable.class);
  
  job.setMapperClass(MyMapper.class);        
  job.setReducerClass(MyReducer.class);
  job.setNumMapTasks(100);
  job.setNumReduceTasks(1);
  // explicitly do not use "normal" job.setOutputPath to make sure
  // that it is not hardcoded anywhere in the framework.
  job.set("non.std.out", outDir.toString());
  try {
    JobClient.runJob(job);
    String result = 
      MapReduceTestUtil.readOutput(outDir, job);
    assertEquals("output", ("aunt annie\t1\n" +
                            "bumble boat\t4\n" +
                            "crocodile pants\t0\n" +
                            "duck-dog\t5\n"+
                            "eggs\t2\n" + 
                            "finagle the agent\t3\n"), result);
  } finally {
    fileSys.delete(testDir, true);
  }
  
}
 
Developer: Nextzero | Project: hadoop-2.6.0-cdh5.4.3 | Lines: 36 | Source file: TestMiniMRLocalFS.java

Example 6: launchWordCount

import org.apache.hadoop.mapreduce.MapReduceTestUtil; // import the package/class this method depends on
public static TestResult launchWordCount(JobConf conf,
                                         Path inDir,
                                         Path outDir,
                                         String input,
                                         int numMaps,
                                         int numReduces) throws IOException {
  FileSystem inFs = inDir.getFileSystem(conf);
  FileSystem outFs = outDir.getFileSystem(conf);
  outFs.delete(outDir, true);
  if (!inFs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  {
    DataOutputStream file = inFs.create(new Path(inDir, "part-0"));
    file.writeBytes(input);
    file.close();
  }
  conf.setJobName("wordcount");
  conf.setInputFormat(TextInputFormat.class);
  
  // the keys are words (strings)
  conf.setOutputKeyClass(Text.class);
  // the values are counts (ints)
  conf.setOutputValueClass(IntWritable.class);
  
  conf.setMapperClass(WordCount.MapClass.class);        
  conf.setCombinerClass(WordCount.Reduce.class);
  conf.setReducerClass(WordCount.Reduce.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setNumMapTasks(numMaps);
  conf.setNumReduceTasks(numReduces);
  RunningJob job = JobClient.runJob(conf);
  return new TestResult(job, MapReduceTestUtil.readOutput(outDir, conf));
}
 
Developer: Nextzero | Project: hadoop-2.6.0-cdh5.4.3 | Lines: 36 | Source file: TestMiniMRWithDFS.java

Example 7: launch

import org.apache.hadoop.mapreduce.MapReduceTestUtil; // import the package/class this method depends on
public static void launch() throws Exception {
  JobConf conf = new JobConf(TestFieldSelection.class);
  FileSystem fs = FileSystem.get(conf);
  int numOfInputLines = 10;

  Path OUTPUT_DIR = new Path("build/test/output_for_field_selection_test");
  Path INPUT_DIR = new Path("build/test/input_for_field_selection_test");
  String inputFile = "input.txt";
  fs.delete(INPUT_DIR, true);
  fs.mkdirs(INPUT_DIR);
  fs.delete(OUTPUT_DIR, true);

  StringBuffer inputData = new StringBuffer();
  StringBuffer expectedOutput = new StringBuffer();

  TestMRFieldSelection.constructInputOutputData(inputData,
    expectedOutput, numOfInputLines);
  FSDataOutputStream fileOut = fs.create(new Path(INPUT_DIR, inputFile));
  fileOut.write(inputData.toString().getBytes("utf-8"));
  fileOut.close();

  System.out.println("inputData:");
  System.out.println(inputData.toString());
  JobConf job = new JobConf(conf, TestFieldSelection.class);
  FileInputFormat.setInputPaths(job, INPUT_DIR);
  job.setInputFormat(TextInputFormat.class);
  job.setMapperClass(FieldSelectionMapReduce.class);
  job.setReducerClass(FieldSelectionMapReduce.class);

  FileOutputFormat.setOutputPath(job, OUTPUT_DIR);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);
  job.setOutputFormat(TextOutputFormat.class);
  job.setNumReduceTasks(1);

  job.set(FieldSelectionHelper.DATA_FIELD_SEPERATOR, "-");
  job.set(FieldSelectionHelper.MAP_OUTPUT_KEY_VALUE_SPEC, "6,5,1-3:0-");
  job.set(FieldSelectionHelper.REDUCE_OUTPUT_KEY_VALUE_SPEC, ":4,3,2,1,0,0-");

  JobClient.runJob(job);

  //
  // Finally, we compare the reconstructed answer key with the
  // original one.  Remember, we need to ignore zero-count items
  // in the original key.
  //
  boolean success = true;
  Path outPath = new Path(OUTPUT_DIR, "part-00000");
  String outdata = MapReduceTestUtil.readOutput(outPath,job);

  assertEquals(expectedOutput.toString(),outdata);
  fs.delete(OUTPUT_DIR, true);
  fs.delete(INPUT_DIR, true);
}
 
Developer: naver | Project: hadoop | Lines: 55 | Source file: TestFieldSelection.java

Example 8: launch

import org.apache.hadoop.mapreduce.MapReduceTestUtil; // import the package/class this method depends on
public static void launch() throws Exception {
  JobConf conf = new JobConf(TestAggregates.class);
  FileSystem fs = FileSystem.get(conf);
  int numOfInputLines = 20;

  Path OUTPUT_DIR = new Path("build/test/output_for_aggregates_test");
  Path INPUT_DIR = new Path("build/test/input_for_aggregates_test");
  String inputFile = "input.txt";
  fs.delete(INPUT_DIR, true);
  fs.mkdirs(INPUT_DIR);
  fs.delete(OUTPUT_DIR, true);

  StringBuffer inputData = new StringBuffer();
  StringBuffer expectedOutput = new StringBuffer();
  expectedOutput.append("max\t19\n");
  expectedOutput.append("min\t1\n"); 

  FSDataOutputStream fileOut = fs.create(new Path(INPUT_DIR, inputFile));
  for (int i = 1; i < numOfInputLines; i++) {
    expectedOutput.append("count_").append(idFormat.format(i));
    expectedOutput.append("\t").append(i).append("\n");

    inputData.append(idFormat.format(i));
    for (int j = 1; j < i; j++) {
      inputData.append(" ").append(idFormat.format(i));
    }
    inputData.append("\n");
  }
  expectedOutput.append("value_as_string_max\t9\n");
  expectedOutput.append("value_as_string_min\t1\n");
  expectedOutput.append("uniq_count\t15\n");


  fileOut.write(inputData.toString().getBytes("utf-8"));
  fileOut.close();

  System.out.println("inputData:");
  System.out.println(inputData.toString());
  JobConf job = new JobConf(conf, TestAggregates.class);
  FileInputFormat.setInputPaths(job, INPUT_DIR);
  job.setInputFormat(TextInputFormat.class);

  FileOutputFormat.setOutputPath(job, OUTPUT_DIR);
  job.setOutputFormat(TextOutputFormat.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);
  job.setNumReduceTasks(1);

  job.setMapperClass(ValueAggregatorMapper.class);
  job.setReducerClass(ValueAggregatorReducer.class);
  job.setCombinerClass(ValueAggregatorCombiner.class);

  job.setInt("aggregator.descriptor.num", 1);
  job.set("aggregator.descriptor.0", 
        "UserDefined,org.apache.hadoop.mapred.lib.aggregate.AggregatorTests");
  job.setLong("aggregate.max.num.unique.values", 14);

  JobClient.runJob(job);

  //
  // Finally, we compare the reconstructed answer key with the
  // original one.  Remember, we need to ignore zero-count items
  // in the original key.
  //
  boolean success = true;
  Path outPath = new Path(OUTPUT_DIR, "part-00000");
  String outdata = MapReduceTestUtil.readOutput(outPath,job);
  System.out.println("full out data:");
  System.out.println(outdata.toString());
  outdata = outdata.substring(0, expectedOutput.toString().length());

  assertEquals(expectedOutput.toString(),outdata);
  //fs.delete(OUTPUT_DIR);
  fs.delete(INPUT_DIR, true);
}
 
Developer: naver | Project: hadoop | Lines: 78 | Source file: TestAggregates.java

Example 9: launch

import org.apache.hadoop.mapreduce.MapReduceTestUtil; // import the package/class this method depends on
public static void launch() throws Exception {
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  int numOfInputLines = 20;

  Path OUTPUT_DIR = new Path("build/test/output_for_aggregates_test");
  Path INPUT_DIR = new Path("build/test/input_for_aggregates_test");
  String inputFile = "input.txt";
  fs.delete(INPUT_DIR, true);
  fs.mkdirs(INPUT_DIR);
  fs.delete(OUTPUT_DIR, true);

  StringBuffer inputData = new StringBuffer();
  StringBuffer expectedOutput = new StringBuffer();
  expectedOutput.append("max\t19\n");
  expectedOutput.append("min\t1\n"); 

  FSDataOutputStream fileOut = fs.create(new Path(INPUT_DIR, inputFile));
  for (int i = 1; i < numOfInputLines; i++) {
    expectedOutput.append("count_").append(idFormat.format(i));
    expectedOutput.append("\t").append(i).append("\n");

    inputData.append(idFormat.format(i));
    for (int j = 1; j < i; j++) {
      inputData.append(" ").append(idFormat.format(i));
    }
    inputData.append("\n");
  }
  expectedOutput.append("value_as_string_max\t9\n");
  expectedOutput.append("value_as_string_min\t1\n");
  expectedOutput.append("uniq_count\t15\n");


  fileOut.write(inputData.toString().getBytes("utf-8"));
  fileOut.close();

  System.out.println("inputData:");
  System.out.println(inputData.toString());

  conf.setInt(ValueAggregatorJobBase.DESCRIPTOR_NUM, 1);
  conf.set(ValueAggregatorJobBase.DESCRIPTOR + ".0", 
    "UserDefined,org.apache.hadoop.mapreduce.lib.aggregate.AggregatorTests");
  conf.setLong(UniqValueCount.MAX_NUM_UNIQUE_VALUES, 14);
  
  Job job = Job.getInstance(conf);
  FileInputFormat.setInputPaths(job, INPUT_DIR);
  job.setInputFormatClass(TextInputFormat.class);
  FileOutputFormat.setOutputPath(job, OUTPUT_DIR);
  job.setOutputFormatClass(TextOutputFormat.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);
  job.setNumReduceTasks(1);
  job.setMapperClass(ValueAggregatorMapper.class);
  job.setReducerClass(ValueAggregatorReducer.class);
  job.setCombinerClass(ValueAggregatorCombiner.class);


  job.waitForCompletion(true);

  assertTrue(job.isSuccessful());
  //
  // Finally, we compare the reconstructed answer key with the
  // original one.  Remember, we need to ignore zero-count items
  // in the original key.
  //
  String outdata = MapReduceTestUtil.readOutput(OUTPUT_DIR, conf);
  System.out.println("full out data:");
  System.out.println(outdata.toString());
  outdata = outdata.substring(0, expectedOutput.toString().length());

  assertEquals(expectedOutput.toString(),outdata);
  fs.delete(OUTPUT_DIR, true);
  fs.delete(INPUT_DIR, true);
}
 
Developer: naver | Project: hadoop | Lines: 77 | Source file: TestMapReduceAggregates.java

Example 10: launchWordCount

import org.apache.hadoop.mapreduce.MapReduceTestUtil; // import the package/class this method depends on
private String launchWordCount(JobConf conf,
                               String input,
                               int numMaps,
                               int numReduces) throws IOException {
  Path inDir = new Path("testing/wc/input");
  Path outDir = new Path("testing/wc/output");

  // Hack for local FS that does not have the concept of a 'mounting point'
  if (isLocalFS()) {
    String localPathRoot = System.getProperty("test.build.data","/tmp")
      .toString().replace(' ', '+');
    inDir = new Path(localPathRoot, inDir);
    outDir = new Path(localPathRoot, outDir);
  }

  FileSystem fs = FileSystem.get(conf);
  fs.delete(outDir, true);
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  {
    DataOutputStream file = fs.create(new Path(inDir, "part-0"));
    file.writeBytes(input);
    file.close();
  }
  conf.setJobName("wordcount");
  conf.setInputFormat(TextInputFormat.class);

  // the keys are words (strings)
  conf.setOutputKeyClass(Text.class);
  // the values are counts (ints)
  conf.setOutputValueClass(IntWritable.class);

  conf.setMapperClass(WordCount.MapClass.class);
  conf.setCombinerClass(WordCount.Reduce.class);
  conf.setReducerClass(WordCount.Reduce.class);

  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setNumMapTasks(numMaps);
  conf.setNumReduceTasks(numReduces);
  JobClient.runJob(conf);
  return MapReduceTestUtil.readOutput(outDir, conf);
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 45 | Source file: NotificationTestCase.java

Example 11: launch

import org.apache.hadoop.mapreduce.MapReduceTestUtil; // import the package/class this method depends on
public static void launch() throws Exception {
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  int numOfInputLines = 20;

  String baseDir = System.getProperty("test.build.data", "build/test/data");
  Path OUTPUT_DIR = new Path(baseDir + "/output_for_aggregates_test");
  Path INPUT_DIR = new Path(baseDir + "/input_for_aggregates_test");
  String inputFile = "input.txt";
  fs.delete(INPUT_DIR, true);
  fs.mkdirs(INPUT_DIR);
  fs.delete(OUTPUT_DIR, true);

  StringBuffer inputData = new StringBuffer();
  StringBuffer expectedOutput = new StringBuffer();
  expectedOutput.append("max\t19\n");
  expectedOutput.append("min\t1\n"); 

  FSDataOutputStream fileOut = fs.create(new Path(INPUT_DIR, inputFile));
  for (int i = 1; i < numOfInputLines; i++) {
    expectedOutput.append("count_").append(idFormat.format(i));
    expectedOutput.append("\t").append(i).append("\n");

    inputData.append(idFormat.format(i));
    for (int j = 1; j < i; j++) {
      inputData.append(" ").append(idFormat.format(i));
    }
    inputData.append("\n");
  }
  expectedOutput.append("value_as_string_max\t9\n");
  expectedOutput.append("value_as_string_min\t1\n");
  expectedOutput.append("uniq_count\t15\n");


  fileOut.write(inputData.toString().getBytes("utf-8"));
  fileOut.close();

  System.out.println("inputData:");
  System.out.println(inputData.toString());

  conf.setInt(ValueAggregatorJobBase.DESCRIPTOR_NUM, 1);
  conf.set(ValueAggregatorJobBase.DESCRIPTOR + ".0", 
    "UserDefined,org.apache.hadoop.mapreduce.lib.aggregate.AggregatorTests");
  conf.setLong(UniqValueCount.MAX_NUM_UNIQUE_VALUES, 14);
  
  Job job = Job.getInstance(conf);
  FileInputFormat.setInputPaths(job, INPUT_DIR);
  job.setInputFormatClass(TextInputFormat.class);
  FileOutputFormat.setOutputPath(job, OUTPUT_DIR);
  job.setOutputFormatClass(TextOutputFormat.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);
  job.setNumReduceTasks(1);
  job.setMapperClass(ValueAggregatorMapper.class);
  job.setReducerClass(ValueAggregatorReducer.class);
  job.setCombinerClass(ValueAggregatorCombiner.class);


  job.waitForCompletion(true);

  assertTrue(job.isSuccessful());
  //
  // Finally, we compare the reconstructed answer key with the
  // original one.  Remember, we need to ignore zero-count items
  // in the original key.
  //
  String outdata = MapReduceTestUtil.readOutput(OUTPUT_DIR, conf);
  System.out.println("full out data:");
  System.out.println(outdata.toString());
  outdata = outdata.substring(0, expectedOutput.toString().length());

  assertEquals(expectedOutput.toString(),outdata);
  fs.delete(OUTPUT_DIR, true);
  fs.delete(INPUT_DIR, true);
}
 
Developer: hopshadoop | Project: hops | Lines: 78 | Source file: TestMapReduceAggregates.java

Example 12: launchWordCount

import org.apache.hadoop.mapreduce.MapReduceTestUtil; // import the package/class this method depends on
public static TestResult launchWordCount(JobConf conf,
                                         Path inDir,
                                         Path outDir,
                                         String input,
                                         int numMaps,
                                         int numReduces,
                                         String sysDir) throws IOException {
  FileSystem inFs = inDir.getFileSystem(conf);
  FileSystem outFs = outDir.getFileSystem(conf);
  outFs.delete(outDir, true);
  if (!inFs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  {
    DataOutputStream file = inFs.create(new Path(inDir, "part-0"));
    file.writeBytes(input);
    file.close();
  }
  conf.setJobName("wordcount");
  conf.setInputFormat(TextInputFormat.class);
  
  // the keys are words (strings)
  conf.setOutputKeyClass(Text.class);
  // the values are counts (ints)
  conf.setOutputValueClass(IntWritable.class);
  
  conf.setMapperClass(WordCount.MapClass.class);        
  conf.setCombinerClass(WordCount.Reduce.class);
  conf.setReducerClass(WordCount.Reduce.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setNumMapTasks(numMaps);
  conf.setNumReduceTasks(numReduces);
  conf.set("mapred.system.dir", "/tmp/subru/mapred/system");
  JobClient jobClient = new JobClient(conf);
  RunningJob job = jobClient.runJob(conf);
  // Checking that the Job Client system dir is not used
  assertFalse(FileSystem.get(conf).exists(new Path(conf.get("mapred.system.dir")))); 
  // Check if the Job Tracker system dir is propagated to the client
  sysDir = jobClient.getSystemDir().toString();
  System.out.println("Job sys dir -->" + sysDir);
  assertFalse(sysDir.contains("/tmp/subru/mapred/system"));
  assertTrue(sysDir.contains("custom"));
  return new TestResult(job, MapReduceTestUtil.readOutput(outDir, conf));
}
 
Developer: Nextzero | Project: hadoop-2.6.0-cdh5.4.3 | Lines: 46 | Source file: TestJobSysDirWithDFS.java


Note: The org.apache.hadoop.mapreduce.MapReduceTestUtil.readOutput examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's License. Do not reproduce without permission.