

Java JobConf.setOutputKeyClass Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapred.JobConf.setOutputKeyClass. If you are wondering what exactly JobConf.setOutputKeyClass does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also browse further usage examples for the enclosing class, org.apache.hadoop.mapred.JobConf.


A total of 15 code examples of the JobConf.setOutputKeyClass method are shown below, sorted by popularity by default.
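
Before the project examples, here is a minimal, self-contained sketch of where setOutputKeyClass typically sits in an old-API (org.apache.hadoop.mapred) job driver. It is not taken from any of the projects below; the class name SetOutputKeyClassDemo is illustrative, and the built-in IdentityMapper/IdentityReducer are used so the job runs without any custom classes.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;

public class SetOutputKeyClassDemo {
  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf(SetOutputKeyClassDemo.class);
    job.setJobName("setOutputKeyClass-demo");

    // TextInputFormat produces LongWritable offsets and Text lines; the
    // identity mapper/reducer pass them through unchanged.
    job.setInputFormat(TextInputFormat.class);
    job.setMapperClass(IdentityMapper.class);
    job.setReducerClass(IdentityReducer.class);

    // Declare the key/value classes the job writes out. Unless
    // setMapOutputKeyClass/setMapOutputValueClass are also called, these
    // types apply to the map output as well.
    job.setOutputKeyClass(LongWritable.class);
    job.setOutputValueClass(Text.class);

    FileInputFormat.setInputPaths(job, new Path(args[0]));   // input directory
    FileOutputFormat.setOutputPath(job, new Path(args[1]));  // must not already exist
    JobClient.runJob(job);
  }
}

The LongWritable/Text pairing here mirrors what Examples 9 and 10 below declare for their TextInputFormat-based jobs; in general the classes passed to setOutputKeyClass/setOutputValueClass must match what the reducer (or the mapper, for map-only jobs) actually emits.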

Example 1: runTests

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
/**
 * Run the test
 * 
 * @throws IOException on error
 */
public static void runTests() throws IOException {
  config.setLong("io.bytes.per.checksum", bytesPerChecksum);
  
  JobConf job = new JobConf(config, NNBench.class);

  job.setJobName("NNBench-" + operation);
  FileInputFormat.setInputPaths(job, new Path(baseDir, CONTROL_DIR_NAME));
  job.setInputFormat(SequenceFileInputFormat.class);
  
  // Explicitly set number of max map attempts to 1.
  job.setMaxMapAttempts(1);
  
  // Explicitly turn off speculative execution
  job.setSpeculativeExecution(false);

  job.setMapperClass(NNBenchMapper.class);
  job.setReducerClass(NNBenchReducer.class);

  FileOutputFormat.setOutputPath(job, new Path(baseDir, OUTPUT_DIR_NAME));
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);
  job.setNumReduceTasks((int) numberOfReduces);
  JobClient.runJob(job);
}
 
Developer ID: naver, Project: hadoop, Lines: 30, Source: NNBench.java

Example 2: runIOTest

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
private void runIOTest(
        Class<? extends Mapper<Text, LongWritable, Text, Text>> mapperClass, 
        Path outputDir) throws IOException {
  JobConf job = new JobConf(config, TestDFSIO.class);

  FileInputFormat.setInputPaths(job, getControlDir(config));
  job.setInputFormat(SequenceFileInputFormat.class);

  job.setMapperClass(mapperClass);
  job.setReducerClass(AccumulatingReducer.class);

  FileOutputFormat.setOutputPath(job, outputDir);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);
  job.setNumReduceTasks(1);
  JobClient.runJob(job);
}
 
Developer ID: naver, Project: hadoop, Lines: 18, Source: TestDFSIO.java

Example 3: getJob

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
/**
 * Sets up a job conf for the given job using the given config object. Ensures
 * that the correct input format is set, the mapper and reducer classes, and
 * the input and output key and value classes, along with any other job
 * configuration.
 * 
 * @param config
 * @return JobConf representing the job to be run
 * @throws IOException
 */
private JobConf getJob(ConfigExtractor config) throws IOException {
  JobConf job = new JobConf(config.getConfig(), SliveTest.class);
  job.setInputFormat(DummyInputFormat.class);
  FileOutputFormat.setOutputPath(job, config.getOutputPath());
  job.setMapperClass(SliveMapper.class);
  job.setPartitionerClass(SlivePartitioner.class);
  job.setReducerClass(SliveReducer.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);
  job.setOutputFormat(TextOutputFormat.class);
  TextOutputFormat.setCompressOutput(job, false);
  job.setNumReduceTasks(config.getReducerAmount());
  job.setNumMapTasks(config.getMapAmount());
  return job;
}
 
Developer ID: naver, Project: hadoop, Lines: 26, Source: SliveTest.java

Example 4: joinAs

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
private static void joinAs(String jointype,
    Class<? extends SimpleCheckerBase> c) throws Exception {
  final int srcs = 4;
  Configuration conf = new Configuration();
  JobConf job = new JobConf(conf, c);
  Path base = cluster.getFileSystem().makeQualified(new Path("/"+jointype));
  Path[] src = writeSimpleSrc(base, conf, srcs);
  job.set("mapreduce.join.expr", CompositeInputFormat.compose(jointype,
      SequenceFileInputFormat.class, src));
  job.setInt("testdatamerge.sources", srcs);
  job.setInputFormat(CompositeInputFormat.class);
  FileOutputFormat.setOutputPath(job, new Path(base, "out"));

  job.setMapperClass(c);
  job.setReducerClass(c);
  job.setOutputKeyClass(IntWritable.class);
  job.setOutputValueClass(IntWritable.class);
  JobClient.runJob(job);
  base.getFileSystem(job).delete(base, true);
}
 
Developer ID: naver, Project: hadoop, Lines: 21, Source: TestDatamerge.java

Example 5: testEmptyJoin

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
public void testEmptyJoin() throws Exception {
  JobConf job = new JobConf();
  Path base = cluster.getFileSystem().makeQualified(new Path("/empty"));
  Path[] src = { new Path(base,"i0"), new Path("i1"), new Path("i2") };
  job.set("mapreduce.join.expr", CompositeInputFormat.compose("outer",
      Fake_IF.class, src));
  job.setInputFormat(CompositeInputFormat.class);
  FileOutputFormat.setOutputPath(job, new Path(base, "out"));

  job.setMapperClass(IdentityMapper.class);
  job.setReducerClass(IdentityReducer.class);
  job.setOutputKeyClass(IncomparableKey.class);
  job.setOutputValueClass(NullWritable.class);

  JobClient.runJob(job);
  base.getFileSystem(job).delete(base, true);
}
 
Developer ID: naver, Project: hadoop, Lines: 18, Source: TestDatamerge.java

Example 6: createCopyJob

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
/**
 * Creates a simple copy job.
 * 
 * @param indirs List of input directories.
 * @param outdir Output directory.
 * @return JobConf initialised for a simple copy job.
 * @throws Exception If an error occurs creating job configuration.
 */
static JobConf createCopyJob(List<Path> indirs, Path outdir) throws Exception {

  Configuration defaults = new Configuration();
  JobConf theJob = new JobConf(defaults, TestJobControl.class);
  theJob.setJobName("DataMoveJob");

  FileInputFormat.setInputPaths(theJob, indirs.toArray(new Path[0]));
  theJob.setMapperClass(DataCopy.class);
  FileOutputFormat.setOutputPath(theJob, outdir);
  theJob.setOutputKeyClass(Text.class);
  theJob.setOutputValueClass(Text.class);
  theJob.setReducerClass(DataCopy.class);
  theJob.setNumMapTasks(12);
  theJob.setNumReduceTasks(4);
  return theJob;
}
 
Developer ID: naver, Project: hadoop, Lines: 25, Source: JobControlTestUtils.java

Example 7: createJobConf

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
private static JobConf createJobConf(Configuration conf) {
  JobConf jobconf = new JobConf(conf, DistCpV1.class);
  jobconf.setJobName(conf.get("mapred.job.name", NAME));

  // turn off speculative execution, because DFS doesn't handle
  // multiple writers to the same file.
  jobconf.setMapSpeculativeExecution(false);

  jobconf.setInputFormat(CopyInputFormat.class);
  jobconf.setOutputKeyClass(Text.class);
  jobconf.setOutputValueClass(Text.class);

  jobconf.setMapperClass(CopyFilesMapper.class);
  jobconf.setNumReduceTasks(0);
  return jobconf;
}
 
Developer ID: naver, Project: hadoop, Lines: 17, Source: DistCpV1.java

Example 8: initTableReduceJob

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
/**
 * Use this before submitting a TableReduce job. It will
 * appropriately set up the JobConf.
 *
 * @param table  The output table.
 * @param reducer  The reducer class to use.
 * @param job  The current job configuration to adjust.
 * @param partitioner  Partitioner to use. Pass <code>null</code> to use
 * default partitioner.
 * @param addDependencyJars upload HBase jars and jars for any of the configured
 *           job classes via the distributed cache (tmpjars).
 * @throws IOException When determining the region count fails.
 */
public static void initTableReduceJob(String table,
  Class<? extends TableReduce> reducer, JobConf job, Class partitioner,
  boolean addDependencyJars) throws IOException {
  job.setOutputFormat(TableOutputFormat.class);
  job.setReducerClass(reducer);
  job.set(TableOutputFormat.OUTPUT_TABLE, table);
  job.setOutputKeyClass(ImmutableBytesWritable.class);
  job.setOutputValueClass(Put.class);
  job.setStrings("io.serializations", job.get("io.serializations"),
      MutationSerialization.class.getName(), ResultSerialization.class.getName());
  if (partitioner == HRegionPartitioner.class) {
    job.setPartitionerClass(HRegionPartitioner.class);
    int regions =
      MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), TableName.valueOf(table));
    if (job.getNumReduceTasks() > regions) {
      job.setNumReduceTasks(regions);
    }
  } else if (partitioner != null) {
    job.setPartitionerClass(partitioner);
  }
  if (addDependencyJars) {
    addDependencyJars(job);
  }
  initCredentials(job);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 39, Source: TableMapReduceUtil.java
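
As a hedged illustration of how this helper might be invoked (the table name, the driver class MyDriver, and MyTableReducer — a TableReduce implementation — are hypothetical placeholders, not part of the example above):

JobConf job = new JobConf(HBaseConfiguration.create(), MyDriver.class);
// One call wires up TableOutputFormat, the reducer, the target table, and the
// ImmutableBytesWritable/Put output key/value classes shown above.
TableMapReduceUtil.initTableReduceJob(
    "web_pages",          // hypothetical output table
    MyTableReducer.class, // hypothetical reducer extending TableReduce
    job,
    null,                 // null = keep the default partitioner
    true);                // ship HBase dependency jars via the distributed cache
JobClient.runJob(job);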

Example 9: runJob

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
static boolean runJob(JobConf conf, Path inDir, Path outDir, int numMaps, 
                         int numReds) throws IOException, InterruptedException {

  FileSystem fs = FileSystem.get(conf);
  if (fs.exists(outDir)) {
    fs.delete(outDir, true);
  }
  if (!fs.exists(inDir)) {
    fs.mkdirs(inDir);
  }
  String input = "The quick brown fox\n" + "has many silly\n"
      + "red fox sox\n";
  for (int i = 0; i < numMaps; ++i) {
    DataOutputStream file = fs.create(new Path(inDir, "part-" + i));
    file.writeBytes(input);
    file.close();
  }

  DistributedCache.addFileToClassPath(TestMRJobs.APP_JAR, conf, fs);
  conf.setOutputCommitter(CustomOutputCommitter.class);
  conf.setInputFormat(TextInputFormat.class);
  conf.setOutputKeyClass(LongWritable.class);
  conf.setOutputValueClass(Text.class);

  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setNumMapTasks(numMaps);
  conf.setNumReduceTasks(numReds);

  JobClient jobClient = new JobClient(conf);
  
  RunningJob job = jobClient.submitJob(conf);
  return jobClient.monitorAndPrintJob(conf, job);
}
 
Developer ID: naver, Project: hadoop, Lines: 35, Source: TestMROldApiJobs.java

Example 10: testCombinerShouldUpdateTheReporter

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
@Test
public void testCombinerShouldUpdateTheReporter() throws Exception {
  JobConf conf = new JobConf(mrCluster.getConfig());
  int numMaps = 5;
  int numReds = 2;
  Path in = new Path(mrCluster.getTestWorkDir().getAbsolutePath(),
      "testCombinerShouldUpdateTheReporter-in");
  Path out = new Path(mrCluster.getTestWorkDir().getAbsolutePath(),
      "testCombinerShouldUpdateTheReporter-out");
  createInputOutPutFolder(in, out, numMaps);
  conf.setJobName("test-job-with-combiner");
  conf.setMapperClass(IdentityMapper.class);
  conf.setCombinerClass(MyCombinerToCheckReporter.class);
  //conf.setJarByClass(MyCombinerToCheckReporter.class);
  conf.setReducerClass(IdentityReducer.class);
  DistributedCache.addFileToClassPath(TestMRJobs.APP_JAR, conf);
  conf.setOutputCommitter(CustomOutputCommitter.class);
  conf.setInputFormat(TextInputFormat.class);
  conf.setOutputKeyClass(LongWritable.class);
  conf.setOutputValueClass(Text.class);

  FileInputFormat.setInputPaths(conf, in);
  FileOutputFormat.setOutputPath(conf, out);
  conf.setNumMapTasks(numMaps);
  conf.setNumReduceTasks(numReds);
  
  runJob(conf);
}
 
Developer ID: naver, Project: hadoop, Lines: 29, Source: TestMRAppWithCombiner.java

Example 11: createJobConf

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
private static JobConf createJobConf(Configuration conf) {
  JobConf jobconf = new JobConf(conf, DistCh.class);
  jobconf.setJobName(NAME);
  jobconf.setMapSpeculativeExecution(false);

  jobconf.setInputFormat(ChangeInputFormat.class);
  jobconf.setOutputKeyClass(Text.class);
  jobconf.setOutputValueClass(Text.class);

  jobconf.setMapperClass(ChangeFilesMapper.class);
  jobconf.setNumReduceTasks(0);
  return jobconf;
}
 
Developer ID: naver, Project: hadoop, Lines: 14, Source: DistCh.java

Example 12: runParseTest

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
public void runParseTest(String fieldTerminator, String lineTerminator,
    String encloser, String escape, boolean encloseRequired)
    throws IOException {

  ClassLoader prevClassLoader = null;

  String[] argv = getArgv(true, fieldTerminator, lineTerminator,
      encloser, escape, encloseRequired);
  runImport(argv);
  try {
    String tableClassName = getTableName();

    argv = getArgv(false, fieldTerminator, lineTerminator, encloser,
        escape, encloseRequired);
    SqoopOptions opts = new ImportTool().parseArguments(argv, null,
        null, true);

    CompilationManager compileMgr = new CompilationManager(opts);
    String jarFileName = compileMgr.getJarFilename();

    // Make sure the user's class is loaded into our address space.
    prevClassLoader = ClassLoaderStack.addJarFile(jarFileName,
        tableClassName);

    JobConf job = new JobConf();
    job.setJar(jarFileName);

    // Tell the job what class we're testing.
    job.set(ReparseMapper.USER_TYPE_NAME_KEY, tableClassName);

    // use local mode in the same JVM.
    ConfigurationHelper.setJobtrackerAddr(job, "local");
    job.set("fs.default.name", "file:///");

    String warehouseDir = getWarehouseDir();
    Path warehousePath = new Path(warehouseDir);
    Path inputPath = new Path(warehousePath, getTableName());
    Path outputPath = new Path(warehousePath, getTableName() + "-out");

    job.setMapperClass(ReparseMapper.class);
    job.setNumReduceTasks(0);
    FileInputFormat.addInputPath(job, inputPath);
    FileOutputFormat.setOutputPath(job, outputPath);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(NullWritable.class);

    JobClient.runJob(job);
  } catch (InvalidOptionsException ioe) {
    LOG.error(StringUtils.stringifyException(ioe));
    fail(ioe.toString());
  } catch (ParseException pe) {
    LOG.error(StringUtils.stringifyException(pe));
    fail(pe.toString());
  } finally {
    if (null != prevClassLoader) {
      ClassLoaderStack.setCurrentClassLoader(prevClassLoader);
    }
  }
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 61, Source: SQLServerParseMethodsManualTest.java

Example 13: runParseTest

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
public void runParseTest(String fieldTerminator, String lineTerminator,
    String encloser, String escape, boolean encloseRequired)
    throws IOException {

  ClassLoader prevClassLoader = null;

  String [] argv = getArgv(true, fieldTerminator, lineTerminator,
      encloser, escape, encloseRequired);
  runImport(argv);
  try {
    String tableClassName = getTableName();

    argv = getArgv(false, fieldTerminator, lineTerminator, encloser, escape,
        encloseRequired);
    SqoopOptions opts = new ImportTool().parseArguments(argv, null, null,
        true);

    CompilationManager compileMgr = new CompilationManager(opts);
    String jarFileName = compileMgr.getJarFilename();

    // Make sure the user's class is loaded into our address space.
    prevClassLoader = ClassLoaderStack.addJarFile(jarFileName,
        tableClassName);

    JobConf job = new JobConf();
    job.setJar(jarFileName);

    // Tell the job what class we're testing.
    job.set(ReparseMapper.USER_TYPE_NAME_KEY, tableClassName);

    // use local mode in the same JVM.
    ConfigurationHelper.setJobtrackerAddr(job, "local");
    if (!BaseSqoopTestCase.isOnPhysicalCluster()) {
      job.set(CommonArgs.FS_DEFAULT_NAME, CommonArgs.LOCAL_FS);
    }
    String warehouseDir = getWarehouseDir();
    Path warehousePath = new Path(warehouseDir);
    Path inputPath = new Path(warehousePath, getTableName());
    Path outputPath = new Path(warehousePath, getTableName() + "-out");

    job.setMapperClass(ReparseMapper.class);
    job.setNumReduceTasks(0);
    FileInputFormat.addInputPath(job, inputPath);
    FileOutputFormat.setOutputPath(job, outputPath);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(NullWritable.class);

    JobClient.runJob(job);
  } catch (InvalidOptionsException ioe) {
    fail(ioe.toString());
  } catch (ParseException pe) {
    fail(pe.toString());
  } finally {
    if (null != prevClassLoader) {
      ClassLoaderStack.setCurrentClassLoader(prevClassLoader);
    }
  }
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 60, Source: TestParseMethods.java

Example 14: testFieldSetter

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
public void testFieldSetter() throws IOException {
  ClassLoader prevClassLoader = null;

  String [] types = { "VARCHAR(32)", "VARCHAR(32)" };
  String [] vals = { "'meep'", "'foo'" };
  createTableWithColTypes(types, vals);

  String [] argv = getArgv(true, ",", "\\n", "\\\'", "\\", false);
  runImport(argv);
  try {
    String tableClassName = getTableName();

    argv = getArgv(false, ",", "\\n", "\\\'", "\\", false);
    SqoopOptions opts = new ImportTool().parseArguments(argv, null, null,
        true);

    CompilationManager compileMgr = new CompilationManager(opts);
    String jarFileName = compileMgr.getJarFilename();

    // Make sure the user's class is loaded into our address space.
    prevClassLoader = ClassLoaderStack.addJarFile(jarFileName,
        tableClassName);

    JobConf job = new JobConf();
    job.setJar(jarFileName);

    // Tell the job what class we're testing.
    job.set(ExplicitSetMapper.USER_TYPE_NAME_KEY, tableClassName);
    job.set(ExplicitSetMapper.SET_COL_KEY, BASE_COL_NAME + "0");
    job.set(ExplicitSetMapper.SET_VAL_KEY, "this-is-a-test");

    // use local mode in the same JVM.
    ConfigurationHelper.setJobtrackerAddr(job, "local");
    if (!BaseSqoopTestCase.isOnPhysicalCluster()) {
      job.set(CommonArgs.FS_DEFAULT_NAME, CommonArgs.LOCAL_FS);
    }
    String warehouseDir = getWarehouseDir();
    Path warehousePath = new Path(warehouseDir);
    Path inputPath = new Path(warehousePath, getTableName());
    Path outputPath = new Path(warehousePath, getTableName() + "-out");

    job.setMapperClass(ExplicitSetMapper.class);
    job.setNumReduceTasks(0);
    FileInputFormat.addInputPath(job, inputPath);
    FileOutputFormat.setOutputPath(job, outputPath);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(NullWritable.class);

    JobClient.runJob(job);
  } catch (InvalidOptionsException ioe) {
    fail(ioe.toString());
  } catch (ParseException pe) {
    fail(pe.toString());
  } finally {
    if (null != prevClassLoader) {
      ClassLoaderStack.setCurrentClassLoader(prevClassLoader);
    }
  }
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 61, Source: TestParseMethods.java

Example 15: submitAsMapReduce

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
/**
 * Based on args we submit the LoadGenerator as MR job.
 * Number of MapTasks is numMapTasks
 * @return exitCode for job submission
 */
private int submitAsMapReduce() {
  
  System.out.println("Running as a MapReduce job with " + 
      numMapTasks + " mapTasks;  Output to file " + mrOutDir);


  Configuration conf = new Configuration(getConf());
  
  // First set all the args of LoadGenerator as Conf vars to pass to MR tasks

  conf.set(LG_ROOT , root.toString());
  conf.setInt(LG_MAXDELAYBETWEENOPS, maxDelayBetweenOps);
  conf.setInt(LG_NUMOFTHREADS, numOfThreads);
  conf.set(LG_READPR, readProbs[0]+""); //Pass Double as string
  conf.set(LG_WRITEPR, writeProbs[0]+""); //Pass Double as string
  conf.setLong(LG_SEED, seed); // random seed forwarded to the map tasks
  conf.setInt(LG_NUMMAPTASKS, numMapTasks);
  if (scriptFile == null && durations[0] <=0) {
    System.err.println("When run as a MapReduce job, elapsed Time or ScriptFile must be specified");
    System.exit(-1);
  }
  conf.setLong(LG_ELAPSEDTIME, durations[0]);
  conf.setLong(LG_STARTTIME, startTime); 
  if (scriptFile != null) {
    conf.set(LG_SCRIPTFILE , scriptFile);
  }
  conf.set(LG_FLAGFILE, flagFile.toString());
  
  // Now set the necessary conf variables that apply to run MR itself.
  JobConf jobConf = new JobConf(conf, LoadGenerator.class);
  jobConf.setJobName("NNLoadGeneratorViaMR");
  jobConf.setNumMapTasks(numMapTasks);
  jobConf.setNumReduceTasks(1); // 1 reducer to collect the results

  jobConf.setOutputKeyClass(Text.class);
  jobConf.setOutputValueClass(IntWritable.class);

  jobConf.setMapperClass(MapperThatRunsNNLoadGenerator.class);
  jobConf.setReducerClass(ReducerThatCollectsLGdata.class);

  jobConf.setInputFormat(DummyInputFormat.class);
  jobConf.setOutputFormat(TextOutputFormat.class);
  
  // Explicitly set number of max map attempts to 1.
  jobConf.setMaxMapAttempts(1);
  // Explicitly turn off speculative execution
  jobConf.setSpeculativeExecution(false);

  // This mapReduce job has no input but has output
  FileOutputFormat.setOutputPath(jobConf, new Path(mrOutDir));

  try {
    JobClient.runJob(jobConf);
  } catch (IOException e) {
    System.err.println("Failed to run job: " + e.getMessage());
    return -1;
  }
  return 0;
  
}
 
Developer ID: naver, Project: hadoop, Lines: 66, Source: LoadGeneratorMR.java


Note: The org.apache.hadoop.mapred.JobConf.setOutputKeyClass method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce without permission.