

Java JobConf.setNumReduceTasks Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapred.JobConf.setNumReduceTasks. If you are unsure what JobConf.setNumReduceTasks does, how to call it, or want to see it used in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.mapred.JobConf.


The sections below present 15 code examples of JobConf.setNumReduceTasks, sorted by popularity by default.
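Before the examples, here is a minimal, self-contained sketch of where setNumReduceTasks fits in a classic mapred job setup. It is an illustrative assumption rather than code taken from any of the projects below: the class name SetNumReduceTasksSketch, the identity mapper/reducer, and the command-line input/output paths are hypothetical placeholders.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;

public class SetNumReduceTasksSketch {
  public static void main(String[] args) throws IOException {
    JobConf job = new JobConf(SetNumReduceTasksSketch.class);
    job.setJobName("setNumReduceTasks-sketch");

    // Identity mapper/reducer keep the sketch self-contained.
    job.setMapperClass(IdentityMapper.class);
    job.setReducerClass(IdentityReducer.class);

    job.setInputFormat(TextInputFormat.class);
    job.setOutputFormat(TextOutputFormat.class);
    job.setOutputKeyClass(LongWritable.class);
    job.setOutputValueClass(Text.class);

    // The method this page documents: request 2 reduce tasks.
    // Passing 0 instead makes this a map-only job, as several
    // of the examples below do.
    job.setNumReduceTasks(2);

    // Hypothetical input/output paths supplied on the command line.
    FileInputFormat.setInputPaths(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));

    JobClient.runJob(job);
  }
}

Unlike the number of map tasks, the value passed to setNumReduceTasks is used as-is, so jobs writing to partitioned sinks often cap it; the HBase examples below (initTableReduceJob and limitNumReduceTasks) cap it at the table's region count before submitting.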

Example 1: testInputFormat

import org.apache.hadoop.mapred.JobConf; // import the package/class the method depends on
void testInputFormat(Class<? extends InputFormat> clazz) throws IOException {
  final JobConf job = MapreduceTestingShim.getJobConf(mrCluster);
  job.setInputFormat(clazz);
  job.setOutputFormat(NullOutputFormat.class);
  job.setMapperClass(ExampleVerifier.class);
  job.setNumReduceTasks(0);
  LOG.debug("submitting job.");
  final RunningJob run = JobClient.runJob(job);
  assertTrue("job failed!", run.isSuccessful());
  assertEquals("Saw the wrong number of instances of the filtered-for row.", 2, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":row", "aaa").getCounter());
  assertEquals("Saw any instances of the filtered out row.", 0, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":row", "bbb").getCounter());
  assertEquals("Saw the wrong number of instances of columnA.", 1, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":family", "columnA").getCounter());
  assertEquals("Saw the wrong number of instances of columnB.", 1, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":family", "columnB").getCounter());
  assertEquals("Saw the wrong count of values for the filtered-for row.", 2, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":value", "value aaa").getCounter());
  assertEquals("Saw the wrong count of values for the filtered-out row.", 0, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":value", "value bbb").getCounter());
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 23, Source: TestTableInputFormat.java

Example 2: runTests

import org.apache.hadoop.mapred.JobConf; // import the package/class the method depends on
/**
 * Run the test
 * 
 * @throws IOException on error
 */
public static void runTests() throws IOException {
  config.setLong("io.bytes.per.checksum", bytesPerChecksum);
  
  JobConf job = new JobConf(config, NNBench.class);

  job.setJobName("NNBench-" + operation);
  FileInputFormat.setInputPaths(job, new Path(baseDir, CONTROL_DIR_NAME));
  job.setInputFormat(SequenceFileInputFormat.class);
  
  // Explicitly set number of max map attempts to 1.
  job.setMaxMapAttempts(1);
  
  // Explicitly turn off speculative execution
  job.setSpeculativeExecution(false);

  job.setMapperClass(NNBenchMapper.class);
  job.setReducerClass(NNBenchReducer.class);

  FileOutputFormat.setOutputPath(job, new Path(baseDir, OUTPUT_DIR_NAME));
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);
  job.setNumReduceTasks((int) numberOfReduces);
  JobClient.runJob(job);
}
 
Developer ID: naver, Project: hadoop, Lines: 30, Source: NNBench.java

Example 3: getJob

import org.apache.hadoop.mapred.JobConf; // import the package/class the method depends on
/**
 * Sets up a job conf for the given job using the given config object. Ensures
 * that the correct input format is set, along with the mapper and reducer
 * classes, the input and output key and value classes, and any other job
 * configuration.
 * 
 * @param config
 * @return JobConf representing the job to be run
 * @throws IOException
 */
private JobConf getJob(ConfigExtractor config) throws IOException {
  JobConf job = new JobConf(config.getConfig(), SliveTest.class);
  job.setInputFormat(DummyInputFormat.class);
  FileOutputFormat.setOutputPath(job, config.getOutputPath());
  job.setMapperClass(SliveMapper.class);
  job.setPartitionerClass(SlivePartitioner.class);
  job.setReducerClass(SliveReducer.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);
  job.setOutputFormat(TextOutputFormat.class);
  TextOutputFormat.setCompressOutput(job, false);
  job.setNumReduceTasks(config.getReducerAmount());
  job.setNumMapTasks(config.getMapAmount());
  return job;
}
 
Developer ID: naver, Project: hadoop, Lines: 26, Source: SliveTest.java

Example 4: runJob

import org.apache.hadoop.mapred.JobConf; // import the package/class the method depends on
@Override
protected void runJob(String jobName, Configuration c, List<Scan> scans)
    throws IOException, InterruptedException, ClassNotFoundException {
  JobConf job = new JobConf(TEST_UTIL.getConfiguration());

  job.setJobName(jobName);
  job.setMapperClass(Mapper.class);
  job.setReducerClass(Reducer.class);

  TableMapReduceUtil.initMultiTableSnapshotMapperJob(getSnapshotScanMapping(scans), Mapper.class,
      ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true, restoreDir);

  TableMapReduceUtil.addDependencyJars(job);

  job.setReducerClass(Reducer.class);
  job.setNumReduceTasks(1); // one to get final "first" and "last" key
  FileOutputFormat.setOutputPath(job, new Path(job.getJobName()));
  LOG.info("Started " + job.getJobName());

  RunningJob runningJob = JobClient.runJob(job);
  runningJob.waitForCompletion();
  assertTrue(runningJob.isSuccessful());
  LOG.info("After map/reduce completion - job " + jobName);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 25, Source: TestMultiTableSnapshotInputFormat.java

Example 5: createJobConf

import org.apache.hadoop.mapred.JobConf; // import the package/class the method depends on
private static JobConf createJobConf(Configuration conf) {
  JobConf jobconf = new JobConf(conf, DistCpV1.class);
  jobconf.setJobName(conf.get("mapred.job.name", NAME));

  // turn off speculative execution, because DFS doesn't handle
  // multiple writers to the same file.
  jobconf.setMapSpeculativeExecution(false);

  jobconf.setInputFormat(CopyInputFormat.class);
  jobconf.setOutputKeyClass(Text.class);
  jobconf.setOutputValueClass(Text.class);

  jobconf.setMapperClass(CopyFilesMapper.class);
  jobconf.setNumReduceTasks(0);
  return jobconf;
}
 
Developer ID: naver, Project: hadoop, Lines: 17, Source: DistCpV1.java

Example 6: createSubmittableJob

import org.apache.hadoop.mapred.JobConf; // import the package/class the method depends on
/**
 * @param args
 * @return the JobConf
 * @throws IOException
 */
public JobConf createSubmittableJob(String[] args) throws IOException {
  JobConf c = new JobConf(getConf(), getClass());
  c.setJobName(NAME);
  // Columns are space delimited
  StringBuilder sb = new StringBuilder();
  final int columnoffset = 2;
  for (int i = columnoffset; i < args.length; i++) {
    if (i > columnoffset) {
      sb.append(" ");
    }
    sb.append(args[i]);
  }
  // Second argument is the table name.
  TableMapReduceUtil.initTableMapJob(args[1], sb.toString(),
    RowCounterMapper.class, ImmutableBytesWritable.class, Result.class, c);
  c.setNumReduceTasks(0);
  // First arg is the output directory.
  FileOutputFormat.setOutputPath(c, new Path(args[0]));
  return c;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 26, Source: RowCounter.java

Example 7: initTableReduceJob

import org.apache.hadoop.mapred.JobConf; // import the package/class the method depends on
/**
 * Use this before submitting a TableReduce job. It will
 * appropriately set up the JobConf.
 *
 * @param table  The output table.
 * @param reducer  The reducer class to use.
 * @param job  The current job configuration to adjust.
 * @param partitioner  Partitioner to use. Pass <code>null</code> to use
 * default partitioner.
 * @param addDependencyJars upload HBase jars and jars for any of the configured
 *           job classes via the distributed cache (tmpjars).
 * @throws IOException When determining the region count fails.
 */
public static void initTableReduceJob(String table,
  Class<? extends TableReduce> reducer, JobConf job, Class partitioner,
  boolean addDependencyJars) throws IOException {
  job.setOutputFormat(TableOutputFormat.class);
  job.setReducerClass(reducer);
  job.set(TableOutputFormat.OUTPUT_TABLE, table);
  job.setOutputKeyClass(ImmutableBytesWritable.class);
  job.setOutputValueClass(Put.class);
  job.setStrings("io.serializations", job.get("io.serializations"),
      MutationSerialization.class.getName(), ResultSerialization.class.getName());
  if (partitioner == HRegionPartitioner.class) {
    job.setPartitionerClass(HRegionPartitioner.class);
    int regions =
      MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), TableName.valueOf(table));
    if (job.getNumReduceTasks() > regions) {
      job.setNumReduceTasks(regions);
    }
  } else if (partitioner != null) {
    job.setPartitionerClass(partitioner);
  }
  if (addDependencyJars) {
    addDependencyJars(job);
  }
  initCredentials(job);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 39, Source: TableMapReduceUtil.java

Example 8: shoudBeValidMapReduceWithPartitionerEvaluation

import org.apache.hadoop.mapred.JobConf; // import the package/class the method depends on
@Test
@SuppressWarnings("deprecation")
public void shoudBeValidMapReduceWithPartitionerEvaluation()
    throws IOException {
  Configuration cfg = UTIL.getConfiguration();
  JobConf jobConf = new JobConf(cfg);
  try {
    jobConf.setJobName("process row task");
    jobConf.setNumReduceTasks(2);
    TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY),
        ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class,
        jobConf);

    TableMapReduceUtil.initTableReduceJob(TABLE_NAME,
        ClassificatorRowReduce.class, jobConf, HRegionPartitioner.class);
    RunningJob job = JobClient.runJob(jobConf);
    assertTrue(job.isSuccessful());
  } finally {
    if (jobConf != null)
      FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir")));
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 23, Source: TestTableMapReduceUtil.java

Example 9: shoudBeValidMapReduceEvaluation

import org.apache.hadoop.mapred.JobConf; // import the package/class the method depends on
@Test
@SuppressWarnings("deprecation")
public void shoudBeValidMapReduceEvaluation() throws Exception {
  Configuration cfg = UTIL.getConfiguration();
  JobConf jobConf = new JobConf(cfg);
  try {
    jobConf.setJobName("process row task");
    jobConf.setNumReduceTasks(1);
    TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY),
        ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class,
        jobConf);
    TableMapReduceUtil.initTableReduceJob(TABLE_NAME,
        ClassificatorRowReduce.class, jobConf);
    RunningJob job = JobClient.runJob(jobConf);
    assertTrue(job.isSuccessful());
  } finally {
    if (jobConf != null)
      FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir")));
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 21, Source: TestTableMapReduceUtil.java

Example 10: doTestWithMapReduce

import org.apache.hadoop.mapred.JobConf; // import the package/class the method depends on
public static void doTestWithMapReduce(HBaseTestingUtility util, TableName tableName,
    String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions,
    int expectedNumSplits, boolean shutdownCluster) throws Exception {

  //create the table and snapshot
  createTableAndSnapshot(util, tableName, snapshotName, startRow, endRow, numRegions);

  if (shutdownCluster) {
    util.shutdownMiniHBaseCluster();
  }

  try {
    // create the job
    JobConf jobConf = new JobConf(util.getConfiguration());

    jobConf.setJarByClass(util.getClass());
    org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(jobConf,
      TestTableSnapshotInputFormat.class);

    TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS,
      TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
      NullWritable.class, jobConf, true, tableDir);

    jobConf.setReducerClass(TestTableSnapshotInputFormat.TestTableSnapshotReducer.class);
    jobConf.setNumReduceTasks(1);
    jobConf.setOutputFormat(NullOutputFormat.class);

    RunningJob job = JobClient.runJob(jobConf);
    Assert.assertTrue(job.isSuccessful());
  } finally {
    if (!shutdownCluster) {
      util.getHBaseAdmin().deleteSnapshot(snapshotName);
      util.deleteTable(tableName);
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 37, Source: TestTableSnapshotInputFormat.java

Example 11: runJob

import org.apache.hadoop.mapred.JobConf; // import the package/class the method depends on
static boolean runJob(JobConf conf, Path inDir, Path outDir, int numMaps, 
                         int numReds) throws IOException, InterruptedException {

  FileSystem fs = FileSystem.get(conf);
  if (fs.exists(outDir)) {
    fs.delete(outDir, true);
  }
  if (!fs.exists(inDir)) {
    fs.mkdirs(inDir);
  }
  String input = "The quick brown fox\n" + "has many silly\n"
      + "red fox sox\n";
  for (int i = 0; i < numMaps; ++i) {
    DataOutputStream file = fs.create(new Path(inDir, "part-" + i));
    file.writeBytes(input);
    file.close();
  }

  DistributedCache.addFileToClassPath(TestMRJobs.APP_JAR, conf, fs);
  conf.setOutputCommitter(CustomOutputCommitter.class);
  conf.setInputFormat(TextInputFormat.class);
  conf.setOutputKeyClass(LongWritable.class);
  conf.setOutputValueClass(Text.class);

  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setNumMapTasks(numMaps);
  conf.setNumReduceTasks(numReds);

  JobClient jobClient = new JobClient(conf);
  
  RunningJob job = jobClient.submitJob(conf);
  return jobClient.monitorAndPrintJob(conf, job);
}
 
Developer ID: naver, Project: hadoop, Lines: 35, Source: TestMROldApiJobs.java

Example 12: testCombinerShouldUpdateTheReporter

import org.apache.hadoop.mapred.JobConf; // import the package/class the method depends on
@Test
public void testCombinerShouldUpdateTheReporter() throws Exception {
  JobConf conf = new JobConf(mrCluster.getConfig());
  int numMaps = 5;
  int numReds = 2;
  Path in = new Path(mrCluster.getTestWorkDir().getAbsolutePath(),
      "testCombinerShouldUpdateTheReporter-in");
  Path out = new Path(mrCluster.getTestWorkDir().getAbsolutePath(),
      "testCombinerShouldUpdateTheReporter-out");
  createInputOutPutFolder(in, out, numMaps);
  conf.setJobName("test-job-with-combiner");
  conf.setMapperClass(IdentityMapper.class);
  conf.setCombinerClass(MyCombinerToCheckReporter.class);
  //conf.setJarByClass(MyCombinerToCheckReporter.class);
  conf.setReducerClass(IdentityReducer.class);
  DistributedCache.addFileToClassPath(TestMRJobs.APP_JAR, conf);
  conf.setOutputCommitter(CustomOutputCommitter.class);
  conf.setInputFormat(TextInputFormat.class);
  conf.setOutputKeyClass(LongWritable.class);
  conf.setOutputValueClass(Text.class);

  FileInputFormat.setInputPaths(conf, in);
  FileOutputFormat.setOutputPath(conf, out);
  conf.setNumMapTasks(numMaps);
  conf.setNumReduceTasks(numReds);
  
  runJob(conf);
}
 
Developer ID: naver, Project: hadoop, Lines: 29, Source: TestMRAppWithCombiner.java

Example 13: createJobConf

import org.apache.hadoop.mapred.JobConf; // import the package/class the method depends on
private static JobConf createJobConf(Configuration conf) {
  JobConf jobconf = new JobConf(conf, DistCh.class);
  jobconf.setJobName(NAME);
  jobconf.setMapSpeculativeExecution(false);

  jobconf.setInputFormat(ChangeInputFormat.class);
  jobconf.setOutputKeyClass(Text.class);
  jobconf.setOutputValueClass(Text.class);

  jobconf.setMapperClass(ChangeFilesMapper.class);
  jobconf.setNumReduceTasks(0);
  return jobconf;
}
 
Developer ID: naver, Project: hadoop, Lines: 14, Source: DistCh.java

Example 14: limitNumReduceTasks

import org.apache.hadoop.mapred.JobConf; // import the package/class the method depends on
/**
 * Ensures that the given number of reduce tasks for the given job
 * configuration does not exceed the number of regions for the given table.
 *
 * @param table  The table to get the region count for.
 * @param job  The current job configuration to adjust.
 * @throws IOException When retrieving the table details fails.
 */
// Used by tests.
public static void limitNumReduceTasks(String table, JobConf job)
throws IOException {
  int regions =
    MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), TableName.valueOf(table));
  if (job.getNumReduceTasks() > regions)
    job.setNumReduceTasks(regions);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 17, Source: TableMapReduceUtil.java

Example 15: runParseTest

import org.apache.hadoop.mapred.JobConf; // import the package/class the method depends on
public void runParseTest(String fieldTerminator, String lineTerminator,
    String encloser, String escape, boolean encloseRequired)
    throws IOException {

  ClassLoader prevClassLoader = null;

  String[] argv = getArgv(true, fieldTerminator, lineTerminator,
      encloser, escape, encloseRequired);
  runImport(argv);
  try {
    String tableClassName = getTableName();

    argv = getArgv(false, fieldTerminator, lineTerminator, encloser,
        escape, encloseRequired);
    SqoopOptions opts = new ImportTool().parseArguments(argv, null,
        null, true);

    CompilationManager compileMgr = new CompilationManager(opts);
    String jarFileName = compileMgr.getJarFilename();

    // Make sure the user's class is loaded into our address space.
    prevClassLoader = ClassLoaderStack.addJarFile(jarFileName,
        tableClassName);

    JobConf job = new JobConf();
    job.setJar(jarFileName);

    // Tell the job what class we're testing.
    job.set(ReparseMapper.USER_TYPE_NAME_KEY, tableClassName);

    // use local mode in the same JVM.
    ConfigurationHelper.setJobtrackerAddr(job, "local");
    job.set("fs.default.name", "file:///");

    String warehouseDir = getWarehouseDir();
    Path warehousePath = new Path(warehouseDir);
    Path inputPath = new Path(warehousePath, getTableName());
    Path outputPath = new Path(warehousePath, getTableName() + "-out");

    job.setMapperClass(ReparseMapper.class);
    job.setNumReduceTasks(0);
    FileInputFormat.addInputPath(job, inputPath);
    FileOutputFormat.setOutputPath(job, outputPath);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(NullWritable.class);

    JobClient.runJob(job);
  } catch (InvalidOptionsException ioe) {
    LOG.error(StringUtils.stringifyException(ioe));
    fail(ioe.toString());
  } catch (ParseException pe) {
    LOG.error(StringUtils.stringifyException(pe));
    fail(pe.toString());
  } finally {
    if (null != prevClassLoader) {
      ClassLoaderStack.setCurrentClassLoader(prevClassLoader);
    }
  }
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 61, Source: SQLServerParseMethodsManualTest.java


Note: The org.apache.hadoop.mapred.JobConf.setNumReduceTasks examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors. For distribution and use, refer to each project's license. Do not reproduce without permission.