

Java Job.setMapperClass Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapreduce.Job.setMapperClass. If you are wondering what Job.setMapperClass does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples for the enclosing class, org.apache.hadoop.mapreduce.Job.


Below are 15 code examples of the Job.setMapperClass method, sorted by popularity by default.
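Before the examples, here is a minimal, self-contained sketch of the pattern they all share: subclass org.apache.hadoop.mapreduce.Mapper and register it on the Job with setMapperClass. The class names (SetMapperClassDemo, TokenMapper) are hypothetical, invented for illustration; they do not come from any of the projects quoted below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class SetMapperClassDemo {

  // Hypothetical mapper: emits (token, 1) for each whitespace-separated token.
  public static class TokenMapper
      extends Mapper<LongWritable, Text, Text, IntWritable> {
    private static final IntWritable ONE = new IntWritable(1);
    private final Text word = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context)
        throws IOException, InterruptedException {
      for (String token : value.toString().split("\\s+")) {
        if (!token.isEmpty()) {
          word.set(token);
          context.write(word, ONE);
        }
      }
    }
  }

  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "setMapperClass demo");
    job.setJarByClass(SetMapperClassDemo.class);
    job.setMapperClass(TokenMapper.class); // the method this article covers
    job.setNumReduceTasks(0);              // map-only job keeps the sketch small
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}

Note that the mapper's declared output key/value types must match what setMapOutputKeyClass/setMapOutputValueClass (or, absent those, setOutputKeyClass/setOutputValueClass) declare on the Job; this constraint is visible throughout the examples below.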

Example 1: configureJob

import org.apache.hadoop.mapreduce.Job; // import the class/package this method depends on
/**
 * Job configuration.
 */
public static Job configureJob(Configuration conf, String [] args)
throws IOException {
  String tableName = args[0];
  String columnFamily = args[1];
  System.out.println("****" + tableName);
  conf.set(TableInputFormat.SCAN, TableMapReduceUtil.convertScanToString(new Scan()));
  conf.set(TableInputFormat.INPUT_TABLE, tableName);
  conf.set("index.tablename", tableName);
  conf.set("index.familyname", columnFamily);
  String[] fields = new String[args.length - 2];
  System.arraycopy(args, 2, fields, 0, fields.length);
  conf.setStrings("index.fields", fields);
  Job job = new Job(conf, tableName);
  job.setJarByClass(IndexBuilder.class);
  job.setMapperClass(Map.class);
  job.setNumReduceTasks(0);
  job.setInputFormatClass(TableInputFormat.class);
  job.setOutputFormatClass(MultiTableOutputFormat.class);
  return job;
}
 
Developer: fengchen8086 | Project: ditb | Lines: 24 | Source: IndexBuilder.java

Example 2: createFailJob

import org.apache.hadoop.mapreduce.Job; // import the class/package this method depends on
/**
 * Creates a simple fail job.
 * 
 * @param conf Configuration object
 * @param outdir Output directory.
 * @param indirs Comma separated input directories.
 * @return Job initialized for a simple fail job.
 * @throws Exception If an error occurs creating job configuration.
 */
public static Job createFailJob(Configuration conf, Path outdir, 
    Path... indirs) throws Exception {
  FileSystem fs = outdir.getFileSystem(conf);
  if (fs.exists(outdir)) {
    fs.delete(outdir, true);
  }
  conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 2);
  Job theJob = Job.getInstance(conf);
  theJob.setJobName("Fail-Job");

  FileInputFormat.setInputPaths(theJob, indirs);
  theJob.setMapperClass(FailMapper.class);
  theJob.setReducerClass(Reducer.class);
  theJob.setNumReduceTasks(0);
  FileOutputFormat.setOutputPath(theJob, outdir);
  theJob.setOutputKeyClass(Text.class);
  theJob.setOutputValueClass(Text.class);
  return theJob;
}
 
Developer: naver | Project: hadoop | Lines: 29 | Source: MapReduceTestUtil.java

Example 3: configureJob

import org.apache.hadoop.mapreduce.Job; // import the class/package this method depends on
/**
 * Job configuration.
 */
public static Job configureJob(Configuration conf, String [] args)
throws IOException {
  Path inputPath = new Path(args[0]);
  String tableName = args[1];
  Job job = new Job(conf, NAME + "_" + tableName);
  job.setJarByClass(Uploader.class);
  FileInputFormat.setInputPaths(job, inputPath);
  job.setInputFormatClass(SequenceFileInputFormat.class);
  job.setMapperClass(Uploader.class);
  // No reducers.  Just write straight to table.  Call initTableReducerJob
  // because it sets up the TableOutputFormat.
  TableMapReduceUtil.initTableReducerJob(tableName, null, job);
  job.setNumReduceTasks(0);
  return job;
}
 
Developer: fengchen8086 | Project: ditb | Lines: 19 | Source: SampleUploader.java

Example 4: configureMapper

import org.apache.hadoop.mapreduce.Job; // import the class/package this method depends on
@Override
protected void configureMapper(Job job, String tableName,
    String tableClassName) throws ClassNotFoundException, IOException {
  if (isHCatJob) {
    throw new IOException("Sqoop-HCatalog Integration is not supported.");
  }
  switch (getInputFileType()) {
    case AVRO_DATA_FILE:
      throw new IOException("Avro data file is not supported.");
    case SEQUENCE_FILE:
    case UNKNOWN:
    default:
      job.setMapperClass(getMapperClass());
  }

  // Concurrent writes of the same records would be problematic.
  ConfigurationHelper.setJobMapSpeculativeExecution(job, false);
  job.setMapOutputKeyClass(NullWritable.class);
  job.setMapOutputValueClass(NullWritable.class);
}
 
Developer: aliyun | Project: aliyun-maxcompute-data-collectors | Lines: 21 | Source: PostgreSQLCopyExportJob.java
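The ConfigurationHelper.setJobMapSpeculativeExecution call above is a helper from the project quoted here (Sqoop-derived code, as its error messages suggest). On a plain MapReduce Job, the same effect is available through the stock Hadoop API; a sketch of the equivalent calls, assuming a Job named job as in the examples:

// Stock Hadoop API equivalents for disabling speculative execution:
job.setMapSpeculativeExecution(false);     // map tasks only (what Example 4 needs)
job.setReduceSpeculativeExecution(false);  // reduce tasks, if the job has any
job.setSpeculativeExecution(false);        // both at once (used in Example 8 below)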

Example 5: run

import org.apache.hadoop.mapreduce.Job; // import the class/package this method depends on
public int run(String[] args) throws Exception {
  Job job = Job.getInstance(getConf());
  if (args.length != 2) {
    usage();
    return 1;
  }
  TeraInputFormat.setInputPaths(job, new Path(args[0]));
  FileOutputFormat.setOutputPath(job, new Path(args[1]));
  job.setJobName("TeraValidate");
  job.setJarByClass(TeraValidate.class);
  job.setMapperClass(ValidateMapper.class);
  job.setReducerClass(ValidateReducer.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);
  // force a single reducer
  job.setNumReduceTasks(1);
  // force a single split 
  FileInputFormat.setMinInputSplitSize(job, Long.MAX_VALUE);
  job.setInputFormatClass(TeraInputFormat.class);
  return job.waitForCompletion(true) ? 0 : 1;
}
 
Developer: naver | Project: hadoop | Lines: 22 | Source: TeraValidate.java
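The run(String[]) signature in Example 5 comes from the org.apache.hadoop.util.Tool interface, which TeraValidate implements in the Hadoop examples source. Such a driver is normally launched through ToolRunner, which parses generic Hadoop options (-D, -files, ...) into the Configuration before delegating the remaining arguments to run(). A minimal launcher sketch under that assumption; the launcher class name is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;

// Hypothetical launcher for a Tool-based driver such as Example 5's TeraValidate.
public class TeraValidateLauncher {
  public static void main(String[] args) throws Exception {
    int exitCode = ToolRunner.run(new Configuration(), new TeraValidate(), args);
    System.exit(exitCode);
  }
}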

Example 6: testInputFormat

import org.apache.hadoop.mapreduce.Job; // import the class/package this method depends on
void testInputFormat(Class<? extends InputFormat> clazz)
    throws IOException, InterruptedException, ClassNotFoundException {
  final Job job = MapreduceTestingShim.createJob(UTIL.getConfiguration());
  job.setInputFormatClass(clazz);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setMapperClass(ExampleVerifier.class);
  job.setNumReduceTasks(0);

  LOG.debug("submitting job.");
  assertTrue("job failed!", job.waitForCompletion(true));
  assertEquals("Saw the wrong number of instances of the filtered-for row.", 2, job.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":row", "aaa").getValue());
  assertEquals("Saw any instances of the filtered out row.", 0, job.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":row", "bbb").getValue());
  assertEquals("Saw the wrong number of instances of columnA.", 1, job.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":family", "columnA").getValue());
  assertEquals("Saw the wrong number of instances of columnB.", 1, job.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":family", "columnB").getValue());
  assertEquals("Saw the wrong count of values for the filtered-for row.", 2, job.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":value", "value aaa").getValue());
  assertEquals("Saw the wrong count of values for the filtered-out row.", 0, job.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":value", "value bbb").getValue());
}
 
Developer: fengchen8086 | Project: ditb | Lines: 24 | Source: TestTableInputFormat.java

Example 7: runJob

import org.apache.hadoop.mapreduce.Job; // import the class/package this method depends on
public static void runJob(Configuration conf, Path inputPath, Path output)
    throws IOException, ClassNotFoundException, InterruptedException {
  Job job = new Job(conf, "Input Drive running input:" + inputPath);
  log.info("start running InputDriver");
  job.setMapOutputKeyClass(LongWritable.class);
  job.setMapOutputValueClass(indexToWordWritable.class);
  job.setOutputKeyClass(twoDimensionIndexWritable.class);
  job.setOutputValueClass(Text.class);

  job.setMapperClass(InputMapper.class);
  job.setReducerClass(InputReducer.class);
  job.setNumReduceTasks(1);
  job.setOutputFormatClass(SequenceFileOutputFormat.class);
  job.setJarByClass(InputDriver.class);

  FileInputFormat.addInputPath(job, inputPath);
  FileOutputFormat.setOutputPath(job, output);

  boolean succeeded = job.waitForCompletion(true);
  if (!succeeded) {
    throw new IllegalStateException("Job failed!");
  }
}
 
Developer: huyang1 | Project: LDA | Lines: 25 | Source: InputDriver.java

Example 8: createJob

import org.apache.hadoop.mapreduce.Job; // import the class/package this method depends on
public Job createJob() 
throws IOException {
  Configuration conf = getConf();
  conf.setInt(MRJobConfig.NUM_MAPS, 1);
  Job job = Job.getInstance(conf, "test");
  job.setNumReduceTasks(1);
  job.setJarByClass(CredentialsTestJob.class);
  job.setMapperClass(CredentialsTestJob.CredentialsTestMapper.class);
  job.setMapOutputKeyClass(IntWritable.class);
  job.setMapOutputValueClass(NullWritable.class);
  job.setReducerClass(CredentialsTestJob.CredentialsTestReducer.class);
  job.setInputFormatClass(SleepJob.SleepInputFormat.class);
  job.setPartitionerClass(SleepJob.SleepJobPartitioner.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setSpeculativeExecution(false);
  job.setJobName("test job");
  FileInputFormat.addInputPath(job, new Path("ignored"));
  return job;
}
 
Developer: naver | Project: hadoop | Lines: 21 | Source: CredentialsTestJob.java

Example 9: main

import org.apache.hadoop.mapreduce.Job; // import the class/package this method depends on
public static void main(String[] args) throws Exception {
	Configuration conf = new Configuration();

	Job job = Job.getInstance(conf);
	job.setMapperClass(DataDividerMapper.class);
	job.setReducerClass(DataDividerReducer.class);

	job.setJarByClass(DataDividerByUser.class);

	job.setInputFormatClass(TextInputFormat.class);
	job.setOutputFormatClass(TextOutputFormat.class);
	job.setOutputKeyClass(IntWritable.class);
	job.setOutputValueClass(Text.class);

	TextInputFormat.setInputPaths(job, new Path(args[0]));
	TextOutputFormat.setOutputPath(job, new Path(args[1]));

	job.waitForCompletion(true);
}
 
Developer: yogykwan | Project: mapreduce-samples | Lines: 21 | Source: DataDividerByUser.java

Example 10: jobListFriends

import org.apache.hadoop.mapreduce.Job; // import the class/package this method depends on
private Job jobListFriends(String inputPath, String outputPath) throws IOException, InterruptedException, ClassNotFoundException{      
    Job job = new Job();
    job.setJarByClass(WordCount.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    job.setMapperClass(Map.class);
    job.setReducerClass(Reduce.class);

    job.setInputFormatClass(KeyValueTextInputFormat.class);   // Need to change the import
    job.setOutputFormatClass(TextOutputFormat.class);

    FileInputFormat.addInputPath(job, new Path(inputPath));
    FileOutputFormat.setOutputPath(job, new Path(outputPath));

    job.waitForCompletion(true);

    return job;
}
 
Developer: dhruvmalik007 | Project: Deep_learning_using_Java | Lines: 20 | Source: Recommendation_program.java

Example 11: configureMapper

import org.apache.hadoop.mapreduce.Job; // import the class/package this method depends on
@Override
protected void configureMapper(Job job, String tableName,
                               String tableClassName) {
  job.setOutputKeyClass(SqoopRecord.class);
  job.setOutputValueClass(NullWritable.class);
  job.setMapperClass(getMapperClass());
}
 
Developer: aliyun | Project: aliyun-maxcompute-data-collectors | Lines: 8 | Source: OdpsImportJob.java

Example 12: createJob

import org.apache.hadoop.mapreduce.Job; // import the class/package this method depends on
public static Job createJob(String name, String base) throws IOException {
	Configuration conf = new Configuration();
	conf.set(Total.QUERIED_NAME, name);
	Job job = Job.getInstance(new Cluster(conf), conf);
	job.setJarByClass(Cut.class);

	// in
	String in = base;
	if (!base.endsWith("/"))
		in = in.concat("/");
	in = in.concat("employees");
	SequenceFileInputFormat.addInputPath(job, new Path(in));
	job.setInputFormatClass(SequenceFileInputFormat.class);

	// map
	job.setMapperClass(CutMapper.class);
	job.setMapOutputKeyClass(Text.class);
	job.setMapOutputValueClass(Employee.class);

	// out
	SequenceFileOutputFormat.setOutputPath(job, new Path(base + "/tmp"));
	job.setOutputFormatClass(SequenceFileOutputFormat.class);
	job.setOutputKeyClass(Text.class);
	job.setOutputValueClass(Employee.class);

	return job;
}
 
Developer: amritbhat786 | Project: DocIT | Lines: 28 | Source: Cut.java
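Job.getInstance(new Cluster(conf), conf) in Example 12 uses an early-Hadoop-2 overload that has since been deprecated; the explicit Cluster handle is unnecessary on current releases. A sketch of the modern equivalent, assuming the same conf:

// Modern equivalent: the Job manages its own cluster connection.
Job job = Job.getInstance(conf);
job.setJarByClass(Cut.class);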

Example 13: main

import org.apache.hadoop.mapreduce.Job; // import the class/package this method depends on
public static void main(String[] args) throws Exception {
    Configuration con = new Configuration();
    Job bookJob = Job.getInstance(con, "Average Page Count");
    bookJob.setJarByClass(AveragePageCount.class);
    bookJob.setMapperClass(TextMapper.class);
    bookJob.setReducerClass(AverageReduce.class);
    bookJob.setOutputKeyClass(Text.class);
    bookJob.setOutputValueClass(IntWritable.class);

    FileInputFormat.addInputPath(bookJob, new Path("C:/Hadoop/books.txt"));
    FileOutputFormat.setOutputPath(bookJob, new Path("C:/Hadoop/BookOutput"));
    if (bookJob.waitForCompletion(true)) {
        System.exit(0);
    }
}
 
Developer: PacktPublishing | Project: Java-for-Data-Science | Lines: 16 | Source: AveragePageCount.java

Example 14: main

import org.apache.hadoop.mapreduce.Job; // import the class/package this method depends on
public static void main(String[] args) {
    if (args.length != 2) {
        System.err.println("Usage: Month Traffic Statistics <input path> <output path>");
        System.exit(-1);
    }
    String nginxLogInput = args[0];
    String nginxLogOutput = args[1];

    Configuration configuration = new Configuration();
    try {
        Job job = Job.getInstance(configuration);
        job.setJobName("MonthTrafficStatistics");

        job.setJarByClass(MonthTrafficStatisticsMapReduce.class);

        FileInputFormat.addInputPath(job, new Path(nginxLogInput));
        FileOutputFormat.setOutputPath(job, new Path(nginxLogOutput));

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        job.setMapperClass(MonthTrafficStatisticsMapper.class);
        job.setReducerClass(MonthTrafficStatisticsReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.waitForCompletion(true);
    } catch (IOException | InterruptedException | ClassNotFoundException e) {
        e.printStackTrace();
    }
}
 
Developer: mumuhadoop | Project: mumu-mapreduce | Lines: 35 | Source: MonthTrafficStatisticsMapReduce.java

Example 15: main

import org.apache.hadoop.mapreduce.Job; // import the class/package this method depends on
public static void main(String[] args) throws Exception {
	Configuration conf = new Configuration();
	Job job = Job.getInstance(conf, "maxaverage");
	
	job.setMapperClass(MaximumAverageMapper.class);
	job.setReducerClass(MaximumAverageReducer.class);

	job.setOutputKeyClass(Text.class);
	job.setOutputValueClass(FloatWritable.class);

	FileInputFormat.setInputPaths(job, new Path(args[0]));
	FileOutputFormat.setOutputPath(job, new Path(args[1]));

	if (!job.waitForCompletion(true))
		return;
}
 
Developer: aadishgoel2013 | Project: Hadoop-Codes | Lines: 17 | Source: MaximumAverageDriver.java


Note: The org.apache.hadoop.mapreduce.Job.setMapperClass method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use are governed by each project's License. Do not reproduce without permission.