

Java Job.setMapOutputKeyClass Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapreduce.Job.setMapOutputKeyClass. If you have been wondering what Job.setMapOutputKeyClass does or how to use it, the curated examples below should help. You can also browse further usage examples of the declaring class, org.apache.hadoop.mapreduce.Job.


The sections below present 15 code examples of Job.setMapOutputKeyClass, drawn from open-source projects and ordered by popularity.
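As background for the examples: Hadoop assumes by default that the mapper emits the same key/value types that the job ultimately writes (those set via setOutputKeyClass/setOutputValueClass). setMapOutputKeyClass is required only when the two differ; without it, the shuffle fails at runtime with a type-mismatch error. A minimal driver sketch, assuming a mapper that emits (Text, IntWritable) while the reducer writes (Text, LongWritable); the class and job names here are illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;

public class MapOutputTypesDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "map output types demo");
    // the mapper emits (Text, IntWritable) ...
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(IntWritable.class);
    // ... while the reducer writes (Text, LongWritable), so the
    // intermediate (map-side) types must be declared explicitly
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(LongWritable.class);
  }
}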

Example 1: main

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
public static void main(String [] args) throws Exception
{
  Path outDir = new Path("output");
  Configuration conf = new Configuration();
  Job job = Job.getInstance(conf, "user name check");

  job.setJarByClass(UserNamePermission.class);
  job.setMapperClass(UserNamePermission.UserNameMapper.class);
  job.setCombinerClass(UserNamePermission.UserNameReducer.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  job.setReducerClass(UserNamePermission.UserNameReducer.class);
  job.setNumReduceTasks(1);
    
  job.setInputFormatClass(TextInputFormat.class);
  TextInputFormat.addInputPath(job, new Path("input"));
  FileOutputFormat.setOutputPath(job, outDir);
    
  System.exit(job.waitForCompletion(true) ? 0 : 1);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 22, Source file: UserNamePermission.java

Example 2: initMultiTableSnapshotMapperJob

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
/**
 * Sets up the job for reading from one or more table snapshots, with one or more scans
 * per snapshot.
 * It bypasses the HBase servers and reads directly from the snapshot files.
 *
 * @param snapshotScans     map of snapshot name to scans on that snapshot.
 * @param mapper            The mapper class to use.
 * @param outputKeyClass    The class of the output key.
 * @param outputValueClass  The class of the output value.
 * @param job               The current job to adjust.  Make sure the passed job is
 *                          carrying all necessary HBase configuration.
 * @param addDependencyJars upload HBase jars and jars for any of the configured
 *                          job classes via the distributed cache (tmpjars).
 */
public static void initMultiTableSnapshotMapperJob(Map<String, Collection<Scan>> snapshotScans,
    Class<? extends TableMapper> mapper, Class<?> outputKeyClass, Class<?> outputValueClass,
    Job job, boolean addDependencyJars, Path tmpRestoreDir) throws IOException {
  MultiTableSnapshotInputFormat.setInput(job.getConfiguration(), snapshotScans, tmpRestoreDir);

  job.setInputFormatClass(MultiTableSnapshotInputFormat.class);
  if (outputValueClass != null) {
    job.setMapOutputValueClass(outputValueClass);
  }
  if (outputKeyClass != null) {
    job.setMapOutputKeyClass(outputKeyClass);
  }
  job.setMapperClass(mapper);
  Configuration conf = job.getConfiguration();
  HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));

  if (addDependencyJars) {
    addDependencyJars(job);
  }

  resetCacheConfig(job.getConfiguration());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 37, Source file: TableMapReduceUtil.java
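For reference, a hypothetical call site for the method above. MyTableMapper, the snapshot name, and the restore directory are illustrative assumptions, not part of the original source:

import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.mapreduce.Job;

void configureSnapshotJob(Job job) throws java.io.IOException {
  // one full-table scan over a single (hypothetical) snapshot
  Map<String, Collection<Scan>> snapshotScans = new HashMap<>();
  snapshotScans.put("users_snapshot", Collections.singletonList(new Scan()));
  TableMapReduceUtil.initMultiTableSnapshotMapperJob(
      snapshotScans, MyTableMapper.class, ImmutableBytesWritable.class,
      Result.class, job, true, new Path("/tmp/snapshot-restore"));
}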

Example 3: init

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
/** {@inheritDoc} */
@Override
public void init(Job job) {
  // setup mapper
  job.setMapperClass(PartitionMapper.class);
  job.setMapOutputKeyClass(IntWritable.class);
  job.setMapOutputValueClass(SummationWritable.class);

  // setup partitioner
  job.setPartitionerClass(IndexPartitioner.class);

  // setup reducer
  job.setReducerClass(SummingReducer.class);
  job.setOutputKeyClass(NullWritable.class);
  job.setOutputValueClass(TaskResult.class);
  final Configuration conf = job.getConfiguration();
  final int nParts = conf.getInt(N_PARTS, 1);
  job.setNumReduceTasks(nParts);

  // setup input
  job.setInputFormatClass(SummationInputFormat.class);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 23, Source file: DistSum.java
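The IndexPartitioner registered above is DistSum's own class. As a rough sketch of the idea only (not the actual Hadoop source), a partitioner keyed on an integer index can route each summation part to its own reduce task:

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapreduce.Partitioner;

public class IndexPartitionerSketch extends Partitioner<IntWritable, SummationWritable> {
  @Override
  public int getPartition(IntWritable key, SummationWritable value, int numPartitions) {
    // send each indexed summation part to a dedicated reducer
    return key.get() % numPartitions;
  }
}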

Example 4: main

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
	Configuration conf = new Configuration();

	Job job = Job.getInstance(conf);
	job.setJarByClass(Multiplication.class);

	// MultipleInputs (below) assigns a dedicated mapper to each input path,
	// so no job-wide setMapperClass call is needed here; successive calls
	// would only overwrite one another.

	job.setReducerClass(MultiplicationReducer.class);

	job.setMapOutputKeyClass(Text.class);
	job.setMapOutputValueClass(Text.class);
	job.setOutputKeyClass(Text.class);
	job.setOutputValueClass(DoubleWritable.class);

	MultipleInputs.addInputPath(job, new Path(args[0]), TextInputFormat.class, CooccurrenceMapper.class);
	MultipleInputs.addInputPath(job, new Path(args[1]), TextInputFormat.class, RatingMapper.class);

	TextOutputFormat.setOutputPath(job, new Path(args[2]));
	
	job.waitForCompletion(true);
}
 
Developer ID: yogykwan, Project: mapreduce-samples, Lines of code: 27, Source file: Multiplication.java

Example 5: main

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
	Configuration conf = new Configuration();
	
	Job job = Job.getInstance(conf);
	job.setJobName("MaxThreeLabel");
	job.setJarByClass(MaxThreeLabel.class);
	
	job.setMapOutputKeyClass(Text.class);
	job.setMapOutputValueClass(TextArrayWritable.class);
	
	job.setOutputKeyClass(Text.class);
	job.setOutputValueClass(Text.class);
	
	job.setMapperClass(MaxThreeLabelMap.class);
	job.setReducerClass(MaxThreeLabelReduce.class);
	
	job.setInputFormatClass(TextInputFormat.class);
	job.setOutputFormatClass(TextOutputFormat.class);
	
	FileInputFormat.addInputPath(job, new Path(args[0]));
	FileOutputFormat.setOutputPath(job, new Path(args[1]));
	boolean wait = job.waitForCompletion(true);
	System.exit(wait ? 0 : 1);
}
 
Developer ID: lzmhhh123, Project: Wikipedia-Index, Lines of code: 25, Source file: MaxThreeLabel.java

Example 6: createJob

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
/**
 * Create the Job object for submission, with all of its configuration applied.
 *
 * @return Reference to the job object.
 * @throws IOException if job creation fails
 */
private Job createJob() throws IOException {
  String jobName = "s3mapreducecp";
  String userChosenName = getConf().get(MRJobConfig.JOB_NAME);
  if (userChosenName != null) {
    jobName += ": " + userChosenName;
  }

  Job job = Job.getInstance(getConf());
  job.setJobName(jobName);
  job.setInputFormatClass(ConfigurationUtil.getStrategy(getConf(), inputOptions));
  job.setJarByClass(CopyMapper.class);
  configureOutputFormat(job);

  job.setMapperClass(CopyMapper.class);
  job.setNumReduceTasks(0);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  job.setOutputFormatClass(CopyOutputFormat.class);
  job.getConfiguration().set(MRJobConfig.MAP_SPECULATIVE, "false");
  job.getConfiguration().set(MRJobConfig.NUM_MAPS, String.valueOf(inputOptions.getMaxMaps()));

  return job;
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines of code: 30, Source file: S3MapReduceCp.java

Example 7: configure

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
/**
 * Configure the {@link Job} to enable compression emulation.
 */
static void configure(final Job job) throws IOException, InterruptedException,
                                            ClassNotFoundException {
  // set the random text mapper
  job.setMapperClass(RandomTextDataMapper.class);
  job.setNumReduceTasks(0);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  job.setInputFormatClass(GenDataFormat.class);
  job.setJarByClass(GenerateData.class);

  // set the output compression true
  FileOutputFormat.setCompressOutput(job, true);
  try {
    FileInputFormat.addInputPath(job, new Path("ignored"));
  } catch (IOException e) {
    LOG.error("Error while adding input path ", e);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 22, Source file: CompressionEmulationUtil.java

Example 8: createJob

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
public static Job createJob(String name, String base) throws IOException {
	Configuration conf = new Configuration();
	conf.set(Total.QUERIED_NAME, name);
	Job job = Job.getInstance(conf); // Job.getInstance(Cluster, Configuration) is deprecated; the plain Configuration overload suffices
	job.setJarByClass(Cut.class);

	// in
	String in = base;
	if (!base.endsWith("/"))
		in = in.concat("/");
	in = in.concat("employees");
	SequenceFileInputFormat.addInputPath(job, new Path(in));
	job.setInputFormatClass(SequenceFileInputFormat.class);

	// map
	job.setMapperClass(CutMapper.class);
	job.setMapOutputKeyClass(Text.class);
	job.setMapOutputValueClass(Employee.class);

	// out
	SequenceFileOutputFormat.setOutputPath(job, new Path(base + "/tmp"));
	job.setOutputFormatClass(SequenceFileOutputFormat.class);
	job.setOutputKeyClass(Text.class);
	job.setOutputValueClass(Employee.class);

	return job;
}
 
Developer ID: amritbhat786, Project: DocIT, Lines of code: 28, Source file: Cut.java

Example 9: createJob

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
/** Create and set up a job. */
private static Job createJob(String name, Configuration conf
    ) throws IOException {
  final Job job = Job.getInstance(conf, NAME + "_" + name);
  final Configuration jobconf = job.getConfiguration();
  job.setJarByClass(BaileyBorweinPlouffe.class);

  // setup mapper
  job.setMapperClass(BbpMapper.class);
  job.setMapOutputKeyClass(LongWritable.class);
  job.setMapOutputValueClass(BytesWritable.class);

  // setup reducer
  job.setReducerClass(BbpReducer.class);
  job.setOutputKeyClass(LongWritable.class);
  job.setOutputValueClass(BytesWritable.class);
  job.setNumReduceTasks(1);

  // setup input
  job.setInputFormatClass(BbpInputFormat.class);

  // disable task timeout
  jobconf.setLong(MRJobConfig.TASK_TIMEOUT, 0);

  // do not use speculative execution
  jobconf.setBoolean(MRJobConfig.MAP_SPECULATIVE, false);
  jobconf.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, false);
  return job;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 30, Source file: BaileyBorweinPlouffe.java

Example 10: main

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
public static void main(String[] args) {
    if (args.length != 2) {
        System.err.println("Usage: Year Traffic Statistics <input path> <output path>");
        System.exit(-1);
    }
    String nginxLogInput = args[0];
    String nginxLogOutput = args[1];

    Configuration configuration = new Configuration();
    try {
        Job job = Job.getInstance(configuration);
        job.setJobName("YearTrafficStatistics");

        job.setJarByClass(YearTrafficStatisticsMapReduce.class);

        FileInputFormat.addInputPath(job, new Path(nginxLogInput));
        FileOutputFormat.setOutputPath(job, new Path(nginxLogOutput));

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        job.setMapperClass(YearTrafficStatisticsMapper.class);
        job.setReducerClass(YearTrafficStatisticsReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.waitForCompletion(true);
    } catch (IOException | InterruptedException | ClassNotFoundException e) {
        e.printStackTrace();
    }
}
 
Developer ID: mumuhadoop, Project: mumu-mapreduce, Lines of code: 35, Source file: YearTrafficStatisticsMapReduce.java

Example 11: testScanFromConfiguration

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
/**
 * Tests an MR Scan initialized from properties set in the Configuration.
 * 
 * @throws IOException
 * @throws ClassNotFoundException
 * @throws InterruptedException
 */
protected void testScanFromConfiguration(String start, String stop, String last)
throws IOException, InterruptedException, ClassNotFoundException {
  String jobName = "ScanFromConfig" + (start != null ? start.toUpperCase() : "Empty") +
    "To" + (stop != null ? stop.toUpperCase() : "Empty");
  Configuration c = new Configuration(TEST_UTIL.getConfiguration());
  c.set(TableInputFormat.INPUT_TABLE, Bytes.toString(TABLE_NAME));
  c.set(TableInputFormat.SCAN_COLUMN_FAMILY, Bytes.toString(INPUT_FAMILY));
  c.set(KEY_STARTROW, start != null ? start : "");
  c.set(KEY_LASTROW, last != null ? last : "");

  if (start != null) {
    c.set(TableInputFormat.SCAN_ROW_START, start);
  }

  if (stop != null) {
    c.set(TableInputFormat.SCAN_ROW_STOP, stop);
  }

  Job job = Job.getInstance(c, jobName); // the Job(Configuration, String) constructor is deprecated
  job.setMapperClass(ScanMapper.class);
  job.setReducerClass(ScanReducer.class);
  job.setMapOutputKeyClass(ImmutableBytesWritable.class);
  job.setMapOutputValueClass(ImmutableBytesWritable.class);
  job.setInputFormatClass(TableInputFormat.class);
  job.setNumReduceTasks(1);
  FileOutputFormat.setOutputPath(job, new Path(job.getJobName()));
  TableMapReduceUtil.addDependencyJars(job);
  assertTrue(job.waitForCompletion(true));
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 37, Source file: TestTableInputFormatScanBase.java

Example 12: createJob

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
/**
 * Create the Job object for submission, with all of its configuration applied.
 *
 * @return Reference to the job object.
 * @throws IOException if job creation fails
 */
private Job createJob() throws IOException {
  String jobName = "distcp";
  String userChosenName = getConf().get(JobContext.JOB_NAME);
  if (userChosenName != null)
    jobName += ": " + userChosenName;
  Job job = Job.getInstance(getConf());
  job.setJobName(jobName);
  job.setInputFormatClass(DistCpUtils.getStrategy(getConf(), inputOptions));
  job.setJarByClass(CopyMapper.class);
  configureOutputFormat(job);

  job.setMapperClass(CopyMapper.class);
  job.setNumReduceTasks(0);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  job.setOutputFormatClass(CopyOutputFormat.class);
  job.getConfiguration().set(JobContext.MAP_SPECULATIVE, "false");
  job.getConfiguration().set(JobContext.NUM_MAPS,
                String.valueOf(inputOptions.getMaxMaps()));

  if (inputOptions.getSslConfigurationFile() != null) {
    setupSSLConfig(job);
  }

  inputOptions.appendToConf(job.getConfiguration());
  return job;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 34, Source file: DistCp.java

Example 13: run

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
public void run() throws IOException, ClassNotFoundException, InterruptedException {

        Job job = Job.getInstance(configuration, "com.romanysik.util.Transposer");

        job.setJarByClass(MRNMF.class);

        FileInputFormat.addInputPath(job, new Path(inputPath));
        FileOutputFormat.setOutputPath(job, new Path(outputPath));

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        job.setMapOutputKeyClass(LongWritable.class);
        job.setMapOutputValueClass(Text.class);

        job.setMapperClass(TMapper.class);
        job.setReducerClass(TReducer.class);

        job.waitForCompletion(true);
    }
 
Developer ID: Romm17, Project: MRNMF, Lines of code: 21, Source file: Transposer.java

Example 14: run

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
public void run() throws IOException, ClassNotFoundException, InterruptedException {
    
    Job job = Job.getInstance(configuration, "com.romanysik.matrixmultiplication.MM1");

    job.setJarByClass(MRNMF.class);

    FileInputFormat.addInputPath(job, new Path(inputPath));
    FileOutputFormat.setOutputPath(job, new Path(outputPath));

    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputFormatClass(TextOutputFormat.class);

    job.setMapOutputKeyClass(IntWritable.class);
    job.setMapOutputValueClass(Text.class);

    job.setMapperClass(MM1Mapper.class);

    job.waitForCompletion(true);
}
 
Developer ID: Romm17, Project: MRNMF, Lines of code: 20, Source file: MM1.java

Example 15: main

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
public static void main(String[] args) throws ClassNotFoundException, IOException, InterruptedException {
		// configure n-gram mapreduce job
		Configuration conf1 = new Configuration();
		conf1.set("textinputformat.record.delimiter", "."); // read a complete sentence as a line
		conf1.set("GRAM_NUMBER", args[2]);
		Job job1 = Job.getInstance(conf1);
		job1.setNumReduceTasks(3);
		job1.setJobName("NGram");
		job1.setJarByClass(Dispatcher.class);
		job1.setMapperClass(NGramBuilder.NGramMapper.class);
		job1.setReducerClass(NGramBuilder.NGramReducer.class);
		job1.setOutputKeyClass(Text.class);
		job1.setOutputValueClass(IntWritable.class);
		job1.setInputFormatClass(TextInputFormat.class); // default format: reads lines of text files
		job1.setOutputFormatClass(TextOutputFormat.class); // default format: key \t value
		TextInputFormat.setInputPaths(job1, new Path(args[0]));
		TextOutputFormat.setOutputPath(job1, new Path(args[1]));
		job1.waitForCompletion(true); // the language-model job must not start until the n-gram library is completely built
		
		// configure language model mapreduce job
		Configuration conf2 = new Configuration();
		conf2.set("THRESHOLD", args[3]);
		conf2.set("TOP_K", args[4]);
		DBConfiguration.configureDB(conf2, "com.mysql.jdbc.Driver", "jdbc:mysql://127.0.0.1:3306/tp", "root", "123456"); // establish connection with mySQL database   
		Job job2 = Job.getInstance(conf2);
		job2.setNumReduceTasks(3);
		job2.setJobName("LModel");
		job2.setJarByClass(Dispatcher.class);			
		job2.addArchiveToClassPath(new Path("/mysql/mysql-connector-java-5.1.39-bin.jar")); // putting this jar file into jre/lib/ext is recommended	
		job2.setMapperClass(LanguageModel.ModelMapper.class);
		job2.setReducerClass(LanguageModel.ModelReducer.class);
		job2.setMapOutputKeyClass(Text.class); // the mapper emits a different key type than the reducer
		job2.setMapOutputValueClass(Text.class); // the mapper emits a different value type than the reducer
		job2.setOutputKeyClass(DBOutputWritable.class);
		job2.setOutputValueClass(NullWritable.class);
		job2.setInputFormatClass(TextInputFormat.class);
		job2.setOutputFormatClass(DBOutputFormat.class);
		TextInputFormat.setInputPaths(job2, new Path(args[1]));
		DBOutputFormat.setOutput(job2, "LanguageModel", new String[] {"starter", "follower", "probability"});
		System.exit(job2.waitForCompletion(true) ? 0 : 1);
}
 
Developer ID: JianyangZhang, Project: Hot-Search-Terms, Lines of code: 42, Source file: Dispatcher.java


Note: The org.apache.hadoop.mapreduce.Job.setMapOutputKeyClass method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their authors, and copyright in the source code remains with the original authors; consult the corresponding project's License before distributing or using the code, and do not reproduce without permission.