This article collects and organizes typical usage examples of the Java method org.apache.hadoop.mapreduce.Job.setMapOutputKeyClass. If you are wondering what Job.setMapOutputKeyClass does, how to use it, or where to find examples of it, the curated code samples below should help. You can also read further about the enclosing class, org.apache.hadoop.mapreduce.Job.
The following shows 15 code examples of Job.setMapOutputKeyClass, sorted by popularity by default.
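Before the collected examples, here is a minimal sketch of why this method matters: the map output key/value classes default to the job's output key/value classes, so setMapOutputKeyClass (and setMapOutputValueClass) only need to be called when the mapper emits different types than the reducer. The mapper and reducer classes below (WordCountMapper, AverageReducer) are hypothetical placeholders, not taken from the examples that follow.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class MapOutputKeyDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "map output key demo");
    job.setJarByClass(MapOutputKeyDemo.class);
    job.setMapperClass(WordCountMapper.class);  // hypothetical mapper emitting <Text, IntWritable>
    job.setReducerClass(AverageReducer.class);  // hypothetical reducer emitting <Text, DoubleWritable>

    // The mapper's output types differ from the job's final output types, so they
    // must be declared explicitly; otherwise the framework assumes the classes set by
    // setOutputKeyClass/setOutputValueClass and the shuffle fails with a type mismatch.
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(IntWritable.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(DoubleWritable.class);

    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}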
Example 1: main
import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
  Path outDir = new Path("output");
  Configuration conf = new Configuration();
  Job job = Job.getInstance(conf, "user name check");

  job.setJarByClass(UserNamePermission.class);
  job.setMapperClass(UserNamePermission.UserNameMapper.class);
  job.setCombinerClass(UserNamePermission.UserNameReducer.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);

  job.setReducerClass(UserNamePermission.UserNameReducer.class);
  job.setNumReduceTasks(1);

  job.setInputFormatClass(TextInputFormat.class);
  TextInputFormat.addInputPath(job, new Path("input"));
  FileOutputFormat.setOutputPath(job, outDir);

  System.exit(job.waitForCompletion(true) ? 0 : 1);
}
Example 2: initMultiTableSnapshotMapperJob
import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
/**
 * Sets up the job for reading from one or more table snapshots, with one or more scans
 * per snapshot. It bypasses HBase servers and reads directly from snapshot files.
 *
 * @param snapshotScans     map of snapshot name to scans on that snapshot.
 * @param mapper            The mapper class to use.
 * @param outputKeyClass    The class of the output key.
 * @param outputValueClass  The class of the output value.
 * @param job               The current job to adjust. Make sure the passed job is
 *                          carrying all necessary HBase configuration.
 * @param addDependencyJars upload HBase jars and jars for any of the configured
 *                          job classes via the distributed cache (tmpjars).
 * @param tmpRestoreDir     temporary directory into which the snapshot files are restored;
 *                          it can be deleted once the job finishes.
 */
public static void initMultiTableSnapshotMapperJob(Map<String, Collection<Scan>> snapshotScans,
    Class<? extends TableMapper> mapper, Class<?> outputKeyClass, Class<?> outputValueClass,
    Job job, boolean addDependencyJars, Path tmpRestoreDir) throws IOException {
  MultiTableSnapshotInputFormat.setInput(job.getConfiguration(), snapshotScans, tmpRestoreDir);

  job.setInputFormatClass(MultiTableSnapshotInputFormat.class);
  if (outputValueClass != null) {
    job.setMapOutputValueClass(outputValueClass);
  }
  if (outputKeyClass != null) {
    job.setMapOutputKeyClass(outputKeyClass);
  }
  job.setMapperClass(mapper);
  Configuration conf = job.getConfiguration();
  HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));

  if (addDependencyJars) {
    addDependencyJars(job);
  }

  resetCacheConfig(job.getConfiguration());
}
Example 3: init
import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
/** {@inheritDoc} */
@Override
public void init(Job job) {
  // setup mapper
  job.setMapperClass(PartitionMapper.class);
  job.setMapOutputKeyClass(IntWritable.class);
  job.setMapOutputValueClass(SummationWritable.class);

  // setup partitioner
  job.setPartitionerClass(IndexPartitioner.class);

  // setup reducer
  job.setReducerClass(SummingReducer.class);
  job.setOutputKeyClass(NullWritable.class);
  job.setOutputValueClass(TaskResult.class);
  final Configuration conf = job.getConfiguration();
  final int nParts = conf.getInt(N_PARTS, 1);
  job.setNumReduceTasks(nParts);

  // setup input
  job.setInputFormatClass(SummationInputFormat.class);
}
Example 4: main
import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
  Configuration conf = new Configuration();
  Job job = Job.getInstance(conf);
  job.setJarByClass(Multiplication.class);

  ChainMapper.addMapper(job, CooccurrenceMapper.class, LongWritable.class, Text.class, Text.class, Text.class, conf);
  ChainMapper.addMapper(job, RatingMapper.class, Text.class, Text.class, Text.class, Text.class, conf);

  job.setMapperClass(CooccurrenceMapper.class);
  job.setMapperClass(RatingMapper.class);

  job.setReducerClass(MultiplicationReducer.class);

  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(DoubleWritable.class);

  // Note: MultipleInputs assigns a mapper per input path below, which overrides the
  // ChainMapper/setMapperClass configuration above; only the per-path mappers take effect.
  MultipleInputs.addInputPath(job, new Path(args[0]), TextInputFormat.class, CooccurrenceMapper.class);
  MultipleInputs.addInputPath(job, new Path(args[1]), TextInputFormat.class, RatingMapper.class);

  TextOutputFormat.setOutputPath(job, new Path(args[2]));
  job.waitForCompletion(true);
}
Example 5: main
import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
  Configuration conf = new Configuration();
  Job job = Job.getInstance(conf);
  job.setJobName("MaxThreeLabel");
  job.setJarByClass(MaxThreeLabel.class);

  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(TextArrayWritable.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);

  job.setMapperClass(MaxThreeLabelMap.class);
  job.setReducerClass(MaxThreeLabelReduce.class);

  job.setInputFormatClass(TextInputFormat.class);
  job.setOutputFormatClass(TextOutputFormat.class);

  FileInputFormat.addInputPath(job, new Path(args[0]));
  FileOutputFormat.setOutputPath(job, new Path(args[1]));
  boolean wait = job.waitForCompletion(true);
  System.exit(wait ? 0 : 1);
}
Example 6: createJob
import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
/**
 * Create the Job object to be submitted, with all the configuration set.
 *
 * @return Reference to the job object.
 * @throws IOException Exception, if any
 */
private Job createJob() throws IOException {
  String jobName = "s3mapreducecp";
  String userChosenName = getConf().get(MRJobConfig.JOB_NAME);
  if (userChosenName != null) {
    jobName += ": " + userChosenName;
  }
  Job job = Job.getInstance(getConf());
  job.setJobName(jobName);
  job.setInputFormatClass(ConfigurationUtil.getStrategy(getConf(), inputOptions));
  job.setJarByClass(CopyMapper.class);
  configureOutputFormat(job);

  job.setMapperClass(CopyMapper.class);
  job.setNumReduceTasks(0);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  job.setOutputFormatClass(CopyOutputFormat.class);
  job.getConfiguration().set(MRJobConfig.MAP_SPECULATIVE, "false");
  job.getConfiguration().set(MRJobConfig.NUM_MAPS, String.valueOf(inputOptions.getMaxMaps()));

  return job;
}
Example 7: configure
import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
/**
* Configure the {@link Job} for enabling compression emulation.
*/
static void configure(final Job job) throws IOException, InterruptedException,
    ClassNotFoundException {
  // set the random text mapper
  job.setMapperClass(RandomTextDataMapper.class);
  job.setNumReduceTasks(0);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  job.setInputFormatClass(GenDataFormat.class);
  job.setJarByClass(GenerateData.class);

  // set the output compression true
  FileOutputFormat.setCompressOutput(job, true);
  try {
    FileInputFormat.addInputPath(job, new Path("ignored"));
  } catch (IOException e) {
    LOG.error("Error while adding input path ", e);
  }
}
Example 8: createJob
import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
public static Job createJob(String name, String base) throws IOException {
  Configuration conf = new Configuration();
  conf.set(Total.QUERIED_NAME, name);
  Job job = Job.getInstance(new Cluster(conf), conf);
  job.setJarByClass(Cut.class);

  // in
  String in = base;
  if (!base.endsWith("/")) {
    in = in.concat("/");
  }
  in = in.concat("employees");
  SequenceFileInputFormat.addInputPath(job, new Path(in));
  job.setInputFormatClass(SequenceFileInputFormat.class);

  // map
  job.setMapperClass(CutMapper.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Employee.class);

  // out
  SequenceFileOutputFormat.setOutputPath(job, new Path(base + "/tmp"));
  job.setOutputFormatClass(SequenceFileOutputFormat.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Employee.class);

  return job;
}
Example 9: createJob
import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
/** Create and setup a job */
private static Job createJob(String name, Configuration conf) throws IOException {
  final Job job = Job.getInstance(conf, NAME + "_" + name);
  final Configuration jobconf = job.getConfiguration();
  job.setJarByClass(BaileyBorweinPlouffe.class);

  // setup mapper
  job.setMapperClass(BbpMapper.class);
  job.setMapOutputKeyClass(LongWritable.class);
  job.setMapOutputValueClass(BytesWritable.class);

  // setup reducer
  job.setReducerClass(BbpReducer.class);
  job.setOutputKeyClass(LongWritable.class);
  job.setOutputValueClass(BytesWritable.class);
  job.setNumReduceTasks(1);

  // setup input
  job.setInputFormatClass(BbpInputFormat.class);

  // disable task timeout
  jobconf.setLong(MRJobConfig.TASK_TIMEOUT, 0);

  // do not use speculative execution
  jobconf.setBoolean(MRJobConfig.MAP_SPECULATIVE, false);
  jobconf.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, false);
  return job;
}
Example 10: main
import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
public static void main(String[] args) {
  if (args.length != 2) {
    System.err.println("Usage: Year Traffic Statistics <input path> <output path>");
    System.exit(-1);
  }
  String nginxLogInput = args[0];
  String nginxLogOutput = args[1];

  Configuration configuration = new Configuration();
  try {
    Job job = Job.getInstance(configuration);
    job.setJobName("YearTrafficStatistics");
    job.setJarByClass(YearTrafficStatisticsMapReduce.class);

    FileInputFormat.addInputPath(job, new Path(nginxLogInput));
    FileOutputFormat.setOutputPath(job, new Path(nginxLogOutput));

    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputFormatClass(TextOutputFormat.class);

    job.setMapperClass(YearTrafficStatisticsMapper.class);
    job.setReducerClass(YearTrafficStatisticsReducer.class);

    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    job.waitForCompletion(true);
  } catch (IOException | InterruptedException | ClassNotFoundException e) {
    e.printStackTrace();
  }
}
Example 11: testScanFromConfiguration
import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
/**
* Tests an MR Scan initialized from properties set in the Configuration.
*
* @throws IOException
* @throws ClassNotFoundException
* @throws InterruptedException
*/
protected void testScanFromConfiguration(String start, String stop, String last)
    throws IOException, InterruptedException, ClassNotFoundException {
  String jobName = "ScanFromConfig" + (start != null ? start.toUpperCase() : "Empty") +
      "To" + (stop != null ? stop.toUpperCase() : "Empty");
  Configuration c = new Configuration(TEST_UTIL.getConfiguration());
  c.set(TableInputFormat.INPUT_TABLE, Bytes.toString(TABLE_NAME));
  c.set(TableInputFormat.SCAN_COLUMN_FAMILY, Bytes.toString(INPUT_FAMILY));
  c.set(KEY_STARTROW, start != null ? start : "");
  c.set(KEY_LASTROW, last != null ? last : "");

  if (start != null) {
    c.set(TableInputFormat.SCAN_ROW_START, start);
  }
  if (stop != null) {
    c.set(TableInputFormat.SCAN_ROW_STOP, stop);
  }

  Job job = new Job(c, jobName);
  job.setMapperClass(ScanMapper.class);
  job.setReducerClass(ScanReducer.class);
  job.setMapOutputKeyClass(ImmutableBytesWritable.class);
  job.setMapOutputValueClass(ImmutableBytesWritable.class);
  job.setInputFormatClass(TableInputFormat.class);
  job.setNumReduceTasks(1);
  FileOutputFormat.setOutputPath(job, new Path(job.getJobName()));
  TableMapReduceUtil.addDependencyJars(job);
  assertTrue(job.waitForCompletion(true));
}
Example 12: createJob
import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
/**
 * Create the Job object to be submitted, with all the configuration set.
 *
 * @return Reference to the job object.
 * @throws IOException Exception, if any
 */
private Job createJob() throws IOException {
  String jobName = "distcp";
  String userChosenName = getConf().get(JobContext.JOB_NAME);
  if (userChosenName != null) {
    jobName += ": " + userChosenName;
  }
  Job job = Job.getInstance(getConf());
  job.setJobName(jobName);
  job.setInputFormatClass(DistCpUtils.getStrategy(getConf(), inputOptions));
  job.setJarByClass(CopyMapper.class);
  configureOutputFormat(job);

  job.setMapperClass(CopyMapper.class);
  job.setNumReduceTasks(0);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  job.setOutputFormatClass(CopyOutputFormat.class);
  job.getConfiguration().set(JobContext.MAP_SPECULATIVE, "false");
  job.getConfiguration().set(JobContext.NUM_MAPS,
      String.valueOf(inputOptions.getMaxMaps()));

  if (inputOptions.getSslConfigurationFile() != null) {
    setupSSLConfig(job);
  }

  inputOptions.appendToConf(job.getConfiguration());
  return job;
}
Example 13: run
import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
public void run() throws IOException, ClassNotFoundException, InterruptedException {
  Job job = Job.getInstance(configuration, "com.romanysik.util.Transposer");
  job.setJarByClass(MRNMF.class);

  FileInputFormat.addInputPath(job, new Path(inputPath));
  FileOutputFormat.setOutputPath(job, new Path(outputPath));

  job.setInputFormatClass(TextInputFormat.class);
  job.setOutputFormatClass(TextOutputFormat.class);

  job.setMapOutputKeyClass(LongWritable.class);
  job.setMapOutputValueClass(Text.class);

  job.setMapperClass(TMapper.class);
  job.setReducerClass(TReducer.class);

  job.waitForCompletion(true);
}
Example 14: run
import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
public void run() throws IOException, ClassNotFoundException, InterruptedException {
  Job job = Job.getInstance(configuration, "com.romanysik.matrixmultiplication.MM1");
  job.setJarByClass(MRNMF.class);

  FileInputFormat.addInputPath(job, new Path(inputPath));
  FileOutputFormat.setOutputPath(job, new Path(outputPath));

  job.setInputFormatClass(TextInputFormat.class);
  job.setOutputFormatClass(TextOutputFormat.class);

  job.setMapOutputKeyClass(IntWritable.class);
  job.setMapOutputValueClass(Text.class);

  job.setMapperClass(MM1Mapper.class);

  job.waitForCompletion(true);
}
Example 15: main
import org.apache.hadoop.mapreduce.Job; // import the package/class the method depends on
public static void main(String[] args) throws ClassNotFoundException, IOException, InterruptedException {
  // configure the n-gram MapReduce job
  Configuration conf1 = new Configuration();
  conf1.set("textinputformat.record.delimiter", "."); // read a complete sentence as one line
  conf1.set("GRAM_NUMBER", args[2]);
  Job job1 = Job.getInstance(conf1);
  job1.setNumReduceTasks(3);
  job1.setJobName("NGram");
  job1.setJarByClass(Dispatcher.class);
  job1.setMapperClass(NGramBuilder.NGramMapper.class);
  job1.setReducerClass(NGramBuilder.NGramReducer.class);
  job1.setOutputKeyClass(Text.class);
  job1.setOutputValueClass(IntWritable.class);
  job1.setInputFormatClass(TextInputFormat.class);   // default format: reads lines of text files
  job1.setOutputFormatClass(TextOutputFormat.class); // default format: key \t value
  TextInputFormat.setInputPaths(job1, new Path(args[0]));
  TextOutputFormat.setOutputPath(job1, new Path(args[1]));
  job1.waitForCompletion(true); // the language model job does not start until the n-gram library is fully built

  // configure the language model MapReduce job
  Configuration conf2 = new Configuration();
  conf2.set("THRESHOLD", args[3]);
  conf2.set("TOP_K", args[4]);
  // establish the connection with the MySQL database
  DBConfiguration.configureDB(conf2, "com.mysql.jdbc.Driver", "jdbc:mysql://127.0.0.1:3306/tp", "root", "123456");
  Job job2 = Job.getInstance(conf2);
  job2.setNumReduceTasks(3);
  job2.setJobName("LModel");
  job2.setJarByClass(Dispatcher.class);
  // putting this jar file into jre/lib/ext is recommended
  job2.addArchiveToClassPath(new Path("/mysql/mysql-connector-java-5.1.39-bin.jar"));
  job2.setMapperClass(LanguageModel.ModelMapper.class);
  job2.setReducerClass(LanguageModel.ModelReducer.class);
  job2.setMapOutputKeyClass(Text.class);   // the mapper emits a different key type than the reducer
  job2.setMapOutputValueClass(Text.class); // the mapper emits a different value type than the reducer
  job2.setOutputKeyClass(DBOutputWritable.class);
  job2.setOutputValueClass(NullWritable.class);
  job2.setInputFormatClass(TextInputFormat.class);
  job2.setOutputFormatClass(DBOutputFormat.class);
  TextInputFormat.setInputPaths(job2, new Path(args[1]));
  DBOutputFormat.setOutput(job2, "LanguageModel", new String[] {"starter", "follower", "probability"});
  System.exit(job2.waitForCompletion(true) ? 0 : 1);
}