This article compiles typical usage examples of the Java class org.apache.hadoop.hbase.mapreduce.TableMapper. If you are wondering what the TableMapper class is for, how to use it, or where to find working examples, the curated class code examples below may help.
The TableMapper class belongs to the org.apache.hadoop.hbase.mapreduce package. Six code examples of the class are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
Example 1: initTableMapperJob
import org.apache.hadoop.hbase.mapreduce.TableMapper; //import the required package/class
public static void initTableMapperJob(String table, Scan scan,
    Class<? extends TableMapper> mapper,
    Class<? extends WritableComparable> outputKeyClass,
    Class<? extends Writable> outputValueClass, Job job,
    boolean addDependencyJars, Class<? extends InputFormat> inputFormatClass)
    throws IOException {
  job.setInputFormatClass(inputFormatClass);
  if (outputValueClass != null) job.setMapOutputValueClass(outputValueClass);
  if (outputKeyClass != null) job.setMapOutputKeyClass(outputKeyClass);
  job.setMapperClass(mapper);
  // Merge HBase defaults into the job configuration without clobbering values already set on it.
  Configuration conf = job.getConfiguration();
  HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
  // Tell TableInputFormat which table to read and which Scan to apply.
  conf.set(TableInputFormat.INPUT_TABLE, table);
  conf.set(TableInputFormat.SCAN, convertScanToString(scan));
  if (addDependencyJars) {
    // Ship HBase jars and the job's dependency jars via the distributed cache.
    addDependencyJars(job);
  }
  TableMapReduceUtil.initCredentials(job);
}
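For context, here is a minimal sketch of how this overload could be used. The RowCountDriver/RowCountMapper classes and the table name are hypothetical and invented for illustration; only the initTableMapperJob signature comes from the example above, and the other calls are standard Hadoop/HBase API.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapreduce.Job;

public class RowCountDriver {

  // A TableMapper receives each row key as ImmutableBytesWritable and the row's cells as Result.
  public static class RowCountMapper extends TableMapper<ImmutableBytesWritable, IntWritable> {
    private static final IntWritable ONE = new IntWritable(1);

    @Override
    protected void map(ImmutableBytesWritable rowKey, Result columns, Context context)
        throws IOException, InterruptedException {
      // Emit one count per scanned row.
      context.write(rowKey, ONE);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "row-count");
    job.setJarByClass(RowCountDriver.class);
    // Assumes a static import of (or access to) the initTableMapperJob overload shown above.
    initTableMapperJob("my_table", new Scan(), RowCountMapper.class,
        ImmutableBytesWritable.class, IntWritable.class, job,
        true, TableInputFormat.class);
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}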
Example 2: initTableMapperJob
import org.apache.hadoop.hbase.mapreduce.TableMapper; //import the required package/class
/**
 * Use this before submitting a TableMap job. It will appropriately set up
 * the job.
 *
 * @param table1 The first table name to read from.
 * @param table2 The second table name to read from.
 * @param scan1 The scan instance for the first table with the columns, time range etc.
 * @param scan2 The scan instance for the second table with the columns, time range etc.
 * @param mapper The mapper class to use.
 * @param outputKeyClass The class of the output key.
 * @param outputValueClass The class of the output value.
 * @param job The current job to adjust. Make sure the passed job is
 *   carrying all necessary HBase configuration.
 * @param addDependencyJars upload HBase jars and jars for any of the configured
 *   job classes via the distributed cache (tmpjars).
 * @param inputFormatClass The input format class to use.
 * @throws IOException When setting up the details fails.
 */
public static void initTableMapperJob(String table1, String table2, Scan scan1, Scan scan2,
    Class<? extends TableMapper> mapper,
    Class<? extends WritableComparable> outputKeyClass,
    Class<? extends Writable> outputValueClass, Job job,
    boolean addDependencyJars, Class<? extends InputFormat> inputFormatClass)
    throws IOException {
  job.setInputFormatClass(inputFormatClass);
  if (outputValueClass != null) job.setMapOutputValueClass(outputValueClass);
  if (outputKeyClass != null) job.setMapOutputKeyClass(outputKeyClass);
  job.setMapperClass(mapper);
  Configuration conf = job.getConfiguration();
  HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
  // Register both input tables and their scans; this relies on a customized
  // TableInputFormat that defines the INPUT_TABLE1/2 and SCAN1/2 keys.
  conf.set(TableInputFormat.INPUT_TABLE1, table1);
  conf.set(TableInputFormat.INPUT_TABLE2, table2);
  conf.set(TableInputFormat.SCAN1, convertScanToString(scan1));
  conf.set(TableInputFormat.SCAN2, convertScanToString(scan2));
  if (addDependencyJars) {
    addDependencyJars(job);
  }
  initCredentials(job);
}
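A hedged invocation sketch follows. The table names, the JoinMapper class, and the DualTableInputFormat class are invented for illustration; the stock HBase TableInputFormat does not expose INPUT_TABLE1/INPUT_TABLE2 or SCAN1/SCAN2, so this overload presupposes a customized input format from the surrounding project.

// Hypothetical call of the two-table overload above; JoinMapper and
// DualTableInputFormat stand in for project-specific classes.
Configuration conf = HBaseConfiguration.create();
Job job = Job.getInstance(conf, "two-table-scan");
initTableMapperJob("orders", "order_items", new Scan(), new Scan(),
    JoinMapper.class, ImmutableBytesWritable.class, Text.class, job,
    true, DualTableInputFormat.class);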
Example 3: startBulkLoad
import org.apache.hadoop.hbase.mapreduce.TableMapper; //import the required package/class
@SuppressWarnings("rawtypes")
private Job startBulkLoad(Configuration conf, String inputTable,
String tableName, Class<? extends TableMapper> clazz, Path outputDir)
throws Exception {
// Create our job to bulk load into HBase
Job job = Job.getInstance(conf, "HBase Bulk Loader");
job.setJarByClass(getClass());
// Initialize our mapper by specifying the input table
TableMapReduceUtil.initTableMapperJob(inputTable, new Scan(), clazz,
ImmutableBytesWritable.class, KeyValue.class, job);
HFileOutputFormat.configureIncrementalLoad(job, new HTable(conf,
tableName));
HFileOutputFormat.setOutputPath(job, outputDir);
// launch the job
job.waitForCompletion(true);
return job;
}
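For reference, here is a minimal sketch of a mapper that could be passed as clazz to startBulkLoad. The class name and the cf/q column coordinates are assumptions; the only requirement imposed by the job setup above is that the mapper emits ImmutableBytesWritable keys and KeyValue values.

import java.io.IOException;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical mapper: copies one column of each source row into a KeyValue
// so HFileOutputFormat can sort it into HFiles for bulk loading.
public class BulkLoadMapper extends TableMapper<ImmutableBytesWritable, KeyValue> {
  private static final byte[] FAMILY = Bytes.toBytes("cf");
  private static final byte[] QUALIFIER = Bytes.toBytes("q");

  @Override
  protected void map(ImmutableBytesWritable rowKey, Result columns, Context context)
      throws IOException, InterruptedException {
    byte[] value = columns.getValue(FAMILY, QUALIFIER);
    if (value != null) {
      context.write(rowKey, new KeyValue(columns.getRow(), FAMILY, QUALIFIER, value));
    }
  }
}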
Example 4: prepareTableMapper
import org.apache.hadoop.hbase.mapreduce.TableMapper; //import the required package/class
/**
 * @param inputTable input table
 * @param outputPath output path
 * @param scanner table row scanner object
 * @param mapper mapper class
 * @param mapperKey table mapper output key class
 * @param mapperValue table mapper output value class
 * @param reducer reducer class
 * @param reducerKey reducer output key class
 * @param reducerValue reducer output value class
 * @param outputFormat output (file) format
 * @return a ready-to-run job, unless you need to assign more job-specific settings
 * @throws IOException if setting up the job fails
 */
@SuppressWarnings("rawtypes")
public Job prepareTableMapper(String inputTable, Path outputPath, Scan scanner, Class<? extends TableMapper> mapper,
Class<? extends WritableComparable> mapperKey, Class<? extends WritableComparable> mapperValue,
Class<? extends Reducer> reducer, Class<? extends Writable> reducerKey,
Class<? extends Writable> reducerValue, Class<? extends OutputFormat> outputFormat) throws IOException
{
setInputTable(inputTable);
// Configures the Job & starts it:
Configuration conf = getConf();
Job job = new Job(conf);
job.setJobName(getCustomJobName(job, mapper, reducer));
// mapper:
TableMapReduceUtil.initTableMapperJob(getInputTable(), scanner, mapper, mapperKey, mapperValue, job);
job.setReducerClass(reducer);
job.setOutputKeyClass(reducerKey);
job.setOutputValueClass(reducerValue);
job.setOutputFormatClass(outputFormat);
FileOutputFormat.setOutputPath(job, outputPath);
return job;
}
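A hedged usage sketch, reusing the hypothetical RowCountMapper from the Example 1 sketch; the table name, output path, and the choice of Hadoop's built-in IntSumReducer and TextOutputFormat are assumptions made for illustration.

// Build the job with the helper above, then submit it like any other MapReduce job.
Job job = prepareTableMapper("metrics", new Path("/tmp/metrics-out"), new Scan(),
    RowCountDriver.RowCountMapper.class,
    ImmutableBytesWritable.class, IntWritable.class,
    IntSumReducer.class, ImmutableBytesWritable.class, IntWritable.class,
    TextOutputFormat.class);
System.exit(job.waitForCompletion(true) ? 0 : 1);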
Example 5: wrap
import org.apache.hadoop.hbase.mapreduce.TableMapper; //import the required package/class
@SuppressWarnings("rawtypes")
public static Class<? extends TableMapper> wrap(String type) {
Class<? extends TableMapper> c = IndexMapper.class;
if (null != type && type.length() > 0) {
if (type.equals(Const.MAPPER_TYPE_JSON))
c = IndexJsonMapper.class;
else if (type.equals(Const.MAPPER_TYPE_ROWKEY))
c = IndexRowkeyMapper.class;
}
return c;
}
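A short usage sketch of wrap follows. The table name, the "index.mapper.type" configuration key, and the output key/value classes are assumptions; only wrap itself and the standard TableMapReduceUtil.initTableMapperJob call are taken from the example and the HBase API.

// Resolve the mapper class from a configuration string, then hand it to HBase's job setup.
Class<? extends TableMapper> mapperClass = wrap(conf.get("index.mapper.type"));
TableMapReduceUtil.initTableMapperJob("docs", new Scan(), mapperClass,
    ImmutableBytesWritable.class, Text.class, job);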
Example 6: createMapDriver
import org.apache.hadoop.hbase.mapreduce.TableMapper; //import the required package/class
public TableMapDriver<ImmutableBytesWritable, IntWritable> createMapDriver(TableMapper<ImmutableBytesWritable, IntWritable> tableMapper) {
TableMapDriver<ImmutableBytesWritable, IntWritable> mapDriver = TableMapDriver.newTableMapDriver(tableMapper);
configure(mapDriver.getConfiguration());
return mapDriver;
}
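A hedged test sketch using the factory above. It assumes TableMapDriver follows MRUnit's MapDriver API (withInput/withOutput/runTest), an HBase version where KeyValue implements Cell and Result.create is available, and it reuses the hypothetical RowCountMapper from the Example 1 sketch; the row key and cell values are invented test data.

// Feed one HBase row into the mapper and assert that it emits a count of 1 for that row key.
byte[] row = Bytes.toBytes("row-1");
KeyValue cell = new KeyValue(row, Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
Result input = Result.create(Arrays.<Cell>asList(cell));

TableMapDriver<ImmutableBytesWritable, IntWritable> driver =
    createMapDriver(new RowCountDriver.RowCountMapper());
driver.withInput(new ImmutableBytesWritable(row), input)
      .withOutput(new ImmutableBytesWritable(row), new IntWritable(1))
      .runTest();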