This article collects representative usage examples of the Java method org.apache.hive.hcatalog.mapreduce.HCatInputFormat.setInput. If you are wondering what HCatInputFormat.setInput does, how to call it, or what real-world usage looks like, the hand-picked code examples below should help. You can also explore the containing class, org.apache.hive.hcatalog.mapreduce.HCatInputFormat, for further usage examples.
Nine code examples of HCatInputFormat.setInput are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
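Before the individual examples, here is a minimal, self-contained sketch of the pattern they all share: bind the job's input to a Hive table with HCatInputFormat.setInput, declare HCatInputFormat as the input format, and consume each row as an HCatRecord in the mapper. The database name ("default"), table name ("my_table"), mapper logic, and output path are placeholder assumptions, not code taken from any of the examples below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hive.hcatalog.data.HCatRecord;
import org.apache.hive.hcatalog.mapreduce.HCatInputFormat;

public class HCatSetInputSketch {

    // Hypothetical mapper: HCatInputFormat delivers each table row as an HCatRecord,
    // whose columns can be read by position (or by name via the table schema).
    public static class FirstColumnMapper
            extends Mapper<WritableComparable, HCatRecord, Text, IntWritable> {
        private static final IntWritable ONE = new IntWritable(1);

        @Override
        protected void map(WritableComparable key, HCatRecord value, Context context)
                throws IOException, InterruptedException {
            context.write(new Text(String.valueOf(value.get(0))), ONE);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "hcat-setinput-sketch");
        // Resolve the Hive table "default.my_table" through the metastore and
        // attach it as the job's input (database and table names are placeholders).
        HCatInputFormat.setInput(job, "default", "my_table");
        job.setInputFormatClass(HCatInputFormat.class);
        job.setJarByClass(HCatSetInputSketch.class);
        job.setMapperClass(FirstColumnMapper.class);
        job.setNumReduceTasks(0);                    // map-only sketch
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileOutputFormat.setOutputPath(job, new Path(args[0]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}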
Example 1: run
import org.apache.hive.hcatalog.mapreduce.HCatInputFormat; // import the package/class the method depends on
@Override
public int run(String[] args) throws Exception {
    if (args.length != 3) {
        System.err.println("Usage: hiveload -D" + RDF_MIME_TYPE_PROPERTY + "='application/ld+json' [-D" + MRJobConfig.QUEUE_NAME + "=proofofconcepts] [-D" + HIVE_DATA_COLUMN_INDEX_PROPERTY + "=3] [-D" + BASE_URI_PROPERTY + "='http://my_base_uri/'] [-D" + HalyardBulkLoad.SPLIT_BITS_PROPERTY + "=8] [-D" + HalyardBulkLoad.DEFAULT_CONTEXT_PROPERTY + "=http://new_context] [-D" + HalyardBulkLoad.OVERRIDE_CONTEXT_PROPERTY + "=true] <hive_table_name> <output_path> <hbase_table_name>");
        return -1;
    }
    TableMapReduceUtil.addDependencyJars(getConf(),
            NTriplesUtil.class,
            Rio.class,
            AbstractRDFHandler.class,
            RDFFormat.class,
            RDFParser.class);
    HBaseConfiguration.addHbaseResources(getConf());
    getConf().setLong(DEFAULT_TIMESTAMP_PROPERTY, getConf().getLong(DEFAULT_TIMESTAMP_PROPERTY, System.currentTimeMillis()));
    Job job = Job.getInstance(getConf(), "HalyardHiveLoad -> " + args[1] + " -> " + args[2]);
    int i = args[0].indexOf('.');
    HCatInputFormat.setInput(job, i > 0 ? args[0].substring(0, i) : null, args[0].substring(i + 1));
    job.setJarByClass(HalyardHiveLoad.class);
    job.setMapperClass(HiveMapper.class);
    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
    job.setMapOutputValueClass(KeyValue.class);
    job.setInputFormatClass(HCatInputFormat.class);
    job.setSpeculativeExecution(false);
    job.setReduceSpeculativeExecution(false);
    try (HTable hTable = HalyardTableUtils.getTable(getConf(), args[2], true, getConf().getInt(HalyardBulkLoad.SPLIT_BITS_PROPERTY, 3))) {
        HFileOutputFormat2.configureIncrementalLoad(job, hTable.getTableDescriptor(), hTable.getRegionLocator());
        FileInputFormat.setInputDirRecursive(job, true);
        FileInputFormat.setInputPaths(job, args[0]);
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        TableMapReduceUtil.addDependencyJars(job);
        TableMapReduceUtil.initCredentials(job);
        if (job.waitForCompletion(true)) {
            new LoadIncrementalHFiles(getConf()).doBulkLoad(new Path(args[1]), hTable);
            LOG.info("Bulk Load Completed..");
            return 0;
        }
    }
    return -1;
}
Example 2: configureJob
import org.apache.hive.hcatalog.mapreduce.HCatInputFormat; // import the package/class the method depends on
@Override
public void configureJob(Job job) {
    try {
        job.getConfiguration().addResource("hive-site.xml");
        HCatInputFormat.setInput(job, dbName, tableName);
        job.setInputFormatClass(HCatInputFormat.class);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
Example 3: setupMapper
import org.apache.hive.hcatalog.mapreduce.HCatInputFormat; // import the package/class the method depends on
private void setupMapper(String intermediateTable) throws IOException {
    // FileInputFormat.setInputPaths(job, input);
    String[] dbTableNames = HadoopUtil.parseHiveTableName(intermediateTable);
    HCatInputFormat.setInput(job, dbTableNames[0], dbTableNames[1]);
    job.setInputFormatClass(HCatInputFormat.class);
    job.setMapperClass(FactDistinctColumnsMapper.class);
    job.setCombinerClass(FactDistinctColumnsCombiner.class);
    job.setMapOutputKeyClass(ShortWritable.class);
    job.setMapOutputValueClass(Text.class);
}
Example 4: setupMapper
import org.apache.hive.hcatalog.mapreduce.HCatInputFormat; // import the package/class the method depends on
private void setupMapper(String intermediateTable) throws IOException {
    String[] dbTableNames = HadoopUtil.parseHiveTableName(intermediateTable);
    HCatInputFormat.setInput(job, dbTableNames[0], dbTableNames[1]);
    job.setInputFormatClass(HCatInputFormat.class);
    job.setMapperClass(InvertedIndexMapper.class);
    job.setMapOutputKeyClass(LongWritable.class);
    job.setMapOutputValueClass(ImmutableBytesWritable.class);
    job.setPartitionerClass(InvertedIndexPartitioner.class);
}
Example 5: createHiveTableRDD
import org.apache.hive.hcatalog.mapreduce.HCatInputFormat; // import the package/class the method depends on
/**
 * Creates the hive table rdd.
 *
 * @param javaSparkContext the java spark context
 * @param conf the conf
 * @param db the db
 * @param table the table
 * @param partitionFilter the partition filter
 * @return the java pair rdd
 * @throws IOException Signals that an I/O exception has occurred.
 */
public static JavaPairRDD<WritableComparable, HCatRecord> createHiveTableRDD(JavaSparkContext javaSparkContext,
        Configuration conf, String db, String table, String partitionFilter) throws IOException {
    HCatInputFormat.setInput(conf, db, table, partitionFilter);
    JavaPairRDD<WritableComparable, HCatRecord> rdd = javaSparkContext.newAPIHadoopRDD(conf,
            HCatInputFormat.class,    // input format
            WritableComparable.class, // input key class
            HCatRecord.class);        // input value class
    return rdd;
}
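A hypothetical call site for this helper might look like the following snippet; the application name, database, table, and partition filter are placeholder assumptions and are not taken from the original project.

// Hypothetical usage of createHiveTableRDD (placeholders: app name, db, table, filter).
JavaSparkContext jsc = new JavaSparkContext(new SparkConf().setAppName("hcat-rdd-sketch"));
Configuration hadoopConf = new Configuration();
JavaPairRDD<WritableComparable, HCatRecord> rdd =
        createHiveTableRDD(jsc, hadoopConf, "default", "my_table", "dt='2015-01-01'");
System.out.println("rows read from Hive table: " + rdd.count());
jsc.stop();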
Example 6: run
import org.apache.hive.hcatalog.mapreduce.HCatInputFormat; // import the package/class the method depends on
@Override
public int run(String[] args) throws Exception {
    Configuration conf = getConf();
    args = new GenericOptionsParser(conf, args).getRemainingArgs();
    final String jobName = args[0];
    final String dbName = args[1];
    final String inTableName = args[2];
    final String outPath = args[3];
    System.out.println("jobname: " + jobName);
    System.out.println("dbName: " + dbName);
    System.out.println("inTableName: " + inTableName);
    System.out.println("outPath: " + outPath);
    Job job = Job.getInstance(conf, jobName);
    job.setInputFormatClass(HCatInputFormat.class);
    job.setJarByClass(CMSStatePaymentsTool.class);
    job.setMapperClass(CMSStatePaymentsMapper.class);
    job.setReducerClass(CMSStatePaymentsReducer.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);
    HCatInputFormat.setInput(job, dbName, inTableName);
    FileOutputFormat.setOutputPath(job, new Path(outPath));
    return (job.waitForCompletion(true) ? 0 : 1);
}
Example 7: run
import org.apache.hive.hcatalog.mapreduce.HCatInputFormat; // import the package/class the method depends on
@Override
public int run(String[] args) throws Exception {
    Options options = new Options();
    try {
        options.addOption(OPTION_TABLE);
        options.addOption(OPTION_OUTPUT_PATH);
        parseOptions(options, args);

        // start job
        String jobName = JOB_TITLE + getOptionsAsString();
        System.out.println("Starting: " + jobName);
        Configuration conf = getConf();
        job = Job.getInstance(conf, jobName);
        setJobClasspath(job);

        Path output = new Path(getOptionValue(OPTION_OUTPUT_PATH));
        FileOutputFormat.setOutputPath(job, output);
        job.getConfiguration().set("dfs.block.size", "67108864");

        // Mapper
        String table = getOptionValue(OPTION_TABLE);
        String[] dbTableNames = HadoopUtil.parseHiveTableName(table);
        HCatInputFormat.setInput(job, dbTableNames[0], dbTableNames[1]);
        job.setInputFormatClass(HCatInputFormat.class);
        job.setMapperClass(ColumnCardinalityMapper.class);
        job.setMapOutputKeyClass(IntWritable.class);
        job.setMapOutputValueClass(BytesWritable.class);

        // Reducer - only one
        job.setReducerClass(ColumnCardinalityReducer.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        job.setOutputKeyClass(IntWritable.class);
        job.setOutputValueClass(LongWritable.class);
        job.setNumReduceTasks(1);

        this.deletePath(job.getConfiguration(), output);
        System.out.println("Going to submit HiveColumnCardinalityJob for table '" + table + "'");

        int result = waitForCompletion(job);
        return result;
    } catch (Exception e) {
        printUsage(options);
        throw e;
    }
}
Example 8: setupMapper
import org.apache.hive.hcatalog.mapreduce.HCatInputFormat; // import the package/class the method depends on
private void setupMapper() throws IOException {
    String tableName = job.getConfiguration().get(BatchConstants.TABLE_NAME);
    String[] dbTableNames = HadoopUtil.parseHiveTableName(tableName);
    log.info("setting hcat input format, db name {} , table name {}", dbTableNames[0], dbTableNames[1]);
    HCatInputFormat.setInput(job, dbTableNames[0], dbTableNames[1]);
    job.setInputFormatClass(HCatInputFormat.class);
    job.setMapperClass(IIDistinctColumnsMapper.class);
    job.setCombinerClass(IIDistinctColumnsCombiner.class);
    job.setMapOutputKeyClass(ShortWritable.class);
    job.setMapOutputValueClass(Text.class);
}
Example 9: validate
import org.apache.hive.hcatalog.mapreduce.HCatInputFormat; // import the package/class the method depends on
/**
 * Validate.
 *
 * @return true, if successful
 */
boolean validate() {
    List<HCatFieldSchema> columns;
    try {
        HCatInputFormat.setInput(conf, database == null ? "default" : database, table, partitionFilter);
        HCatSchema tableSchema = HCatInputFormat.getTableSchema(conf);
        columns = tableSchema.getFields();
    } catch (IOException exc) {
        log.error("Error getting table info {}", toString(), exc);
        return false;
    }
    log.info("{} columns {}", table, columns.toString());

    boolean valid = false;
    if (columns != null && !columns.isEmpty()) {
        // Check labeled column
        List<String> columnNames = new ArrayList<String>();
        for (HCatFieldSchema col : columns) {
            columnNames.add(col.getName());
        }

        // Need at least one feature column and one label column
        valid = columnNames.contains(labelColumn) && columnNames.size() > 1;

        if (valid) {
            labelPos = columnNames.indexOf(labelColumn);
            // Check feature columns
            if (featureColumns == null || featureColumns.isEmpty()) {
                // Feature columns are not provided, so all columns except the label column are feature columns
                featurePositions = new int[columnNames.size() - 1];
                int p = 0;
                for (int i = 0; i < columnNames.size(); i++) {
                    if (i == labelPos) {
                        continue;
                    }
                    featurePositions[p++] = i;
                }
                columnNames.remove(labelPos);
                featureColumns = columnNames;
            } else {
                // Feature columns were provided, verify all feature columns are present in the table
                valid = columnNames.containsAll(featureColumns);
                if (valid) {
                    // Get feature positions
                    featurePositions = new int[featureColumns.size()];
                    for (int i = 0; i < featureColumns.size(); i++) {
                        featurePositions[i] = columnNames.indexOf(featureColumns.get(i));
                    }
                }
            }
            numFeatures = featureColumns.size();
        }
    }
    return valid;
}