本文整理汇总了Java中org.apache.hadoop.mapreduce.lib.db.DBInputFormat.setInput方法的典型用法代码示例。如果您正苦于以下问题:Java DBInputFormat.setInput方法的具体用法?Java DBInputFormat.setInput怎么用?Java DBInputFormat.setInput使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.mapreduce.lib.db.DBInputFormat
的用法示例。
在下文中一共展示了DBInputFormat.setInput方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: main
import org.apache.hadoop.mapreduce.lib.db.DBInputFormat; //导入方法依赖的package包/类
/**
 * Reads rows of the MySQL table "studentinfo" through DBInputFormat and
 * writes them to HDFS under /hua01 using the identity reducer.
 */
public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // BUG FIX: Job's constructor snapshots the Configuration, so any
    // mutation of `conf` after the Job is created is invisible to the job.
    // In the original, both configureDB() and addFileToClassPath() ran
    // AFTER the Job was built, so the job never received the JDBC driver,
    // URL, credentials, or the connector jar. They must run first.
    DBConfiguration.configureDB(conf, "com.mysql.jdbc.Driver",
            "jdbc:mysql://192.168.3.244:3306/hadoop", "hua", "hadoop");
    DistributedCache.addFileToClassPath(new Path(
            "/lib/mysql-connector-java-5.1.0-bin.jar"), conf);
    // Job.getInstance replaces the deprecated new Job(conf) constructor.
    Job job = Job.getInstance(conf);
    job.setJarByClass(DBToFileMapReduce.class);
    job.setMapperClass(DBInputMapper.class);
    job.setReducerClass(Reducer.class); // base Reducer = identity reducer
    job.setMapOutputKeyClass(LongWritable.class);
    job.setMapOutputValueClass(Text.class);
    job.setOutputKeyClass(LongWritable.class);
    job.setOutputValueClass(Text.class);
    job.setInputFormatClass(DBInputFormat.class);
    Path outputPath = new Path("/hua01");
    FileOutputFormat.setOutputPath(job, outputPath);
    // Remove stale output so the job does not fail on an existing directory.
    outputPath.getFileSystem(conf).delete(outputPath, true);
    String[] fields = { "id", "name" };
    // Read columns id,name from studentinfo, split/ordered by id.
    DBInputFormat.setInput(job, StudentinfoRecord.class, "studentinfo", null, "id", fields);
    job.waitForCompletion(true);
}
示例2: run
import org.apache.hadoop.mapreduce.lib.db.DBInputFormat; //导入方法依赖的package包/类
@Override
//Usage DBCountPageView [driverClass dburl]
/**
 * Counts page views per URL: reads the "Access" table via DBInputFormat,
 * aggregates with a combiner + reducer, and writes results to the
 * "Pageview" table via DBOutputFormat.
 *
 * @param args optional {driverClass, dbUrl}; defaults are used when fewer
 *             than two arguments are given
 * @return 0 on successful job completion, 1 otherwise
 * @throws RuntimeException if post-job verification fails
 */
public int run(String[] args) throws Exception {
    // Use the built-in defaults unless both driver and URL were supplied.
    String driverClassName = DRIVER_CLASS;
    String url = DB_URL;
    if (args.length > 1) {
        driverClassName = args[0];
        url = args[1];
    }
    initialize(driverClassName, url);
    Configuration conf = getConf();
    // Configure JDBC settings before the Job is created; Job copies the
    // Configuration at construction time.
    DBConfiguration.configureDB(conf, driverClassName, url);
    // Job.getInstance replaces the deprecated new Job(conf) constructor.
    Job job = Job.getInstance(conf);
    job.setJobName("Count Pageviews of URLs");
    job.setJarByClass(DBCountPageView.class);
    job.setMapperClass(PageviewMapper.class);
    job.setCombinerClass(LongSumReducer.class);
    job.setReducerClass(PageviewReducer.class);
    DBInputFormat.setInput(job, AccessRecord.class, "Access"
            , null, "url", AccessFieldNames);
    DBOutputFormat.setOutput(job, "Pageview", PageviewFieldNames);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(LongWritable.class);
    job.setOutputKeyClass(PageviewRecord.class);
    job.setOutputValueClass(NullWritable.class);
    int ret;
    try {
        ret = job.waitForCompletion(true) ? 0 : 1;
        boolean correct = verify();
        if (!correct) {
            throw new RuntimeException("Evaluation was not correct!");
        }
    } finally {
        // Always release DB resources, even when the job or verify fails.
        shutdown();
    }
    return ret;
}
示例3: run
import org.apache.hadoop.mapreduce.lib.db.DBInputFormat; //导入方法依赖的package包/类
@Override
//Usage DBCountPageView [driverClass dburl]
/**
 * Runs the page-view counting job: rows come from the "HAccess" table via
 * DBInputFormat, are aggregated, and land in the "Pageview" table via
 * DBOutputFormat. Returns 0 if the job succeeds, 1 otherwise; throws if
 * post-job verification fails.
 */
public int run(String[] args) throws Exception {
    // Command line overrides the defaults only when both values are present.
    boolean fromArgs = args.length > 1;
    String driver = fromArgs ? args[0] : DRIVER_CLASS;
    String dbUrl = fromArgs ? args[1] : DB_URL;
    initialize(driver, dbUrl);
    Configuration configuration = getConf();
    DBConfiguration.configureDB(configuration, driver, dbUrl);
    Job countJob = Job.getInstance(configuration);
    countJob.setJobName("Count Pageviews of URLs");
    countJob.setJarByClass(DBCountPageView.class);
    countJob.setMapperClass(PageviewMapper.class);
    countJob.setCombinerClass(LongSumReducer.class);
    countJob.setReducerClass(PageviewReducer.class);
    countJob.setMapOutputKeyClass(Text.class);
    countJob.setMapOutputValueClass(LongWritable.class);
    countJob.setOutputKeyClass(PageviewRecord.class);
    countJob.setOutputValueClass(NullWritable.class);
    DBInputFormat.setInput(countJob, AccessRecord.class, "HAccess",
            null, "url", AccessFieldNames);
    DBOutputFormat.setOutput(countJob, "Pageview", PageviewFieldNames);
    int exitCode;
    try {
        exitCode = countJob.waitForCompletion(true) ? 0 : 1;
        if (!verify()) {
            throw new RuntimeException("Evaluation was not correct!");
        }
    } finally {
        // Tear down database state regardless of job outcome.
        shutdown();
    }
    return exitCode;
}