This article collects typical usage examples of the Java class org.apache.hadoop.io.DoubleWritable. If you have been wondering what exactly the DoubleWritable class is used for, how to use it, or where to find usage examples, the curated class code examples below may help.
The DoubleWritable class belongs to the org.apache.hadoop.io package. A total of 15 code examples of the DoubleWritable class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
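Before the collected examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing the basic DoubleWritable API: wrapping a double, reading it back with get(), and round-tripping the value through the Writable write()/readFields() contract.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import org.apache.hadoop.io.DoubleWritable;

public class DoubleWritableDemo {
  public static void main(String[] args) throws Exception {
    // Wrap a primitive double and read it back.
    DoubleWritable dw = new DoubleWritable(3.14);
    System.out.println(dw.get()); // 3.14

    // Round-trip through the Writable contract (write/readFields).
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    dw.write(new DataOutputStream(bytes));

    DoubleWritable copy = new DoubleWritable();
    copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
    System.out.println(copy.get()); // 3.14

    // DoubleWritable is a WritableComparable, so it can also serve as a MapReduce key.
    System.out.println(dw.compareTo(copy)); // 0
  }
}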
Example 1: main
import org.apache.hadoop.io.DoubleWritable; // import the required package/class
public static void main(String[] args) throws Exception {
  Configuration conf = new Configuration();
  Job job = Job.getInstance(conf);
  job.setJobName("TF-IDFCount");
  job.setJarByClass(TF_IDF.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(TextArrayWritable.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(DoubleWritable.class);
  job.setMapperClass(TF_IDFMap.class);
  job.setReducerClass(TF_IDFReduce.class);
  job.setInputFormatClass(TextInputFormat.class);
  job.setOutputFormatClass(TextOutputFormat.class);
  FileInputFormat.addInputPath(job, new Path(args[0]));
  FileInputFormat.addInputPath(job, new Path(args[1]));
  FileOutputFormat.setOutputPath(job, new Path(args[2]));
  boolean wait = job.waitForCompletion(true);
  System.exit(wait ? 0 : 1);
}
Example 2: main
import org.apache.hadoop.io.DoubleWritable; // import the required package/class
public static void main(String[] args) throws Exception {
  Configuration conf = new Configuration();
  conf.setFloat("beta", Float.parseFloat(args[3]));
  Job job = Job.getInstance(conf);
  job.setJarByClass(UnitSum.class);
  ChainMapper.addMapper(job, PassMapper.class, Object.class, Text.class, Text.class, DoubleWritable.class, conf);
  ChainMapper.addMapper(job, BetaMapper.class, Text.class, DoubleWritable.class, Text.class, DoubleWritable.class, conf);
  job.setReducerClass(SumReducer.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(DoubleWritable.class);
  MultipleInputs.addInputPath(job, new Path(args[0]), TextInputFormat.class, PassMapper.class);
  MultipleInputs.addInputPath(job, new Path(args[1]), TextInputFormat.class, BetaMapper.class);
  FileOutputFormat.setOutputPath(job, new Path(args[2]));
  job.waitForCompletion(true);
}
Example 3: readFields
import org.apache.hadoop.io.DoubleWritable; // import the required package/class
/**
 * Read (i.e. deserialize) an employee
 */
@Override
public void readFields(DataInput in) throws IOException {
  name = new Text();
  name.readFields(in);
  address = new Text();
  address.readFields(in);
  company = new Text();
  company.readFields(in);
  salary = new DoubleWritable();
  salary.readFields(in);
  department = new Text();
  department.readFields(in);
  isManager = new BooleanWritable();
  isManager.readFields(in);
}
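For completeness, here is a minimal sketch (not part of the original example) of the matching write() method for this employee Writable. The field names are taken from the readFields() implementation above; the Writable contract only requires that write() serializes the same fields in the same order that readFields() reads them back.
/**
 * Write (i.e. serialize) an employee. Fields must be written in the
 * same order that readFields() reads them.
 */
@Override
public void write(DataOutput out) throws IOException {
  name.write(out);        // Text
  address.write(out);     // Text
  company.write(out);     // Text
  salary.write(out);      // DoubleWritable
  department.write(out);  // Text
  isManager.write(out);   // BooleanWritable
}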
Example 4: main
import org.apache.hadoop.io.DoubleWritable; // import the required package/class
public static void main(String[] args) throws Exception {
  Configuration conf = new Configuration();
  conf.set("xmlinput.start", "<page>");
  conf.set("xmlinput.end", "</page>");
  Job job = Job.getInstance(conf);
  job.setJobName("TermFrequencyCount");
  job.setJarByClass(TF.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(IntArrayWritable.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(DoubleWritable.class);
  job.setMapperClass(TFMap.class);
  job.setReducerClass(TFReduce.class);
  job.setInputFormatClass(XmlInputFormat.class);
  job.setOutputFormatClass(TextOutputFormat.class);
  FileInputFormat.addInputPath(job, new Path(args[0]));
  FileOutputFormat.setOutputPath(job, new Path(args[1]));
  boolean wait = job.waitForCompletion(true);
  System.exit(wait ? 0 : 1);
}
Example 5: main
import org.apache.hadoop.io.DoubleWritable; // import the required package/class
public static void main(String[] args) throws Exception {
  Configuration conf = new Configuration();
  Job job = Job.getInstance(conf);
  job.setJarByClass(Multiplication.class);
  ChainMapper.addMapper(job, CooccurrenceMapper.class, LongWritable.class, Text.class, Text.class, Text.class, conf);
  ChainMapper.addMapper(job, RatingMapper.class, Text.class, Text.class, Text.class, Text.class, conf);
  // Note: the MultipleInputs calls below assign a mapper per input path and
  // override the job-level mapper class, so these two setMapperClass calls
  // are effectively redundant.
  job.setMapperClass(CooccurrenceMapper.class);
  job.setMapperClass(RatingMapper.class);
  job.setReducerClass(MultiplicationReducer.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(DoubleWritable.class);
  MultipleInputs.addInputPath(job, new Path(args[0]), TextInputFormat.class, CooccurrenceMapper.class);
  MultipleInputs.addInputPath(job, new Path(args[1]), TextInputFormat.class, RatingMapper.class);
  TextOutputFormat.setOutputPath(job, new Path(args[2]));
  job.waitForCompletion(true);
}
Example 6: main
import org.apache.hadoop.io.DoubleWritable; // import the required package/class
public static void main(String[] args) throws Exception {
  Configuration conf = new Configuration();
  Job job = Job.getInstance(conf);
  job.setMapperClass(SumMapper.class);
  job.setReducerClass(SumReducer.class);
  job.setJarByClass(Sum.class);
  job.setInputFormatClass(TextInputFormat.class);
  job.setOutputFormatClass(TextOutputFormat.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(DoubleWritable.class);
  TextInputFormat.setInputPaths(job, new Path(args[0]));
  TextOutputFormat.setOutputPath(job, new Path(args[1]));
  job.waitForCompletion(true);
}
Example 7: init
import org.apache.hadoop.io.DoubleWritable; // import the required package/class
@Override
public void init() throws IOException {
  registerKey(NullWritable.class.getName(), NullWritableSerializer.class);
  registerKey(Text.class.getName(), TextSerializer.class);
  registerKey(LongWritable.class.getName(), LongWritableSerializer.class);
  registerKey(IntWritable.class.getName(), IntWritableSerializer.class);
  registerKey(Writable.class.getName(), DefaultSerializer.class);
  registerKey(BytesWritable.class.getName(), BytesWritableSerializer.class);
  registerKey(BooleanWritable.class.getName(), BoolWritableSerializer.class);
  registerKey(ByteWritable.class.getName(), ByteWritableSerializer.class);
  registerKey(FloatWritable.class.getName(), FloatWritableSerializer.class);
  registerKey(DoubleWritable.class.getName(), DoubleWritableSerializer.class);
  registerKey(VIntWritable.class.getName(), VIntWritableSerializer.class);
  registerKey(VLongWritable.class.getName(), VLongWritableSerializer.class);
  LOG.info("Hadoop platform inited");
}
Example 8: configPass2
import org.apache.hadoop.io.DoubleWritable; // import the required package/class
protected JobConf configPass2() throws Exception {
  final JobConf conf = new JobConf(getConf(), MatvecNaive.class);
  conf.set("number_nodes", "" + number_nodes);
  conf.setJobName("MatvecNaive_pass2");
  conf.setMapperClass(MapPass2.class);
  conf.setReducerClass(RedPass2.class);
  FileInputFormat.setInputPaths(conf, tempmv_path);
  FileOutputFormat.setOutputPath(conf, output_path);
  conf.setNumReduceTasks(nreducer);
  conf.setOutputKeyClass(LongWritable.class);
  conf.setMapOutputValueClass(DoubleWritable.class);
  conf.setOutputValueClass(Text.class);
  return conf;
}
Example 9: run
import org.apache.hadoop.io.DoubleWritable; // import the required package/class
@Override
public int run(String[] args) throws Exception {
  try {
    final Configuration configuration = HBaseConfiguration.create(getConf());
    setConf(configuration);
    final Job job = Job.getInstance(configuration, "phoenix-mr-order_stats-job");
    final String selectQuery = "SELECT ORDER_ID, CUST_ID, AMOUNT FROM ORDERS ";
    // Set the input table and select query. You can also pass in the list of columns.
    PhoenixMapReduceUtil.setInput(job, OrderWritable.class, "ORDERS", selectQuery);
    // Set the output table name and the list of columns.
    PhoenixMapReduceUtil.setOutput(job, "ORDER_STATS", "CUST_ID, AMOUNT");
    job.setMapperClass(OrderMapper.class);
    job.setReducerClass(OrderReducer.class);
    job.setOutputFormatClass(PhoenixOutputFormat.class);
    job.setMapOutputKeyClass(LongWritable.class);
    job.setMapOutputValueClass(DoubleWritable.class);
    job.setOutputKeyClass(NullWritable.class);
    job.setOutputValueClass(OrderWritable.class);
    TableMapReduceUtil.addDependencyJars(job);
    job.waitForCompletion(true);
    return 0;
  } catch (Exception ex) {
    LOG.error(String.format("An exception [%s] occurred while performing the job: ", ex.getMessage()));
    return -1;
  }
}
Example 10: testWriteDouble
import org.apache.hadoop.io.DoubleWritable; // import the required package/class
@Test
public void testWriteDouble() throws Exception {
  if (!canTest()) {
    return;
  }
  Double aDouble = 12.34D;
  template.sendBody("direct:write_double", aDouble);
  Configuration conf = new Configuration();
  Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-double");
  SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file1));
  Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
  Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
  reader.next(key, value);
  Double rDouble = ((DoubleWritable) value).get();
  assertEquals(rDouble, aDouble);
  IOHelper.close(reader);
}
Example 11: testWriteDouble
import org.apache.hadoop.io.DoubleWritable; // import the required package/class
@Test
public void testWriteDouble() throws Exception {
  if (!canTest()) {
    return;
  }
  Double aDouble = 12.34D;
  template.sendBody("direct:write_double", aDouble);
  Configuration conf = new Configuration();
  Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-double");
  FileSystem fs1 = FileSystem.get(file1.toUri(), conf);
  SequenceFile.Reader reader = new SequenceFile.Reader(fs1, file1, conf);
  Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
  Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
  reader.next(key, value);
  Double rDouble = ((DoubleWritable) value).get();
  assertEquals(rDouble, aDouble);
  IOHelper.close(reader);
}
Example 12: run
import org.apache.hadoop.io.DoubleWritable; // import the required package/class
public void run(Configuration conf, Path matrixInputPath,
    String meanSpanFileName, Path matrixOutputPath) throws IOException,
    InterruptedException, ClassNotFoundException {
  conf.set(MEANSPANOPTION, meanSpanFileName);
  Job job = new Job(conf);
  job.setJobName("Norm2Job");
  job.setJarByClass(Norm2Job.class);
  FileSystem fs = FileSystem.get(matrixInputPath.toUri(), conf);
  matrixInputPath = fs.makeQualified(matrixInputPath);
  matrixOutputPath = fs.makeQualified(matrixOutputPath);
  FileInputFormat.addInputPath(job, matrixInputPath);
  job.setInputFormatClass(SequenceFileInputFormat.class);
  job.setOutputFormatClass(SequenceFileOutputFormat.class);
  FileOutputFormat.setOutputPath(job, matrixOutputPath);
  job.setMapperClass(MyMapper.class);
  job.setReducerClass(MyReducer.class);
  job.setNumReduceTasks(1);
  job.setOutputKeyClass(NullWritable.class);
  job.setOutputValueClass(DoubleWritable.class);
  job.submit();
  job.waitForCompletion(true);
}
Example 13: loadResult
import org.apache.hadoop.io.DoubleWritable; // import the required package/class
public double loadResult(Path outputDirPath, Configuration conf) throws IOException {
  Path finalNumberFile = new Path(outputDirPath, "part-r-00000");
  SequenceFileIterator<NullWritable, DoubleWritable> iterator =
      new SequenceFileIterator<NullWritable, DoubleWritable>(
          finalNumberFile, true, conf);
  double norm2;
  try {
    Pair<NullWritable, DoubleWritable> next = iterator.next();
    norm2 = next.getSecond().get();
    if (iterator.hasNext())
      throw new IOException("More than one value after norm2Job!");
  } finally {
    Closeables.close(iterator, false);
  }
  return norm2;
}
Example 14: verifyReducerOutput
import org.apache.hadoop.io.DoubleWritable; // import the required package/class
private void verifyReducerOutput(
    DummyRecordWriter<IntWritable, DoubleWritable> writer) {
  Assert.assertEquals("The reducer should output three keys!", 3, writer
      .getKeys().size());
  for (IntWritable key : writer.getKeys()) {
    List<DoubleWritable> list = writer.getValue(key);
    assertEquals("reducer produces more than one value per key!", 1,
        list.size());
    Double value = list.get(0).get();
    switch (key.get()) {
    case 0:
      assertEquals("the computed reconstructionError is incorrect!",
          reconstructionError, value, EPSILON);
      break;
    case 1:
      assertEquals("the computed yNorm is incorrect!", yNorm, value, EPSILON);
      break;
    case 2:
      assertEquals("the computed centralizedYNorm is incorrect!",
          centralizedYNorm, value, EPSILON);
      break;
    default:
      fail("Unknown key in reading the results: " + key);
    }
  }
}
Example 15: convertJavaToHadoop
import org.apache.hadoop.io.DoubleWritable; // import the required package/class
private Class<? extends WritableComparable> convertJavaToHadoop(final Class klass) {
  if (klass.equals(String.class)) {
    return Text.class;
  } else if (klass.equals(Integer.class)) {
    return IntWritable.class;
  } else if (klass.equals(Double.class)) {
    return DoubleWritable.class;
  } else if (klass.equals(Long.class)) {
    return LongWritable.class;
  } else if (klass.equals(Float.class)) {
    return FloatWritable.class;
  } else if (klass.equals(Boolean.class)) {
    return BooleanWritable.class;
  } else {
    throw new IllegalArgumentException("The provided class is not supported: " + klass.getSimpleName());
  }
}