

Java DoubleWritable Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.io.DoubleWritable. If you are wondering what the DoubleWritable class is for, how to use it, or what it looks like in real code, the curated class examples below may help.


The DoubleWritable class belongs to the org.apache.hadoop.io package. Fifteen code examples of the DoubleWritable class are shown below, sorted by popularity by default.
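Before turning to the project examples, here is a minimal standalone sketch written for this overview (not taken from any of the projects below; the class name DoubleWritableDemo is made up for illustration, and a standard Hadoop 2.x client dependency on the classpath is assumed). It shows the core idea: DoubleWritable is a WritableComparable wrapper around a Java double that Hadoop can use as a key or value and serialize via the write()/readFields() contract.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.DoubleWritable;

// Illustrative sketch only: round-trip a DoubleWritable through the Writable
// serialization contract, the same way Hadoop serializes map output values.
public class DoubleWritableDemo {
    public static void main(String[] args) throws IOException {
        DoubleWritable original = new DoubleWritable(12.34);

        // Serialize to bytes via write(DataOutput).
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));

        // Deserialize into a fresh instance via readFields(DataInput).
        DoubleWritable copy = new DoubleWritable();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

        System.out.println(copy.get());               // 12.34
        System.out.println(original.compareTo(copy)); // 0 -- it is also WritableComparable
    }
}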

Example 1: main

import org.apache.hadoop.io.DoubleWritable; // import the required package/class
public static void main(String[] args) throws Exception {
	Configuration conf = new Configuration();
	
	Job job =Job.getInstance(conf);
	job.setJobName("TF-IDFCount");
	job.setJarByClass(TF_IDF.class);
	
	job.setMapOutputKeyClass(Text.class);
	job.setMapOutputValueClass(TextArrayWritable.class);
	
	job.setOutputKeyClass(Text.class);
	job.setOutputValueClass(DoubleWritable.class);
	
	job.setMapperClass(TF_IDFMap.class);
	job.setReducerClass(TF_IDFReduce.class);
	
	job.setInputFormatClass(TextInputFormat.class);
	job.setOutputFormatClass(TextOutputFormat.class);
	
	FileInputFormat.addInputPath(job, new Path(args[0]));
	FileInputFormat.addInputPath(job, new Path(args[1]));
	FileOutputFormat.setOutputPath(job, new Path(args[2]));
	boolean wait = job.waitForCompletion(true);
	System.exit(wait ? 0 : 1);
}
 
Developer: lzmhhh123, Project: Wikipedia-Index, Lines: 26, Source: TF_IDF.java

Example 2: main

import org.apache.hadoop.io.DoubleWritable; // import the required package/class
public static void main(String[] args) throws Exception {

        Configuration conf = new Configuration();
        conf.setFloat("beta", Float.parseFloat(args[3]));
        Job job = Job.getInstance(conf);
        job.setJarByClass(UnitSum.class);

        ChainMapper.addMapper(job, PassMapper.class, Object.class, Text.class, Text.class, DoubleWritable.class, conf);
        ChainMapper.addMapper(job, BetaMapper.class, Text.class, DoubleWritable.class, Text.class, DoubleWritable.class, conf);

        job.setReducerClass(SumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(DoubleWritable.class);

        MultipleInputs.addInputPath(job, new Path(args[0]), TextInputFormat.class, PassMapper.class);
        MultipleInputs.addInputPath(job, new Path(args[1]), TextInputFormat.class, BetaMapper.class);

        FileOutputFormat.setOutputPath(job, new Path(args[2]));
        job.waitForCompletion(true);
    }
 
Developer: yogykwan, Project: mapreduce-samples, Lines: 21, Source: UnitSum.java

Example 3: readFields

import org.apache.hadoop.io.DoubleWritable; // import the required package/class
/**
 * Read (i.e., deserialize) an employee
 */
@Override
public void readFields(DataInput in) throws IOException {
	name = new Text();
	name.readFields(in);
	address = new Text();
	address.readFields(in);
	company = new Text();
	company.readFields(in);
	salary = new DoubleWritable();
	salary.readFields(in);
	department = new Text();
	department.readFields(in);
	isManager = new BooleanWritable();
	isManager.readFields(in);
}
 
Developer: amritbhat786, Project: DocIT, Lines: 19, Source: Employee.java

Example 4: main

import org.apache.hadoop.io.DoubleWritable; // import the required package/class
public static void main(String[] args) throws Exception {
	Configuration conf = new Configuration();
	conf.set("xmlinput.start", "<page>");
	conf.set("xmlinput.end", "</page>");
	
	Job job =Job.getInstance(conf);
	job.setJobName("TermFrequencyCount");
	job.setJarByClass(TF.class);
	
	job.setMapOutputKeyClass(Text.class);
	job.setMapOutputValueClass(IntArrayWritable.class);
	
	job.setOutputKeyClass(Text.class);
	job.setOutputValueClass(DoubleWritable.class);
	
	job.setMapperClass(TFMap.class);
	job.setReducerClass(TFReduce.class);
	
	job.setInputFormatClass(XmlInputFormat.class);
	job.setOutputFormatClass(TextOutputFormat.class);
	
	FileInputFormat.addInputPath(job, new Path(args[0]));
	FileOutputFormat.setOutputPath(job, new Path(args[1]));
	boolean wait = job.waitForCompletion(true);
	System.exit(wait ? 0 : 1);
}
 
Developer: lzmhhh123, Project: Wikipedia-Index, Lines: 27, Source: TF.java

Example 5: main

import org.apache.hadoop.io.DoubleWritable; // import the required package/class
public static void main(String[] args) throws Exception {
	Configuration conf = new Configuration();

	Job job = Job.getInstance(conf);
	job.setJarByClass(Multiplication.class);

	ChainMapper.addMapper(job, CooccurrenceMapper.class, LongWritable.class, Text.class, Text.class, Text.class, conf);
	ChainMapper.addMapper(job, RatingMapper.class, Text.class, Text.class, Text.class, Text.class, conf);

	job.setMapperClass(CooccurrenceMapper.class);
	job.setMapperClass(RatingMapper.class);

	job.setReducerClass(MultiplicationReducer.class);

	job.setMapOutputKeyClass(Text.class);
	job.setMapOutputValueClass(Text.class);
	job.setOutputKeyClass(Text.class);
	job.setOutputValueClass(DoubleWritable.class);

	MultipleInputs.addInputPath(job, new Path(args[0]), TextInputFormat.class, CooccurrenceMapper.class);
	MultipleInputs.addInputPath(job, new Path(args[1]), TextInputFormat.class, RatingMapper.class);

	TextOutputFormat.setOutputPath(job, new Path(args[2]));
	
	job.waitForCompletion(true);
}
 
Developer: yogykwan, Project: mapreduce-samples, Lines: 27, Source: Multiplication.java

Example 6: main

import org.apache.hadoop.io.DoubleWritable; // import the required package/class
public static void main(String[] args) throws Exception {

        Configuration conf = new Configuration();

        Job job = Job.getInstance(conf);
        job.setMapperClass(SumMapper.class);
        job.setReducerClass(SumReducer.class);

        job.setJarByClass(Sum.class);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(DoubleWritable.class);

        TextInputFormat.setInputPaths(job, new Path(args[0]));
        TextOutputFormat.setOutputPath(job, new Path(args[1]));

        job.waitForCompletion(true);
    }
 
Developer: yogykwan, Project: mapreduce-samples, Lines: 21, Source: Sum.java

Example 7: init

import org.apache.hadoop.io.DoubleWritable; // import the required package/class
@Override
public void init() throws IOException {
  registerKey(NullWritable.class.getName(), NullWritableSerializer.class);
  registerKey(Text.class.getName(), TextSerializer.class);
  registerKey(LongWritable.class.getName(), LongWritableSerializer.class);
  registerKey(IntWritable.class.getName(), IntWritableSerializer.class);
  registerKey(Writable.class.getName(), DefaultSerializer.class);
  registerKey(BytesWritable.class.getName(), BytesWritableSerializer.class);
  registerKey(BooleanWritable.class.getName(), BoolWritableSerializer.class);
  registerKey(ByteWritable.class.getName(), ByteWritableSerializer.class);
  registerKey(FloatWritable.class.getName(), FloatWritableSerializer.class);
  registerKey(DoubleWritable.class.getName(), DoubleWritableSerializer.class);
  registerKey(VIntWritable.class.getName(), VIntWritableSerializer.class);
  registerKey(VLongWritable.class.getName(), VLongWritableSerializer.class);

  LOG.info("Hadoop platform inited");
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 18, Source: HadoopPlatform.java

Example 8: configPass2

import org.apache.hadoop.io.DoubleWritable; // import the required package/class
protected JobConf configPass2() throws Exception {
    final JobConf conf = new JobConf(getConf(), MatvecNaive.class);
    conf.set("number_nodes", "" + number_nodes);

    conf.setJobName("MatvecNaive_pass2");

    conf.setMapperClass(MapPass2.class);
    conf.setReducerClass(RedPass2.class);

    FileInputFormat.setInputPaths(conf, tempmv_path);
    FileOutputFormat.setOutputPath(conf, output_path);

    conf.setNumReduceTasks(nreducer);

    conf.setOutputKeyClass(LongWritable.class);
    conf.setMapOutputValueClass(DoubleWritable.class);
    conf.setOutputValueClass(Text.class);

    return conf;
}
 
Developer: thrill, Project: fst-bench, Lines: 22, Source: MatvecNaive.java

Example 9: run

import org.apache.hadoop.io.DoubleWritable; // import the required package/class
@Override
public int run(String[] args) throws Exception {
    try {
        final Configuration configuration = HBaseConfiguration.create(getConf());
        setConf(configuration);
        final Job job = Job.getInstance(configuration, "phoenix-mr-order_stats-job");
        final String selectQuery = "SELECT ORDER_ID, CUST_ID, AMOUNT FROM ORDERS ";
        // set the input table and select query. you can also pass in the list of columns
        PhoenixMapReduceUtil.setInput(job, OrderWritable.class, "ORDERS", selectQuery);
        // set the output table name and the list of columns.
        PhoenixMapReduceUtil.setOutput(job, "ORDER_STATS", "CUST_ID, AMOUNT");
        job.setMapperClass(OrderMapper.class);
        job.setReducerClass(OrderReducer.class);
        job.setOutputFormatClass(PhoenixOutputFormat.class);
        job.setMapOutputKeyClass(LongWritable.class);
        job.setMapOutputValueClass(DoubleWritable.class);
        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(OrderWritable.class);
        TableMapReduceUtil.addDependencyJars(job);
        job.waitForCompletion(true);
        return 0;
    } catch (Exception ex) {
        LOG.error(String.format("An exception [%s] occurred while performing the job: ", ex.getMessage()));
        return -1;
    }
}
 
Developer: mravi, Project: pro-phoenix, Lines: 27, Source: OrderStatsApp.java

Example 10: testWriteDouble

import org.apache.hadoop.io.DoubleWritable; // import the required package/class
@Test
public void testWriteDouble() throws Exception {
    if (!canTest()) {
        return;
    }
    Double aDouble = 12.34D;
    template.sendBody("direct:write_double", aDouble);

    Configuration conf = new Configuration();
    Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-double");
    SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file1));
    Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
    Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
    reader.next(key, value);
    Double rDouble = ((DoubleWritable) value).get();
    assertEquals(rDouble, aDouble);

    IOHelper.close(reader);
}
 
Developer: HydAu, Project: Camel, Lines: 20, Source: HdfsProducerTest.java

Example 11: testWriteDouble

import org.apache.hadoop.io.DoubleWritable; // import the required package/class
@Test
public void testWriteDouble() throws Exception {
    if (!canTest()) {
        return;
    }
    Double aDouble = 12.34D;
    template.sendBody("direct:write_double", aDouble);

    Configuration conf = new Configuration();
    Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-double");
    FileSystem fs1 = FileSystem.get(file1.toUri(), conf);
    SequenceFile.Reader reader = new SequenceFile.Reader(fs1, file1, conf);
    Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
    Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
    reader.next(key, value);
    Double rDouble = ((DoubleWritable) value).get();
    assertEquals(rDouble, aDouble);

    IOHelper.close(reader);
}
 
Developer: HydAu, Project: Camel, Lines: 21, Source: HdfsProducerTest.java

Example 12: run

import org.apache.hadoop.io.DoubleWritable; // import the required package/class
public void run(Configuration conf, Path matrixInputPath,
    String meanSpanFileName, Path matrixOutputPath) throws IOException,
    InterruptedException, ClassNotFoundException {
  conf.set(MEANSPANOPTION, meanSpanFileName);
  Job job = new Job(conf);
  job.setJobName("Norm2Job");
  job.setJarByClass(Norm2Job.class);
  FileSystem fs = FileSystem.get(matrixInputPath.toUri(), conf);
  matrixInputPath = fs.makeQualified(matrixInputPath);
  matrixOutputPath = fs.makeQualified(matrixOutputPath);
  FileInputFormat.addInputPath(job, matrixInputPath);
  job.setInputFormatClass(SequenceFileInputFormat.class);
  job.setOutputFormatClass(SequenceFileOutputFormat.class);
  FileOutputFormat.setOutputPath(job, matrixOutputPath);
  job.setMapperClass(MyMapper.class);
  job.setReducerClass(MyReducer.class);
  job.setNumReduceTasks(1);
  job.setOutputKeyClass(NullWritable.class);
  job.setOutputValueClass(DoubleWritable.class);
  job.submit();
  job.waitForCompletion(true);
}
 
Developer: SiddharthMalhotra, Project: sPCA, Lines: 23, Source: Norm2Job.java

Example 13: loadResult

import org.apache.hadoop.io.DoubleWritable; // import the required package/class
public double loadResult(Path outputDirPath, Configuration conf) throws IOException {
  Path finalNumberFile = new Path(outputDirPath, "part-r-00000");
  SequenceFileIterator<NullWritable, DoubleWritable> iterator = 
      new SequenceFileIterator<NullWritable, DoubleWritable>(
      finalNumberFile, true, conf);
  double norm2;
  try {
    Pair<NullWritable, DoubleWritable> next = iterator.next();
    norm2 = next.getSecond().get();
    if (iterator.hasNext())
      throw new IOException("More than one value after norm2Job!");
  } finally {
    Closeables.close(iterator, false);
  }
  return norm2;
}
 
Developer: SiddharthMalhotra, Project: sPCA, Lines: 17, Source: Norm2Job.java

Example 14: verifyReducerOutput

import org.apache.hadoop.io.DoubleWritable; // import the required package/class
private void verifyReducerOutput(
    DummyRecordWriter<IntWritable, DoubleWritable> writer) {
  Assert.assertEquals("The reducer should output three key!", 3, writer
      .getKeys().size());
  for (IntWritable key : writer.getKeys()) {
    List<DoubleWritable> list = writer.getValue(key);
    assertEquals("reducer produces more than one values per key!", 1,
        list.size());
    Double value = list.get(0).get();
    switch (key.get()) {
    case 0:
      assertEquals("the computed reconstructionError is incorrect!",
          reconstructionError, value, EPSILON);
      break;
    case 1:
      assertEquals("the computed yNorm is incorrect!", yNorm, value, EPSILON);
      break;
    case 2:
      assertEquals("the computed centralizedYNorm is incorrect!",
          centralizedYNorm, value, EPSILON);
      break;
    default:
      fail("Unknown key in reading the results: " + key);
    }
  }
}
 
Developer: SiddharthMalhotra, Project: sPCA, Lines: 27, Source: ReconstructionErrJobTest.java

Example 15: convertJavaToHadoop

import org.apache.hadoop.io.DoubleWritable; // import the required package/class
private Class<? extends WritableComparable> convertJavaToHadoop(final Class klass) {
    if (klass.equals(String.class)) {
        return Text.class;
    } else if (klass.equals(Integer.class)) {
        return IntWritable.class;
    } else if (klass.equals(Double.class)) {
        return DoubleWritable.class;
    } else if (klass.equals(Long.class)) {
        return LongWritable.class;
    } else if (klass.equals(Float.class)) {
        return FloatWritable.class;
    } else if (klass.equals(Boolean.class)) {
        return BooleanWritable.class;
    } else {
        throw new IllegalArgumentException("The provided class is not supported: " + klass.getSimpleName());
    }
}
 
Developer: graben1437, Project: titan0.5.4-hbase1.1.1-custom, Lines: 18, Source: HadoopPipeline.java


Note: The org.apache.hadoop.io.DoubleWritable class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please refer to each project's license before distributing or using the code; do not reproduce this compilation without permission.