

Java SortedMapWritable Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.io.SortedMapWritable. If you are wondering what exactly the SortedMapWritable class does, how to use it, or where to find usage examples, the curated code samples below may help.


The SortedMapWritable class belongs to the org.apache.hadoop.io package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Java code samples.
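
Before looking at the project examples, here is a minimal self-contained sketch (not taken from any project below) of what the class does: SortedMapWritable is a Writable, key-sorted map of WritableComparable keys to Writable values, so it can be round-tripped with the standard write/readFields contract. The sketch uses the non-generic API found in Hadoop 2.x; newer releases add a type parameter for the key, as Example 1 shows.

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SortedMapWritable;
import org.apache.hadoop.io.Text;

public class SortedMapWritableRoundTrip {
    public static void main(String[] args) throws Exception {
        // Keys are kept in their natural (sorted) order.
        SortedMapWritable row = new SortedMapWritable();
        row.put(new Text("count"), new LongWritable(42));
        row.put(new Text("country"), new Text("Poland"));

        // Serialize through the standard Writable contract ...
        DataOutputBuffer out = new DataOutputBuffer();
        row.write(out);

        // ... and read it back into a fresh instance.
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        SortedMapWritable copy = new SortedMapWritable();
        copy.readFields(in);

        System.out.println(copy.get(new Text("country"))); // prints: Poland
    }
}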

Example 1: readSortedMap

import org.apache.hadoop.io.SortedMapWritable; // import the required package/class
public <K extends WritableComparable<? super K>>
  SortedMapWritable<K> readSortedMap(SortedMapWritable<K> mw)
  throws IOException {
  if (mw == null) {
    mw = new SortedMapWritable<K>();
  }
  int length = in.readMapHeader();
  for (int i = 0; i < length; i++) {
    @SuppressWarnings("unchecked")
    K key = (K) read();
    Writable value = read();
    mw.put(key, value);
  }
  return mw;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 16, Source: TypedBytesWritableInput.java

Example 2: reduce

import org.apache.hadoop.io.SortedMapWritable; // import the required package/class
@Override
public void reduce(Text analyzerKey, Iterable<SortedMapWritable> rows, Context context) throws IOException,
        InterruptedException {

    Analyzer<?> analyzer = ConfigurationSerializer.initializeAnalyzer(analyzerKey.toString(),
            analyzerBeansConfiguration, analysisJob);

    logger.info("analyzerKey = " + analyzerKey.toString() + " rows: ");
    for (SortedMapWritable rowWritable : rows) {
        InputRow inputRow = RowUtils.sortedMapWritableToInputRow(rowWritable, analysisJob.getSourceColumns());
        analyzer.run(inputRow, 1);

        RowUtils.printSortedMapWritable(rowWritable, logger);

        Text finalText = CsvParser.toCsvText(rowWritable);
        context.write(NullWritable.get(), finalText);
    }
    logger.info("end of analyzerKey = " + analyzerKey.toString() + " rows.");

    AnalyzerResult analyzerResult = analyzer.getResult();
    logger.debug("analyzerResult.toString(): " + analyzerResult.toString());
}
 
Developer: tomaszguzialek, Project: hadoop-datacleaner, Lines: 23, Source: FlatFileReducer.java

Example 3: toCsvText

import org.apache.hadoop.io.SortedMapWritable; // import the required package/class
public static Text toCsvText(SortedMapWritable row) {
    StringBuilder csv = new StringBuilder();
    for (@SuppressWarnings("rawtypes")
    Iterator<Entry<WritableComparable, Writable>> iterator = row.entrySet().iterator(); iterator.hasNext();) {
        @SuppressWarnings("rawtypes")
        Entry<WritableComparable, Writable> next = iterator.next();
        if (next.getValue() instanceof Text) {
            csv.append(next.getValue().toString());
        } // else append nothing - the value is null, so the field stays empty.
        if (iterator.hasNext()) {
            csv.append(';');
        }
    }
    return new Text(csv.toString());
}
 
Developer: tomaszguzialek, Project: hadoop-datacleaner, Lines: 19, Source: CsvParser.java
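
A short usage sketch for the helper above (a hypothetical caller, not part of the hadoop-datacleaner project): because the map is key-sorted, the values come out in column-name order and are joined with semicolons, with empty fields for null values.

import org.apache.hadoop.io.SortedMapWritable;
import org.apache.hadoop.io.Text;

// Hypothetical caller of CsvParser.toCsvText from the example above.
public static void printPolandAsCsv() {
    SortedMapWritable row = new SortedMapWritable();
    row.put(new Text("Country name"), new Text("Poland"));
    row.put(new Text("ISO 3166-2"), new Text("PL"));
    row.put(new Text("ISO 3166-3"), new Text("POL"));

    Text csvLine = CsvParser.toCsvText(row);
    System.out.println(csvLine); // prints: Poland;PL;POL (values in key order)
}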

Example 4: testMapper

import org.apache.hadoop.io.SortedMapWritable; // import the required package/class
@Test
public void testMapper() throws IOException {
    SortedMapWritable expectedPoland = new SortedMapWritable();
    expectedPoland.put(new Text("Country name"), new Text("Poland"));
    expectedPoland.put(new Text("ISO 3166-2"), new Text("PL"));
    expectedPoland.put(new Text("ISO 3166-3"), new Text("POL"));
    expectedPoland.put(new Text("ISO Numeric"), new Text("616"));

    mapDriver
            .withInput(
                    new LongWritable(0),
                    new Text(
                            "Country name;ISO 3166-2;ISO 3166-3;ISO Numeric;Linked to country;Synonym1;Synonym2;Synonym3"))
            .withInput(new LongWritable(44), new Text("Poland;PL;POL;616;"));

    List<Pair<Text, SortedMapWritable>> actualOutputs = mapDriver.run();

    Assert.assertEquals(2, actualOutputs.size());
    
    Pair<Text, SortedMapWritable> actualOutputPoland = actualOutputs.get(1);
    Assert.assertTrue(actualOutputPoland.getSecond().containsValue(new Text("Poland")));
}
 
Developer: tomaszguzialek, Project: hadoop-datacleaner, Lines: 23, Source: FlatFileMapperReducerTest.java
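
The mapDriver above is an MRUnit driver. The project's actual test fixture is not shown on this page; a plausible setup sketch is given below, where the driver types follow the mapper and reducer signatures of Examples 13 and 14 and the configuration keys are the ones read in those setup() methods. Treat the whole fixture as an assumption.

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SortedMapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mrunit.mapreduce.MapDriver;
import org.apache.hadoop.mrunit.mapreduce.ReduceDriver;
import org.junit.Before;

private MapDriver<LongWritable, Text, Text, SortedMapWritable> mapDriver;
private ReduceDriver<Text, SortedMapWritable, NullWritable, Text> reduceDriver;

@Before
public void setUp() {
    // Hypothetical fixture: wire the project's mapper and reducer into MRUnit drivers.
    mapDriver = MapDriver.newMapDriver(new FlatFileMapper());
    reduceDriver = ReduceDriver.newReduceDriver(new FlatFileReducer());
    // Both classes load their DataCleaner configuration from the job configuration
    // (see the setup() methods in Examples 13 and 14), so a real test would also set
    // FlatFileTool.ANALYZER_BEANS_CONFIGURATION_DATASTORES_KEY and
    // FlatFileTool.ANALYSIS_JOB_XML_KEY via mapDriver.getConfiguration().set(...).
}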

Example 5: testReducerHeader

import org.apache.hadoop.io.SortedMapWritable; // import the required package/class
@Test
public void testReducerHeader() throws IOException {
    List<SortedMapWritable> rows = new ArrayList<SortedMapWritable>();

    SortedMapWritable header = new SortedMapWritable();
    header.put(new Text("ISO 3166-2_ISO 3166-3"), new Text("ISO 3166-2_ISO 3166-3"));
    header.put(new Text("Country name"), new Text("Country name"));
    header.put(new Text("ISO 3166-2"), new Text("ISO 3166-2"));
    header.put(new Text("ISO 3166-3"), new Text("ISO 3166-3"));
    header.put(new Text("ISO Numeric"), new Text("ISO Numeric"));
    header.put(new Text("Linked to country"), new Text("Linked to country"));
    header.put(new Text("Synonym1"), new Text("Synonym1"));
    header.put(new Text("Synonym2"), new Text("Synonym2"));
    header.put(new Text("Synonym3"), new Text("Synonym3"));
    rows.add(header);

    reduceDriver.withInput(new Text("Value distribution (Country name)"), rows);
    reduceDriver
            .withOutput(
                    NullWritable.get(),
                    new Text(
                            "Country name;ISO 3166-2;ISO 3166-2_ISO 3166-3;ISO 3166-3;ISO Numeric;Linked to country;Synonym1;Synonym2;Synonym3"));
    reduceDriver.runTest();
}
 
Developer: tomaszguzialek, Project: hadoop-datacleaner, Lines: 25, Source: FlatFileMapperReducerTest.java

Example 6: reduce

import org.apache.hadoop.io.SortedMapWritable; // import the required package/class
public void reduce(Text analyzerKey, Iterable<SortedMapWritable> writableResults, Context context)
        throws IOException, InterruptedException {

    Analyzer<?> analyzer = ConfigurationSerializer.initializeAnalyzer(analyzerKey.toString(), analyzerBeansConfiguration, analysisJob);

    logger.info("analyzerKey = " + analyzerKey.toString() + " rows: ");
    for (SortedMapWritable rowWritable : writableResults) {
        InputRow inputRow = RowUtils.sortedMapWritableToInputRow(rowWritable, analysisJob.getSourceColumns());
        analyzer.run(inputRow, 1);

        Result result = ResultUtils.sortedMapWritableToResult(rowWritable);
        ResultUtils.printResult(result, logger);
        Put put = ResultUtils.preparePut(result);
        context.write(NullWritable.get(), put);
    }
    logger.info("end of analyzerKey = " + analyzerKey.toString() + " rows.");

    AnalyzerResult analyzerResult = analyzer.getResult();
    logger.debug("analyzerResult.toString(): " + analyzerResult.toString());
}
 
Developer: tomaszguzialek, Project: hadoop-datacleaner, Lines: 21, Source: HBaseTableReducer.java

Example 7: sortedMapWritableToResult

import org.apache.hadoop.io.SortedMapWritable; // import the required package/class
public static Result sortedMapWritableToResult(SortedMapWritable row) {
    List<Cell> cells = new ArrayList<Cell>();
    for (@SuppressWarnings("rawtypes")
    Map.Entry<WritableComparable, Writable> rowEntry : row.entrySet()) {
        Text columnFamilyAndName = (Text) rowEntry.getKey();
        Text columnValue = (Text) rowEntry.getValue();
        String[] split = columnFamilyAndName.toString().split(":");
        String columnFamily = split[0];
        String columnName = split[1];
        
        Cell cell = new KeyValue(Bytes.toBytes(columnValue.toString()), Bytes.toBytes(columnFamily),
                Bytes.toBytes(columnName), Bytes.toBytes(columnValue.toString()));
        cells.add(cell);
    }
    return Result.create(cells);
}
 
Developer: tomaszguzialek, Project: hadoop-datacleaner, Lines: 17, Source: ResultUtils.java

Example 8: reduce

import org.apache.hadoop.io.SortedMapWritable; // import the required package/class
protected void reduce(IntWritable key, Iterable<SortedMapWritable> values, Context context)
		throws IOException, InterruptedException {
	SortedMapWritable outValue = new SortedMapWritable();
	for (SortedMapWritable v : values) {
		for (@SuppressWarnings("rawtypes")
		Entry<WritableComparable, Writable> entry : v.entrySet()) {
			LongWritable count = (LongWritable) outValue.get(entry.getKey());
			if (count != null) {
				count.set(count.get() + ((LongWritable) entry.getValue()).get());
			} else {
				outValue.put(entry.getKey(),
						new LongWritable(((LongWritable) entry.getValue()).get()));
			}
		}
	}
	context.write(key, outValue);
}
 
Developer: geftimov, Project: hadoop-map-reduce-patterns, Lines: 18, Source: MedianAndStandardDeviationCommentLengthByHour.java
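
A hypothetical mapper that could feed this reducer (it is not taken from the hadoop-map-reduce-patterns project, and the input record layout is an assumption): for each comment it emits the hour of day as the key and a one-entry SortedMapWritable mapping the comment length to a count of 1, which the reducer above then sums per length.

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SortedMapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public static class CommentLengthCountMapper
        extends Mapper<Object, Text, IntWritable, SortedMapWritable> {

    private static final LongWritable ONE = new LongWritable(1);
    private final IntWritable outHour = new IntWritable();

    @Override
    protected void map(Object key, Text value, Context context)
            throws IOException, InterruptedException {
        // Assumed record layout: "<hour>\t<comment text>".
        String[] fields = value.toString().split("\t", 2);
        if (fields.length < 2) {
            return;
        }
        outHour.set(Integer.parseInt(fields[0]));

        // One-entry map: comment length -> count of 1; the reducer merges these per hour.
        SortedMapWritable lengthToCount = new SortedMapWritable();
        lengthToCount.put(new IntWritable(fields[1].length()), ONE);
        context.write(outHour, lengthToCount);
    }
}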

Example 9: write

import org.apache.hadoop.io.SortedMapWritable; // import the required package/class
public void write(Writable w) throws IOException {
  if (w instanceof TypedBytesWritable) {
    writeTypedBytes((TypedBytesWritable) w);
  } else if (w instanceof BytesWritable) {
    writeBytes((BytesWritable) w);
  } else if (w instanceof ByteWritable) {
    writeByte((ByteWritable) w);
  } else if (w instanceof BooleanWritable) {
    writeBoolean((BooleanWritable) w);
  } else if (w instanceof IntWritable) {
    writeInt((IntWritable) w);
  } else if (w instanceof VIntWritable) {
    writeVInt((VIntWritable) w);
  } else if (w instanceof LongWritable) {
    writeLong((LongWritable) w);
  } else if (w instanceof VLongWritable) {
    writeVLong((VLongWritable) w);
  } else if (w instanceof FloatWritable) {
    writeFloat((FloatWritable) w);
  } else if (w instanceof DoubleWritable) {
    writeDouble((DoubleWritable) w);
  } else if (w instanceof Text) {
    writeText((Text) w);
  } else if (w instanceof ArrayWritable) {
    writeArray((ArrayWritable) w);
  } else if (w instanceof MapWritable) {
    writeMap((MapWritable) w);
  } else if (w instanceof SortedMapWritable) {
    writeSortedMap((SortedMapWritable) w);
  } else if (w instanceof Record) {
    writeRecord((Record) w);
  } else {
    writeWritable(w); // last resort
  }
}
 
Developer: naver, Project: hadoop, Lines: 36, Source: TypedBytesWritableOutput.java

Example 10: writeSortedMap

import org.apache.hadoop.io.SortedMapWritable; // import the required package/class
public void writeSortedMap(SortedMapWritable smw) throws IOException {
  out.writeMapHeader(smw.size());
  for (Map.Entry<WritableComparable, Writable> entry : smw.entrySet()) {
    write(entry.getKey());
    write(entry.getValue());
  }
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: TypedBytesWritableOutput.java
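
Examples 1 and 10 are the two halves of the same typed-bytes protocol. The round-trip sketch below is not from either project; it assumes the org.apache.hadoop.typedbytes.TypedBytesWritableInput/TypedBytesWritableOutput classes shipped with hadoop-streaming, including their get(DataInput)/get(DataOutput) factory methods.

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SortedMapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.typedbytes.TypedBytesWritableInput;
import org.apache.hadoop.typedbytes.TypedBytesWritableOutput;

public static void roundTripThroughTypedBytes() throws Exception {
    SortedMapWritable original = new SortedMapWritable();
    original.put(new Text("clicks"), new LongWritable(7));
    original.put(new Text("user"), new Text("alice"));

    // Encode as typed bytes: a map header followed by key/value pairs (Example 10).
    DataOutputBuffer buffer = new DataOutputBuffer();
    TypedBytesWritableOutput.get(buffer).writeSortedMap(original);

    // Decode with the reader from Example 1; passing null lets it allocate the map.
    DataInputBuffer in = new DataInputBuffer();
    in.reset(buffer.getData(), buffer.getLength());
    SortedMapWritable copy = TypedBytesWritableInput.get(in).readSortedMap(null);

    System.out.println(copy.get(new Text("user"))); // prints: alice
}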

Example 11: write

import org.apache.hadoop.io.SortedMapWritable; // import the required package/class
public void write(Writable w) throws IOException {
  if (w instanceof TypedBytesWritable) {
    writeTypedBytes((TypedBytesWritable) w);
  } else if (w instanceof BytesWritable) {
    writeBytes((BytesWritable) w);
  } else if (w instanceof ByteWritable) {
    writeByte((ByteWritable) w);
  } else if (w instanceof BooleanWritable) {
    writeBoolean((BooleanWritable) w);
  } else if (w instanceof IntWritable) {
    writeInt((IntWritable) w);
  } else if (w instanceof VIntWritable) {
    writeVInt((VIntWritable) w);
  } else if (w instanceof LongWritable) {
    writeLong((LongWritable) w);
  } else if (w instanceof VLongWritable) {
    writeVLong((VLongWritable) w);
  } else if (w instanceof FloatWritable) {
    writeFloat((FloatWritable) w);
  } else if (w instanceof DoubleWritable) {
    writeDouble((DoubleWritable) w);
  } else if (w instanceof Text) {
    writeText((Text) w);
  } else if (w instanceof ArrayWritable) {
    writeArray((ArrayWritable) w);
  } else if (w instanceof MapWritable) {
    writeMap((MapWritable) w);
  } else if (w instanceof SortedMapWritable) {
    writeSortedMap((SortedMapWritable<?>) w);
  } else if (w instanceof Record) {
    writeRecord((Record) w);
  } else {
    writeWritable(w); // last resort
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 36, Source: TypedBytesWritableOutput.java

Example 12: writeSortedMap

import org.apache.hadoop.io.SortedMapWritable; // import the required package/class
public void writeSortedMap(SortedMapWritable<?> smw) throws IOException {
  out.writeMapHeader(smw.size());
  for (Map.Entry<? extends WritableComparable<?>, Writable> entry : smw.entrySet()) {
    write(entry.getKey());
    write(entry.getValue());
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 8, Source: TypedBytesWritableOutput.java

Example 13: setup

import org.apache.hadoop.io.SortedMapWritable; // import the required package/class
protected void setup(Reducer<Text, SortedMapWritable, NullWritable, Text>.Context context) throws IOException,
        InterruptedException {
    Configuration mapReduceConfiguration = context.getConfiguration();
    String datastoresConfigurationLines = mapReduceConfiguration
            .get(FlatFileTool.ANALYZER_BEANS_CONFIGURATION_DATASTORES_KEY);
    String analysisJobXml = mapReduceConfiguration.get(FlatFileTool.ANALYSIS_JOB_XML_KEY);
    analyzerBeansConfiguration = ConfigurationSerializer
            .deserializeAnalyzerBeansDatastores(datastoresConfigurationLines);
    analysisJob = ConfigurationSerializer.deserializeAnalysisJobFromXml(analysisJobXml, analyzerBeansConfiguration);
    super.setup(context);
}
 
Developer: tomaszguzialek, Project: hadoop-datacleaner, Lines: 12, Source: FlatFileReducer.java

Example 14: setup

import org.apache.hadoop.io.SortedMapWritable; // import the required package/class
protected void setup(Mapper<LongWritable, Text, Text, SortedMapWritable>.Context context) throws IOException,
        InterruptedException {
    Configuration mapReduceConfiguration = context.getConfiguration();
    String datastoresConfigurationLines = mapReduceConfiguration
            .get(FlatFileTool.ANALYZER_BEANS_CONFIGURATION_DATASTORES_KEY);
    String analysisJobXml = mapReduceConfiguration.get(FlatFileTool.ANALYSIS_JOB_XML_KEY);
    this.mapperDelegate = new MapperDelegate(datastoresConfigurationLines, analysisJobXml);
    csvParser = new CsvParser(mapperDelegate.getAnalysisJob().getSourceColumns(), ";");
    super.setup(context);
}
 
Developer: tomaszguzialek, Project: hadoop-datacleaner, Lines: 11, Source: FlatFileMapper.java

Example 15: runMapReduceJob

import org.apache.hadoop.io.SortedMapWritable; // import the required package/class
private int runMapReduceJob(String input, String output, Configuration mapReduceConfiguration) throws IOException,
        InterruptedException, ClassNotFoundException {

    Job job = Job.getInstance(mapReduceConfiguration);
    job.setJarByClass(FlatFileMapper.class);
    job.setJobName(this.getClass().getName());

    FileInputFormat.setInputPaths(job, new Path(input));
    FileOutputFormat.setOutputPath(job, new Path(output));

    job.setMapperClass(FlatFileMapper.class);
    job.setReducerClass(FlatFileReducer.class);

    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(SortedMapWritable.class);

    job.setNumReduceTasks(1);

    // TODO externalize to args?
    // mapReduceConfiguration.addResource(new Path("/etc/hadoop/conf/core-site.xml"));

    FileSystem fileSystem = FileSystem.get(mapReduceConfiguration);
    if (fileSystem.exists(new Path(output))) {
        fileSystem.delete(new Path(output), true);
    }

    boolean success = job.waitForCompletion(true);
    return success ? 0 : 1;
}
 
Developer: tomaszguzialek, Project: hadoop-datacleaner, Lines: 29, Source: FlatFileTool.java
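
For completeness, a hypothetical driver entry point is sketched below to show how a job like this is usually launched. It is not the project's actual main method: it assumes a run() wrapper around the runMapReduceJob method above and uses only the standard Tool/ToolRunner pattern.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

// Hypothetical driver, not the project's actual FlatFileTool entry point.
public class FlatFileDriver extends Configured implements Tool {

    @Override
    public int run(String[] args) throws Exception {
        if (args.length < 2) {
            System.err.println("Usage: FlatFileDriver <input path> <output path>");
            return 2;
        }
        // Assumes runMapReduceJob(String, String, Configuration) from Example 15 is
        // declared in (or made accessible to) this class; the real tool also puts the
        // DataCleaner configuration and the analysis job XML into the Configuration first.
        return runMapReduceJob(args[0], args[1], getConf());
    }

    public static void main(String[] args) throws Exception {
        System.exit(ToolRunner.run(new Configuration(), new FlatFileDriver(), args));
    }
}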


Note: The org.apache.hadoop.io.SortedMapWritable class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult the corresponding project's License before redistributing or using the code. Do not republish without permission.