

Java SortedMapWritable.put Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.io.SortedMapWritable.put. If you are wondering what SortedMapWritable.put does, how to use it, or where to find examples of it in practice, the curated method examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.io.SortedMapWritable.


Below are 8 code examples of SortedMapWritable.put, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
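
Before the collected examples, here is a minimal, self-contained sketch (written for this article, not taken from any of the projects below) of what SortedMapWritable.put does: it stores a WritableComparable key with a Writable value, and iteration follows the keys' natural ordering. It uses the non-generic SortedMapWritable seen in most of the examples below.

import java.util.Map;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SortedMapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;

public class SortedMapWritablePutDemo {
    @SuppressWarnings("rawtypes")
    public static void main(String[] args) {
        SortedMapWritable map = new SortedMapWritable();
        map.put(new Text("beta"), new IntWritable(2));
        map.put(new Text("alpha"), new IntWritable(1));
        // Entries come back sorted by key, regardless of insertion order:
        // alpha -> 1, then beta -> 2.
        for (Map.Entry<WritableComparable, Writable> e : map.entrySet()) {
            System.out.println(e.getKey() + " -> " + e.getValue());
        }
    }
}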

Example 1: readSortedMap

import org.apache.hadoop.io.SortedMapWritable; // import the package/class this method depends on
public <K extends WritableComparable<? super K>>
  SortedMapWritable<K> readSortedMap(SortedMapWritable<K> mw)
  throws IOException {
  if (mw == null) {
    mw = new SortedMapWritable<K>();
  }
  int length = in.readMapHeader();
  for (int i = 0; i < length; i++) {
    @SuppressWarnings("unchecked")
    K key = (K) read();
    Writable value = read();
    mw.put(key, value);
  }
  return mw;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 16, Source: TypedBytesWritableInput.java
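
readSortedMap above decodes the typed-bytes wire format. As a point of reference, here is a minimal sketch (not from the aliyun-oss-hadoop-fs project) of the plain Writable round trip that a put-populated SortedMapWritable also supports:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.SortedMapWritable;
import org.apache.hadoop.io.Text;

public class SortedMapRoundTrip {
    public static void main(String[] args) throws IOException {
        SortedMapWritable original = new SortedMapWritable();
        original.put(new Text("iso2"), new Text("PL"));

        // Serialize through the standard Writable contract...
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));

        // ...then read it back, analogous to what readSortedMap does
        // for the typed-bytes encoding.
        SortedMapWritable copy = new SortedMapWritable();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(copy.get(new Text("iso2"))); // PL
    }
}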

Example 2: testMapper

import org.apache.hadoop.io.SortedMapWritable; // import the package/class this method depends on
@Test
public void testMapper() throws IOException {
    SortedMapWritable expectedPoland = new SortedMapWritable();
    expectedPoland.put(new Text("Country name"), new Text("Poland"));
    expectedPoland.put(new Text("ISO 3166-2"), new Text("PL"));
    expectedPoland.put(new Text("ISO 3166-3"), new Text("POL"));
    expectedPoland.put(new Text("ISO Numeric"), new Text("616"));

    mapDriver
            .withInput(
                    new LongWritable(0),
                    new Text(
                            "Country name;ISO 3166-2;ISO 3166-3;ISO Numeric;Linked to country;Synonym1;Synonym2;Synonym3"))
            .withInput(new LongWritable(44), new Text("Poland;PL;POL;616;"));

    List<Pair<Text, SortedMapWritable>> actualOutputs = mapDriver.run();

    Assert.assertEquals(2, actualOutputs.size());
    
    Pair<Text, SortedMapWritable> actualOutputPoland = actualOutputs.get(0);
    // containsValue compares Writable instances, so look up a Text (not a String)
    // and assert the boolean result instead of discarding it:
    Assert.assertTrue(actualOutputPoland.getSecond().containsValue(new Text("Poland")));
}
 
Developer: tomaszguzialek, Project: hadoop-datacleaner, Lines: 23, Source: FlatFileMapperReducerTest.java

Example 3: testReducerHeader

import org.apache.hadoop.io.SortedMapWritable; // import the package/class this method depends on
@Test
public void testReducerHeader() throws IOException {
    List<SortedMapWritable> rows = new ArrayList<SortedMapWritable>();

    SortedMapWritable header = new SortedMapWritable();
    header.put(new Text("ISO 3166-2_ISO 3166-3"), new Text("ISO 3166-2_ISO 3166-3"));
    header.put(new Text("Country name"), new Text("Country name"));
    header.put(new Text("ISO 3166-2"), new Text("ISO 3166-2"));
    header.put(new Text("ISO 3166-3"), new Text("ISO 3166-3"));
    header.put(new Text("ISO Numeric"), new Text("ISO Numeric"));
    header.put(new Text("Linked to country"), new Text("Linked to country"));
    header.put(new Text("Synonym1"), new Text("Synonym1"));
    header.put(new Text("Synonym2"), new Text("Synonym2"));
    header.put(new Text("Synonym3"), new Text("Synonym3"));
    rows.add(header);

    reduceDriver.withInput(new Text("Value distribution (Country name)"), rows);
    reduceDriver
            .withOutput(
                    NullWritable.get(),
                    new Text(
                            "Country name;ISO 3166-2;ISO 3166-2_ISO 3166-3;ISO 3166-3;ISO Numeric;Linked to country;Synonym1;Synonym2;Synonym3"));
    reduceDriver.runTest();
}
 
Developer: tomaszguzialek, Project: hadoop-datacleaner, Lines: 25, Source: FlatFileMapperReducerTest.java

Example 4: reduce

import org.apache.hadoop.io.SortedMapWritable; // import the package/class this method depends on
protected void reduce(IntWritable key, Iterable<SortedMapWritable> values, Context context)
		throws IOException, InterruptedException {
	// Merge the incoming {commentLength -> count} maps, summing counts per length.
	SortedMapWritable outValue = new SortedMapWritable();
	for (SortedMapWritable v : values) {
		for (@SuppressWarnings("rawtypes")
		Entry<WritableComparable, Writable> entry : v.entrySet()) {
			LongWritable count = (LongWritable) outValue.get(entry.getKey());
			if (count != null) {
				count.set(count.get() + ((LongWritable) entry.getValue()).get());
			} else {
				// Copy the LongWritable rather than aliasing the instance from `values`.
				outValue.put(entry.getKey(),
						new LongWritable(((LongWritable) entry.getValue()).get()));
			}
		}
	}
	context.write(key, outValue);
}
 
Developer: geftimov, Project: hadoop-map-reduce-patterns, Lines: 18, Source: MedianAndStandardDeviationCommentLengthByHour.java
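
The defensive new LongWritable(...) copy in this reducer matters: Hadoop reuses the Writable objects it hands out while iterating over values, so holding references into them is risky. A small stand-alone sketch of the classic reuse pitfall (illustrative, not from the project):

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SortedMapWritable;
import org.apache.hadoop.io.Text;

public class WritableReuseDemo {
    public static void main(String[] args) {
        SortedMapWritable out = new SortedMapWritable();
        LongWritable reused = new LongWritable(1); // stands in for the reducer's reused value object
        out.put(new Text("a"), reused);            // aliasing: the map entry shares the object
        reused.set(99);                            // the framework overwrites it on the next iteration...
        System.out.println(out.get(new Text("a"))); // ...so the stored entry now reads 99
        out.put(new Text("b"), new LongWritable(reused.get())); // a copy, as in the reducer above, is safe
    }
}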

Example 5: testReducerPoland

import org.apache.hadoop.io.SortedMapWritable; // import the package/class this method depends on
@Test
public void testReducerPoland() throws IOException {
    List<SortedMapWritable> rows = new ArrayList<SortedMapWritable>();

    SortedMapWritable poland = new SortedMapWritable();
    poland.put(new Text("Country name"), new Text("Poland"));
    poland.put(new Text("ISO 3166-2"), new Text("PL"));
    poland.put(new Text("ISO 3166-3"), new Text("POL"));
    rows.add(poland);

    reduceDriver.withInput(new Text("Value distribution (Country name)"), rows);
    reduceDriver.withOutput(NullWritable.get(), new Text("Poland;PL;POL"));
    reduceDriver.runTest();
}
 
Developer: tomaszguzialek, Project: hadoop-datacleaner, Lines: 16, Source: FlatFileMapperReducerTest.java

Example 6: inputRowToSortedMapWritable

import org.apache.hadoop.io.SortedMapWritable; // import the package/class this method depends on
public static SortedMapWritable inputRowToSortedMapWritable(InputRow inputRow) {
    SortedMapWritable rowWritable = new SortedMapWritable();
    for (InputColumn<?> inputColumn : inputRow.getInputColumns()) {
        String columnName = inputColumn.getName();
        Object value = inputRow.getValue(inputColumn);
        if (value != null) {
            rowWritable.put(new Text(columnName), new Text(value.toString()));
        } else {
            // Store an explicit NullWritable so null values survive the conversion.
            rowWritable.put(new Text(columnName), NullWritable.get());
        }
    }
    return rowWritable;
}
 
Developer: tomaszguzialek, Project: hadoop-datacleaner, Lines: 13, Source: RowUtils.java
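
Because null column values are stored as an explicit NullWritable rather than skipped, a consumer can tell "column was null" apart from "column absent". The sketch below mirrors the map shape produced by inputRowToSortedMapWritable without depending on DataCleaner's InputRow API; the column names are made up:

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SortedMapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

public class NullColumnDemo {
    public static void main(String[] args) {
        SortedMapWritable row = new SortedMapWritable();
        row.put(new Text("country_name"), new Text("Poland")); // non-null value -> Text
        row.put(new Text("synonym1"), NullWritable.get());     // null value -> NullWritable

        Writable value = row.get(new Text("synonym1"));
        if (value instanceof NullWritable) {
            System.out.println("synonym1 was null in the source row");
        }
    }
}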

Example 7: map

import org.apache.hadoop.io.SortedMapWritable; // import the package/class this method depends on
@SuppressWarnings("deprecation")
public void map(Object key, Text value, Context context) throws IOException,
		InterruptedException {
	Map<String, String> parsed = transformXmlToMap(value.toString());
	// Grab the "CreationDate" field,
	// since it is what we are grouping by
	String strDate = parsed.get("CreationDate");
	// Grab the comment to find the length
	String text = parsed.get("Text");
	// Get the hour this comment was posted in
	if (isNullOrEmpty(strDate) || isNullOrEmpty(text)) {
		return;
	}

	Date creationDate;

	try {
		creationDate = DATE_FORMAT.parse(strDate);
	} catch (ParseException e) {
		e.printStackTrace();
		return;
	}

	outHour.set(creationDate.getHours());
	commentLength.set(text.length());
	SortedMapWritable outCommentLength = new SortedMapWritable();
	outCommentLength.put(commentLength, ONE);
	context.write(outHour, outCommentLength);
}
 
Developer: geftimov, Project: hadoop-map-reduce-patterns, Lines: 30, Source: MedianAndStandardDeviationCommentLengthByHour.java
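
The class name (MedianAndStandardDeviationCommentLengthByHour) hints at why a SortedMapWritable is used here instead of a plain MapWritable: with the length -> count entries already sorted, the median falls out of a single pass. A simplified sketch, assuming the keys are IntWritable comment lengths as set by this mapper and the values are LongWritable counts as merged by the reducer in Example 4 (it returns the lower median for even totals):

import java.util.Map;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SortedMapWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;

public class MedianFromSortedCounts {
    @SuppressWarnings("rawtypes")
    public static int lowerMedian(SortedMapWritable lengthCounts, long totalComments) {
        long seen = 0;
        // Keys iterate in ascending length order, so the first key that
        // carries the running count past the midpoint is the median.
        for (Map.Entry<WritableComparable, Writable> e : lengthCounts.entrySet()) {
            seen += ((LongWritable) e.getValue()).get();
            if (seen >= (totalComments + 1) / 2) {
                return ((IntWritable) e.getKey()).get();
            }
        }
        return -1; // empty map
    }
}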

Example 8: testDenmark

import org.apache.hadoop.io.SortedMapWritable; // import the package/class this method depends on
@Test
public void testDenmark() throws IOException {
	ImmutableBytesWritable inputKey = new ImmutableBytesWritable(
			Bytes.toBytes("Denmark"));

	List<Cell> cells = new ArrayList<Cell>();
	Cell cell = new KeyValue(Bytes.toBytes("Denmark"),
			Bytes.toBytes("mainFamily"), Bytes.toBytes("country_name"),
			Bytes.toBytes("Denmark"));
	cells.add(cell);
	cell = new KeyValue(Bytes.toBytes("Denmark"),
			Bytes.toBytes("mainFamily"), Bytes.toBytes("iso2"),
			Bytes.toBytes("DK"));
	cells.add(cell);
	cell = new KeyValue(Bytes.toBytes("Denmark"),
			Bytes.toBytes("mainFamily"), Bytes.toBytes("iso3"),
			Bytes.toBytes("DNK"));
	cells.add(cell);
	Result inputResult = Result.create(cells);

	SortedMapWritable expectedOutput = new SortedMapWritable();
	expectedOutput.put(new Text("mainFamily:country_name"), new Text(
			"Denmark"));
	expectedOutput.put(new Text("mainFamily:iso2"), new Text("DK"));
	expectedOutput
			.put(new Text("mainFamily:iso2_iso3"), new Text("DK_DNK"));
	expectedOutput.put(new Text("mainFamily:iso3"), new Text("DNK"));

	String expectedAnalyzerKey1 = "Value distribution (mainFamily:country_name)";
	String expectedAnalyzerKey2 = "Value distribution (mainFamily:iso2)";

	mapDriver.withInput(inputKey, inputResult);
	List<Pair<Text, SortedMapWritable>> actualOutputs = mapDriver.run();
	Assert.assertEquals(2, actualOutputs.size()); // expected first, per JUnit's (expected, actual) order
	Pair<Text, SortedMapWritable> actualOutput1 = actualOutputs.get(0);
	Pair<Text, SortedMapWritable> actualOutput2 = actualOutputs.get(1);

	Assert.assertEquals(expectedAnalyzerKey1, actualOutput1.getFirst()
			.toString());
	Assert.assertEquals(expectedAnalyzerKey2, actualOutput2.getFirst()
			.toString());
	for (@SuppressWarnings("rawtypes")
	Map.Entry<WritableComparable, Writable> mapEntry : expectedOutput
			.entrySet()) {
		Text expectedColumnName = (Text) mapEntry.getKey();
		Text expectedColumnValue = (Text) mapEntry.getValue();

		Assert.assertTrue(actualOutput1.getSecond().containsKey(
				expectedColumnName));
		Assert.assertEquals(expectedColumnValue, actualOutput1.getSecond()
				.get(expectedColumnName));

		Assert.assertTrue(actualOutput2.getSecond().containsKey(
				expectedColumnName));
		Assert.assertEquals(expectedColumnValue, actualOutput2.getSecond()
				.get(expectedColumnName));
	}
}
 
Developer: tomaszguzialek, Project: hadoop-datacleaner, Lines: 59, Source: HBaseTableMapperTest.java


Note: The org.apache.hadoop.io.SortedMapWritable.put examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Refer to each project's license before distributing or using the code; do not reproduce without permission.