

Java MapWritable.put Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.io.MapWritable.put. If you have been wondering what exactly MapWritable.put does, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also explore further usage examples of the containing class, org.apache.hadoop.io.MapWritable.


The following presents 15 code examples of the MapWritable.put method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
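
Before turning to the project examples, here is a minimal, self-contained sketch (not drawn from any of the projects below; the class name is illustrative) of how MapWritable.put is typically used: both keys and values must be Hadoop Writable types, and values retrieved with get need to be cast back to their concrete type.

import java.util.Map;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

public class MapWritablePutSketch {
    public static void main(String[] args) {
        // MapWritable behaves like a HashMap whose keys and values are Writable
        MapWritable map = new MapWritable();
        map.put(new Text("wordCount"), new IntWritable(42));
        map.put(new Text("source"), new Text("example.txt"));

        // values come back as Writable and must be cast to the concrete type
        IntWritable count = (IntWritable) map.get(new Text("wordCount"));
        System.out.println("wordCount = " + count.get());

        // iteration works the same way as with java.util.Map
        for (Map.Entry<Writable, Writable> entry : map.entrySet()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
    }
}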

Example 1: reduce

import org.apache.hadoop.io.MapWritable; // import the class this method depends on
@Override
protected void reduce(StatsUserDimension key, Iterable<TimeOutputValue> values, Context context)
		throws IOException, InterruptedException {
	this.unique.clear();

	// count the distinct uuids
	for (TimeOutputValue value : values) {
		this.unique.add(value.getId()); // uid, the user ID
	}
	MapWritable map = new MapWritable(); // behaves like a java HashMap
	map.put(new IntWritable(-1), new IntWritable(this.unique.size()));
	outputValue.setValue(map);

	// set the KPI name
	String kpiName = key.getStatsCommon().getKpi().getKpiName();
	if (KpiType.NEW_INSTALL_USER.name.equals(kpiName)) {
		// new install users for the stats_user table
		outputValue.setKpi(KpiType.NEW_INSTALL_USER);
	} else if (KpiType.BROWSER_NEW_INSTALL_USER.name.equals(kpiName)) {
		// new install users for the stats_device_browser table
		outputValue.setKpi(KpiType.BROWSER_NEW_INSTALL_USER);
	}
	context.write(key, outputValue);
}
 
Developer: liuhaozzu, Project: big_data, Lines: 25, Source: NewInstallUserReducer.java

Example 2: configureGenericRecordExportInputFormat

import org.apache.hadoop.io.MapWritable; // import the class this method depends on
private void configureGenericRecordExportInputFormat(Job job, String tableName)
    throws IOException {
  ConnManager connManager = context.getConnManager();
  Map<String, Integer> columnTypeInts;
  if (options.getCall() == null) {
    columnTypeInts = connManager.getColumnTypes(
        tableName,
        options.getSqlQuery());
  } else {
    columnTypeInts = connManager.getColumnTypesForProcedure(
        options.getCall());
  }
  String[] specifiedColumns = options.getColumns();
  MapWritable columnTypes = new MapWritable();
  for (Map.Entry<String, Integer> e : columnTypeInts.entrySet()) {
    String column = e.getKey();
    column = (specifiedColumns == null) ? column : options.getColumnNameCaseInsensitive(column);
    if (column != null) {
      Text columnName = new Text(column);
      Text columnType = new Text(connManager.toJavaType(tableName, column, e.getValue()));
      columnTypes.put(columnName, columnType);
    }
  }
  DefaultStringifier.store(job.getConfiguration(), columnTypes,
      AvroExportMapper.AVRO_COLUMN_TYPES_MAP);
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 27, Source: JdbcExportJob.java

Example 3: processTupleViolation

import org.apache.hadoop.io.MapWritable; // import the class this method depends on
private void processTupleViolation(MapWritable fieldMapWritable,
		Map<String, Integer> fieldFileViolationsMap, StringBuffer wb,
		DataViolationWritableBean fileViolationsWritable, String fileName)
		throws IOException {
	IntWritable fieldNumber = new IntWritable();
	IntWritable fieldViolations = new IntWritable(0);
	int violations;
	fieldNumber = new IntWritable(fileViolationsWritable.getFieldNumber());
	fieldViolations = (IntWritable) fieldMapWritable.get((fieldNumber));
	fieldViolations = setFieldViolations(fieldViolations);
	fieldMapWritable.put(fieldNumber, fieldViolations);
	violations = extractViolationsFromMap(fieldFileViolationsMap, fileName);
	violations += 1;
	fieldFileViolationsMap.put(fileName, violations);
	writeViolationsToBuffer(fileViolationsWritable, fileName, wb, violations);
}
 
Developer: Impetus, Project: jumbune, Lines: 17, Source: DataValidationReducer.java

Example 4: map

import org.apache.hadoop.io.MapWritable; // import the class this method depends on
@Override
public void map(LongWritable key, Text value, Mapper.Context context) throws IOException, InterruptedException {
    TrecOLParser document = new TrecOLParser(value.toString());
    documentAnalyzed = new MapWritable();
    if (document.isParsed()) {
        this.tokenizer.tokenize(document.getDocContent());
        while (this.tokenizer.hasMoreTokens()) {
            IntWritable counter = CastingTypes.zero;
            String newTerm = this.tokenizer.nextToken();
            Text term = new Text(newTerm);
            if (documentAnalyzed.containsKey(term)) {
                counter = CastingTypes.strToIntWr(documentAnalyzed.get(term).toString());
            }
            documentAnalyzed.put(term, CastingTypes.intToIntWr(counter.get()+1));
        }
        if ( ! documentAnalyzed.isEmpty()) {
            context.write(CastingTypes.strToIntWr(document.getDocId()), documentAnalyzed);
        }
    }
}
 
Developer: tomasdelvechio, Project: YarnExamples, Lines: 21, Source: NutchMap.java

Example 5: map

import org.apache.hadoop.io.MapWritable; // import the class this method depends on
@Override
public void map(Object key, Text value, Context context)
		throws IOException, InterruptedException {
	Configuration conf = context.getConfiguration();
	String prefix = conf.get("prefix");
	
	MapWritable doc = new MapWritable();
	String[] line = value.toString().split(",");
	doc.put(new Text(prefix+"Id"),new Text(line[1]+"-"+line[2]+"-"+line[0]));
	doc.put(new Text(prefix+"SiteName"), new Text(line[1]));
	doc.put(new Text(prefix+"RoomName"), new Text(line[2]));
	doc.put(new Text(prefix+"Fecha"), new Text(line[3].replace(' ','T')));
	doc.put(new Text(prefix+"Power"), new FloatWritable(Float.parseFloat(line[4])));
	doc.put(new Text(prefix+"Temp"), new FloatWritable(Float.parseFloat(line[5])));
	doc.put(new Text(prefix+"Humidity"), new FloatWritable(Float.parseFloat(line[6])));
	doc.put(new Text(prefix+"Timestamp"), new Text(line[6].replace(' ','T')));
	
	context.write(NullWritable.get(), doc);
}
 
Developer: jucaf, Project: datacentermr, Lines: 20, Source: EsFeederMapper.java

Example 6: populateMap

import org.apache.hadoop.io.MapWritable; // import the class this method depends on
private void populateMap(SortedMap<ByteBuffer, IColumn> cvalue, MapWritable value)
{
  for (Map.Entry<ByteBuffer, IColumn> e : cvalue.entrySet())
  {
    ByteBuffer k = e.getKey();
    IColumn    v = e.getValue();

    if (!v.isLive()) {
      continue;
    }

    BytesWritable newKey   = convertByteBuffer(k);
    BytesWritable newValue = convertByteBuffer(v.value());

    value.put(newKey, newValue);
  }
}
 
Developer: dvasilen, Project: Hive-Cassandra, Lines: 18, Source: CassandraHiveRecordReader.java

Example 7: next

import org.apache.hadoop.io.MapWritable; // import the class this method depends on
/**
 * Grabs the next result and processes the DynamicTableEntity into a
 * Hive-friendly MapWritable.
 * 
 * @param key
 *            The RowID for the entity. Note that this is not really an Azure
 *            key, since the partition is implicit in the key.
 * @param value
 *            A MapWritable which will be populated with values from the
 *            DynamicTableEntity returned by the Azure query.
 */
public boolean next(Text key, MapWritable value) throws IOException {
	if (!results.hasNext())
		return false;
	DynamicTableEntity entity = results.next();
	key.set(entity.getRowKey());
	for (Entry<String, EntityProperty> entry : entity.getProperties()
			.entrySet()) {

		final EntityProperty property = entry.getValue();
		// Note that azure table entity keys are forced to lower case for
		// matching with hive column names
		final String propertyKey = entry.getKey().toLowerCase();
		final String propertyValue = property.getValueAsString();
		final Writable writableValue = SERIALIZED_NULL
				.equals(propertyValue) ? NullWritable.get() : new Text(
				propertyValue);
		value.put(new Text(propertyKey), writableValue);
	}
	pos++;
	return true;
}
 
Developer: simonellistonball, Project: hive-azuretables, Lines: 33, Source: AzureTablesRecordReader.java

Example 8: configureInputFormat

import org.apache.hadoop.io.MapWritable; // import the class this method depends on
@Override
protected void configureInputFormat(Job job, String tableName,
    String tableClassName, String splitByCol)
    throws ClassNotFoundException, IOException {

  fileType = getInputFileType();

  super.configureInputFormat(job, tableName, tableClassName, splitByCol);

  if (fileType == FileType.AVRO_DATA_FILE) {
    LOG.debug("Configuring for Avro export");
    ConnManager connManager = context.getConnManager();
    Map<String, Integer> columnTypeInts =
      connManager.getColumnTypes(tableName, options.getSqlQuery());
    MapWritable columnTypes = new MapWritable();
    for (Map.Entry<String, Integer> e : columnTypeInts.entrySet()) {
      Text columnName = new Text(e.getKey());
      Text columnText = new Text(
          connManager.toJavaType(tableName, e.getKey(), e.getValue()));
      columnTypes.put(columnName, columnText);
    }
    DefaultStringifier.store(job.getConfiguration(), columnTypes,
        AvroExportMapper.AVRO_COLUMN_TYPES_MAP);
  }

}
 
Developer: infinidb, Project: sqoop, Lines: 27, Source: JdbcExportJob.java

Example 9: reduce

import org.apache.hadoop.io.MapWritable; // import the class this method depends on
@Override
public void reduce(Text key, Iterable<MapWritable> listOfMaps, Context context) throws IOException, InterruptedException {

	for (MapWritable partialResultMap : listOfMaps) {
		for (Writable attributeText : partialResultMap.keySet()) {
			MapWritable partialInsideMap = (MapWritable) partialResultMap.get(attributeText);
			MapWritable partialOutputMap = new MapWritable();
			
			for (Writable rule : partialInsideMap.keySet()) {
				Text regola = (Text) rule;
				Text valore = (Text) partialInsideMap.get(rule);
				
				partialOutputMap.put(new Text(regola.toString()), new Text(valore.toString()));
			}
			
			result.put((Text)attributeText, partialOutputMap);
		}
	}
	
	Text resultWrite = new Text(MapWritableConverter.toJsonText(result));
	
	context.write(key,resultWrite);       
}
 
Developer: disheng, Project: alfred-mpi, Lines: 24, Source: XPathApplierTextReducer.java

Example 10: reduce

import org.apache.hadoop.io.MapWritable; // import the class this method depends on
@Override
public void reduce(Text key, Iterable<MapWritable> listOfMaps, Context context) throws IOException, InterruptedException {

	for (MapWritable partialResultMap : listOfMaps) {
		for (Writable attributeText : partialResultMap.keySet()) {
			MapWritable partialInsideMap = (MapWritable) partialResultMap.get(attributeText);
			MapWritable partialOutputMap = new MapWritable();
			
			for (Writable rule : partialInsideMap.keySet()) {
				Text regola = (Text) rule;
				Text valore = (Text) partialInsideMap.get(rule);
				
				partialOutputMap.put(new Text(regola.toString()), new Text(valore.toString()));
			}
			
			result.put((Text)attributeText, partialOutputMap);
		}
	}
		
	context.write(key,result);       
}
 
Developer: disheng, Project: alfred-mpi, Lines: 22, Source: XPathApplierReducer.java

Example 11: map

import org.apache.hadoop.io.MapWritable; // import the class this method depends on
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

    // collapse line breaks into spaces, then tokenize on whitespace
    String text = value.toString().replaceAll("(\\r|\\n|\\r\\n)+", " ");
    String[] values = text.split("\\s+");
    for (String v : values) {
        MapWritable doc = new MapWritable();
        doc.put(new Text("word"), new Text(v));
        context.write(NullWritable.get(), doc);
    }
}
 
Developer: rezaei121, Project: elasticsearch-mapreduce-wordcount, Lines: 12, Source: esMapImport.java

Example 12: writeProperties

import org.apache.hadoop.io.MapWritable; // import the class this method depends on
public static final void writeProperties(DataOutput out, Properties props) throws IOException {
  MapWritable propsWritable = new MapWritable();
  for (Entry<Object, Object> prop : props.entrySet()) {
    Writable key = new Text(prop.getKey().toString());
    Writable value = new Text(prop.getValue().toString());
    propsWritable.put(key,value);
  }
  propsWritable.write(out);
}
 
Developer: jianglibo, Project: gora-boot, Lines: 10, Source: WritableUtils.java

Example 13: testMapFieldExtractorNested

import org.apache.hadoop.io.MapWritable; // import the class this method depends on
@Test
public void testMapFieldExtractorNested() throws Exception {
    ConstantFieldExtractor cfe = new MapWritableFieldExtractor();
    Map<Writable, Writable> m = new MapWritable();
    MapWritable nested = new MapWritable();
    nested.put(new Text("bar"), new Text("found"));
    m.put(new Text("foo"), nested);
    assertEquals(new Text("found"), extract(cfe, "foo.bar", m));
}
 
Developer: xushjie1987, Project: es-hadoop-v2.2.0, Lines: 10, Source: FieldExtractorTests.java

Example 14: regionServerStartup

import org.apache.hadoop.io.MapWritable; // import the class this method depends on
@Override
public MapWritable regionServerStartup(final int port,
  final long serverStartCode, final long serverCurrentTime)
throws IOException {
  // Register with server manager
  InetAddress ia = HBaseServer.getRemoteIp();
  ServerName rs = this.serverManager.regionServerStartup(ia, port,
    serverStartCode, serverCurrentTime);
  // Send back some config info
  MapWritable mw = createConfigurationSubset();
  mw.put(new Text(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER),
    new Text(rs.getHostname()));
  return mw;
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 15, Source: HMaster.java

Example 15: getCurrentValue

import org.apache.hadoop.io.MapWritable; // import the class this method depends on
@Override
public MapWritable getCurrentValue() throws IOException, InterruptedException {
	MapWritable mapWritable = new MapWritable();
	mapWritable.put(new Text("tag"), new Text(datalinks.get(recordName)));
	mapWritable.put(new Text("record"), new Text(files[index].getPath().toString()));
	return mapWritable;
}
 
Developer: apache, Project: incubator-taverna-engine, Lines: 8, Source: TavernaRecordReader.java


Note: The org.apache.hadoop.io.MapWritable.put examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; the copyright of the source code belongs to its original authors, and redistribution and use must follow the corresponding project's license. Do not reproduce without permission.