

Java MapWritable.get Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.io.MapWritable.get, compiled from open-source code. If you are wondering what MapWritable.get does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage of the enclosing class, org.apache.hadoop.io.MapWritable.


The following presents 15 code examples of MapWritable.get, ordered by popularity.
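
Before the project-sourced examples, here is a minimal, self-contained sketch of the put/get round trip that all of the snippets below rely on; the class name and keys are invented for illustration.

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

public class MapWritableGetDemo {
	public static void main(String[] args) {
		MapWritable map = new MapWritable();
		map.put(new Text("count"), new IntWritable(42));

		// get() resolves keys via equals()/hashCode(), so a fresh Text
		// with the same content finds the entry.
		Writable value = map.get(new Text("count"));
		System.out.println(value); // prints: 42

		// Missing keys yield null rather than an exception.
		System.out.println(map.get(new Text("missing"))); // prints: null
	}
}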

Example 1: convertToMap

import org.apache.hadoop.io.MapWritable; // import the class/package the method depends on
private static Map<String, Map<String,String>> convertToMap(MapWritable inputMap) {
	Map<String, Map<String,String>> mapResult = Maps.newHashMap();
	
	for (Writable attributeText : inputMap.keySet()) {
		MapWritable partialInsideMap = (MapWritable) inputMap.get(attributeText);
		Map<String,String> partialOutputMap = Maps.newHashMap();
		
		for (Writable rule : partialInsideMap.keySet()) {
			Text regola = (Text) rule;
			Text valore = (Text) partialInsideMap.get(rule);
			
			partialOutputMap.put(regola.toString(), valore.toString());
		}
		
		mapResult.put(((Text)attributeText).toString(), partialOutputMap);
	}
	
	return mapResult;
}
 
Author: disheng | Project: alfred-mpi | Lines: 20 | Source file: MapWritableConverter.java

Example 2: getSelectorByQueryType

import org.apache.hadoop.io.MapWritable; // import the class/package the method depends on
/**
 * Pulls the correct selector from the MapWritable data element given the queryType
 * <p>
 * Pulls the first element of the array if the element is an array type
 */
public static String getSelectorByQueryType(MapWritable dataMap, QuerySchema qSchema, DataSchema dSchema)
{
  String selector;

  String fieldName = qSchema.getSelectorName();
  if (dSchema.isArrayElement(fieldName))
  {
    if (dataMap.get(dSchema.getTextName(fieldName)) instanceof WritableArrayWritable)
    {
      String[] selectorArray = ((WritableArrayWritable) dataMap.get(dSchema.getTextName(fieldName))).toStrings();
      selector = selectorArray[0];
    }
    else
    {
      String[] elementArray = ((ArrayWritable) dataMap.get(dSchema.getTextName(fieldName))).toStrings();
      selector = elementArray[0];
    }
  }
  else
  {
    selector = dataMap.get(dSchema.getTextName(fieldName)).toString();
  }

  return selector;
}
 
Author: apache | Project: incubator-pirk | Lines: 31 | Source file: QueryUtils.java
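
One thing Example 2 leaves implicit: MapWritable.get returns null when the field is absent, so the non-array branch would throw a NullPointerException on a missing selector. Below is a defensive variant, sketched under two assumptions not confirmed by the snippet itself: that WritableArrayWritable extends ArrayWritable (as the twin toStrings() branches suggest), and the same QuerySchema/DataSchema helpers as above; the method name and default-value parameter are inventions for illustration.

import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Writable;

public static String getSelectorOrDefault(MapWritable dataMap, QuerySchema qSchema, DataSchema dSchema, String defaultValue)
{
  Writable raw = dataMap.get(dSchema.getTextName(qSchema.getSelectorName()));
  if (raw == null)
  {
    return defaultValue; // field missing from this data element
  }
  if (raw instanceof ArrayWritable) // also covers a WritableArrayWritable subclass
  {
    String[] values = ((ArrayWritable) raw).toStrings();
    return values.length > 0 ? values[0] : defaultValue;
  }
  return raw.toString();
}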

Example 3: processTupleViolation

import org.apache.hadoop.io.MapWritable; // import the class/package the method depends on
private void processTupleViolation(MapWritable fieldMapWritable,
		Map<String, Integer> fieldFileViolationsMap, StringBuffer wb,
		DataViolationWritableBean fileViolationsWritable, String fileName)
		throws IOException {
	// Look up the violation counter for this field number and bump it.
	IntWritable fieldNumber = new IntWritable(fileViolationsWritable.getFieldNumber());
	IntWritable fieldViolations = (IntWritable) fieldMapWritable.get(fieldNumber);
	fieldViolations = setFieldViolations(fieldViolations);
	fieldMapWritable.put(fieldNumber, fieldViolations);

	// Bump the per-file violation count and append a report line.
	int violations = extractViolationsFromMap(fieldFileViolationsMap, fileName) + 1;
	fieldFileViolationsMap.put(fileName, violations);
	writeViolationsToBuffer(fileViolationsWritable, fileName, wb, violations);
}
 
Author: Impetus | Project: jumbune | Lines: 17 | Source file: DataValidationReducer.java

Example 4: deserialize

import org.apache.hadoop.io.MapWritable; // import the class/package the method depends on
@Override
public Object deserialize(final Writable wr) throws SerDeException {
	if (!(wr instanceof MapWritable)) {
		throw new SerDeException("Expected MapWritable, received "
				+ wr.getClass().getName());
	}

	final MapWritable input = (MapWritable) wr;
	final Text t = new Text();
	row.clear();

	for (int i = 0; i < fieldCount; i++) {
		t.set(columnNames.get(i));
		final Writable value = input.get(t);
		if (value != null && !NullWritable.get().equals(value)) {
			row.add(value.toString());
		} else {
			row.add(null);
		}
	}

	return row;
}
 
Author: simonellistonball | Project: hive-azuretables | Lines: 24 | Source file: AzureTablesSerDe.java
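
To make the contract of deserialize concrete, here is a sketch of the kind of MapWritable a caller would hand it. The column names are invented, the serde variable stands for an initialized AzureTablesSerDe, and the fragment assumes an enclosing test method that declares the checked SerDeException.

import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;

// Hypothetical input row: one Text value per column name,
// with NullWritable standing in for an explicit NULL cell.
MapWritable input = new MapWritable();
input.put(new Text("PartitionKey"), new Text("user-001"));
input.put(new Text("email"), new Text("a@example.com"));
input.put(new Text("phone"), NullWritable.get()); // hits the row.add(null) branch

Object row = serde.deserialize(input);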

Example 5: reduce

import org.apache.hadoop.io.MapWritable; // import the class/package the method depends on
@Override
public void reduce(Text key, Iterable<MapWritable> listOfMaps, Context context) throws IOException, InterruptedException {

	for (MapWritable partialResultMap : listOfMaps) {
		for (Writable attributeText : partialResultMap.keySet()) {
			MapWritable partialInsideMap = (MapWritable) partialResultMap.get(attributeText);
			MapWritable partialOutputMap = new MapWritable();
			
			for (Writable rule : partialInsideMap.keySet()) {
				Text regola = (Text) rule;
				Text valore = (Text) partialInsideMap.get(rule);
				
				partialOutputMap.put(new Text(regola.toString()), new Text(valore.toString()));
			}
			
			result.put((Text)attributeText, partialOutputMap);
		}
	}
	
	Text resultWrite = new Text(MapWritableConverter.toJsonText(result));
	
	context.write(key,resultWrite);       
}
 
Author: disheng | Project: alfred-mpi | Lines: 24 | Source file: XPathApplierTextReducer.java

Example 6: reduce

import org.apache.hadoop.io.MapWritable; // import the class/package the method depends on
@Override
public void reduce(Text key, Iterable<MapWritable> listOfMaps, Context context) throws IOException, InterruptedException {

	for (MapWritable partialResultMap : listOfMaps) {
		for (Writable attributeText : partialResultMap.keySet()) {
			MapWritable partialInsideMap = (MapWritable) partialResultMap.get(attributeText);
			MapWritable partialOutputMap = new MapWritable();
			
			for (Writable rule : partialInsideMap.keySet()) {
				Text regola = (Text) rule;
				Text valore = (Text) partialInsideMap.get(rule);
				
				partialOutputMap.put(new Text(regola.toString()), new Text(valore.toString()));
			}
			
			result.put((Text)attributeText, partialOutputMap);
		}
	}
		
	context.write(key,result);       
}
 
Author: disheng | Project: alfred-mpi | Lines: 22 | Source file: XPathApplierReducer.java

Example 7: deserialize

import org.apache.hadoop.io.MapWritable; // import the class/package the method depends on
@Override
public Object deserialize(Writable wrtbl) throws SerDeException {
    MapWritable input = (MapWritable) wrtbl;
    Text t = new Text();
    row.clear();
    for (int i = 0; i < fieldCount; i++) {
        t.set(majorMinorKeys.get(i));
        Writable value = input.get(t);
        if (value != null && !NullWritable.get().equals(value)) {
            row.add(value.toString());
        } else {
            row.add(null);
        }
    }
    return row;
}
 
Author: vilcek | Project: HiveKVStorageHandler2 | Lines: 17 | Source file: KVHiveSerDe.java

Example 8: map

import org.apache.hadoop.io.MapWritable; // import the class/package the method depends on
public void map(Text docId, MapWritable doc, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
	Writable title = doc.get(new Text("text_content"));
	StringTokenizer tokenizer = new StringTokenizer(title.toString());
	while (tokenizer.hasMoreTokens()) {
		word.set(tokenizer.nextToken());
		output.collect(word, one);
	}
	
	/*for (Writable val : doc.values()) {
		word.set(val.toString());
		output.collect(word, one);
	}*/
	
	/*for (Writable key : doc.keySet()) {
		word.set(key.toString());
		output.collect(word, one);
	}*/
	
	/*Writable title = doc.get("article_dc_title");
	if (title != null) {
		log.info(title.toString());
		StringTokenizer tokenizer = new StringTokenizer(title.toString());
		while (tokenizer.hasMoreTokens()) {
			word.set(tokenizer.nextToken());
			output.collect(word, one);
		}
	} else {
		log.info("article_dc_title not found, possible values:");
		for (Writable key : doc.keySet()) {
			log.info("KEY: | " + key.toString() + " |\n");
		}
	}*/
}
 
Author: NLeSC | Project: benchmarking-elasticsearch | Lines: 34 | Source file: mapred1.java

Example 9: map

import org.apache.hadoop.io.MapWritable; // import the class/package the method depends on
public void map(Text docId, MapWritable doc, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
	WritableArrayWritable fields = (WritableArrayWritable) doc.get(new Text("text_content"));
	Writable title = fields.get()[0];
	StringTokenizer tokenizer = new StringTokenizer(title.toString());
	while (tokenizer.hasMoreTokens()) {
		word.set(tokenizer.nextToken());
		output.collect(word, one);
	}
	
	/*for (Writable val : doc.values()) {
		word.set(val.toString());
		output.collect(word, one);
	}*/
	
	/*for (Writable key : doc.keySet()) {
		word.set(key.toString());
		output.collect(word, one);
	}*/
	
	/*Writable title = doc.get("article_dc_title");
	if (title != null) {
		log.info(title.toString());
		StringTokenizer tokenizer = new StringTokenizer(title.toString());
		while (tokenizer.hasMoreTokens()) {
			word.set(tokenizer.nextToken());
			output.collect(word, one);
		}
	} else {
		log.info("article_dc_title not found, possible values:");
		for (Writable key : doc.keySet()) {
			log.info("KEY: | " + key.toString() + " |\n");
		}
	}*/
}
 
Author: NLeSC | Project: benchmarking-elasticsearch | Lines: 35 | Source file: mapred2.java

Example 10: map

import org.apache.hadoop.io.MapWritable; // import the class/package the method depends on
public void map(Text docId, MapWritable doc, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
	WritableArrayWritable fields = (WritableArrayWritable) doc.get(new Text("text_content"));
	Writable content = fields.get()[0];
	StringTokenizer tokenizer = new StringTokenizer(content.toString());
	while (tokenizer.hasMoreTokens()) {
		String word = tokenizer.nextToken();
		if (countMap.containsKey(word)) {
			countMap.put(word, countMap.get(word) + 1);
		} else {
			countMap.put(word, 1);
		}
		//word.set(tokenizer.nextToken());
		//output.collect(word, one);
	}
	
	/*for (Writable val : doc.values()) {
		word.set(val.toString());
		output.collect(word, one);
	}*/
	
	/*for (Writable key : doc.keySet()) {
		word.set(key.toString());
		output.collect(word, one);
	}*/
	
	/*Writable title = doc.get("article_dc_title");
	if (title != null) {
		log.info(title.toString());
		StringTokenizer tokenizer = new StringTokenizer(title.toString());
		while (tokenizer.hasMoreTokens()) {
			word.set(tokenizer.nextToken());
			output.collect(word, one);
		}
	} else {
		log.info("article_dc_title not found, possible values:");
		for (Writable key : doc.keySet()) {
			log.info("KEY: | " + key.toString() + " |\n");
		}
	}*/
}
 
Author: NLeSC | Project: benchmarking-elasticsearch | Lines: 41 | Source file: mapred4.java

Example 11: map

import org.apache.hadoop.io.MapWritable; // import the class/package the method depends on
public void map(Text docId, MapWritable doc, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
	WritableArrayWritable fields = (WritableArrayWritable) doc.get(new Text("text_content"));
	Writable title = fields.get()[0];
	String cleanLine = title.toString().toLowerCase().replaceAll("[_|$#<>\\^=\\[\\]\\*/\\\\,;,.\\-:()?!\"']", " ");
	StringTokenizer tokenizer = new StringTokenizer(cleanLine);
	while (tokenizer.hasMoreTokens()) {
		String w = tokenizer.nextToken();
		if (w.length() < 2) {
			continue;
		}
		word.set(w);
		output.collect(word, one);
	}
	
	/*for (Writable val : doc.values()) {
		word.set(val.toString());
		output.collect(word, one);
	}*/
	
	/*for (Writable key : doc.keySet()) {
		word.set(key.toString());
		output.collect(word, one);
	}*/
	
	/*Writable title = doc.get("article_dc_title");
	if (title != null) {
		log.info(title.toString());
		StringTokenizer tokenizer = new StringTokenizer(title.toString());
		while (tokenizer.hasMoreTokens()) {
			word.set(tokenizer.nextToken());
			output.collect(word, one);
		}
	} else {
		log.info("article_dc_title not found, possible values:");
		for (Writable key : doc.keySet()) {
			log.info("KEY: | " + key.toString() + " |\n");
		}
	}*/
}
 
Author: NLeSC | Project: benchmarking-elasticsearch | Lines: 40 | Source file: mapred3.java

Example 12: write

import org.apache.hadoop.io.MapWritable; // import the class/package the method depends on
@Override
public void write(NullWritable key, Writable value) throws IOException {

  log.info("SolrRecordWriter -> write");

  if (solr == null) {
    solr = SolrOperations.getSolrServer(conf);
  }

  SolrInputDocument doc = new SolrInputDocument();
  if (value.getClass().getName().equals("org.apache.hadoop.io.MapWritable")) {
    MapWritable valueMap = (MapWritable) value;

    for (Writable keyWritable : valueMap.keySet()) {
      String fieldName = keyWritable.toString();
      Object fieldValue = valueMap.get(new Text(fieldName));
      // Need to add proper conversion of object to Schema field type
      doc.addField(fieldName, fieldValue.toString());
    }
  } else if (value.getClass().getName().equals("org.bigsolr.hadoop.SolrInputRecord")) {
    doc = (SolrInputDocument) value;
  } else {
    log.error("SolrRecordWriter write() Class for Value is not Supported: " + value.getClass().getName());
    System.exit(0);
  }

  try {
    solr.add(doc);
    //solr.commit(true,true);
  } catch (SolrServerException e) {
    log.error("SolrRecordWriter -- solr.add(doc) failed");
    throw new IOException(e);
  }
}
 
Author: mrt | Project: bigsolr | Lines: 38 | Source file: SolrRecordWriter.java
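
A small aside on the loop above: MapWritable hashes Text keys by content, which is why re-wrapping fieldName in a new Text still finds the original entry. The extra allocation can be skipped by reusing the iteration key directly; a behavior-equivalent rewrite of the loop body:

for (Writable keyWritable : valueMap.keySet()) {
  // keyWritable is already the live map key; no need to rebuild a Text from its string
  doc.addField(keyWritable.toString(), valueMap.get(keyWritable).toString());
}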

Example 13: deserialize

import org.apache.hadoop.io.MapWritable; // import the class/package the method depends on
@Override
public Object deserialize(Writable wr) throws SerDeException {
    if (!(wr instanceof MapWritable)) {
        throw new SerDeException("Expected MapWritable, received " + wr.getClass().getName());
    }

    final MapWritable input = (MapWritable) wr;
    final Text t = new Text();
    row.clear();

    for (int i = 0; i < fieldCount; i++) {
        t.set(columnNames.get(i));
        final Writable value = input.get(t);
        if (value != null && !NullWritable.get().equals(value)) {
            // parse as double to avoid NumberFormatException...
            // TODO:need more test,especially for type 'bigint'
            if (HIVE_TYPE_INT.equalsIgnoreCase(columnTypesArray.get(i))) {
                row.add(Double.valueOf(value.toString()).intValue());
            } else if (Cql3SerDe.HIVE_TYPE_SMALLINT.equalsIgnoreCase(columnTypesArray.get(i))) {
                row.add(Double.valueOf(value.toString()).shortValue());
            } else if (Cql3SerDe.HIVE_TYPE_TINYINT.equalsIgnoreCase(columnTypesArray.get(i))) {
                row.add(Double.valueOf(value.toString()).byteValue());
            } else if (Cql3SerDe.HIVE_TYPE_BIGINT.equalsIgnoreCase(columnTypesArray.get(i))) {
                row.add(Long.valueOf(value.toString()));
            } else if (Cql3SerDe.HIVE_TYPE_BOOLEAN.equalsIgnoreCase(columnTypesArray.get(i))) {
                row.add(Boolean.valueOf(value.toString()));
            } else if (Cql3SerDe.HIVE_TYPE_FLOAT.equalsIgnoreCase(columnTypesArray.get(i))) {
                row.add(Double.valueOf(value.toString()).floatValue());
            } else if (Cql3SerDe.HIVE_TYPE_DOUBLE.equalsIgnoreCase(columnTypesArray.get(i))) {
                row.add(Double.valueOf(value.toString()));
            } else {
                row.add(value.toString());
            }
        } else {
            row.add(null);
        }
    }

    return row;
}
 
Author: kernel164 | Project: hive-cassandra-dsc | Lines: 41 | Source file: Cql3SerDe.java
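
The "parse as double" comment in Example 13 is doing real work: if the storage layer renders an integral value with a decimal point, such as "42.0" (an assumption about the data, not something the snippet itself shows), Integer.valueOf rejects it, while routing through Double tolerates either rendering. A standalone two-line illustration:

int viaDouble = Double.valueOf("42.0").intValue(); // 42
// int broken = Integer.valueOf("42.0");           // throws NumberFormatException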

Example 14: deserialize

import org.apache.hadoop.io.MapWritable; // import the class/package the method depends on
@Override
public Object deserialize(Writable wr) throws SerDeException {

  if (!(wr instanceof MapWritable)) {
    throw new SerDeException("Expected MapWritable, received "
        + wr.getClass().getName());
  }

  final MapWritable input = (MapWritable) wr;
  final Text t = new Text();
  row.clear();

  for (int i = 0; i < fieldCount; i++) {
    t.set(columnNames.get(i));
    final Writable value = input.get(t);
    if (value != null && !NullWritable.get().equals(value)) {
      if (HIVE_TYPE_INT.equalsIgnoreCase(columnTypesArray[i])) {
        row.add(Double.valueOf(value.toString()).intValue());
      }else if (SolrSerDe.HIVE_TYPE_BOOLEAN.equalsIgnoreCase(columnTypesArray[i])) {
        row.add(Boolean.valueOf(value.toString()));
      } else if (SolrSerDe.HIVE_TYPE_FLOAT.equalsIgnoreCase(columnTypesArray[i])) {
        row.add(Double.valueOf(value.toString()).floatValue());
      } else if (SolrSerDe.HIVE_TYPE_DOUBLE.equalsIgnoreCase(columnTypesArray[i])) {
        row.add(Double.valueOf(value.toString()));
      } else {
        row.add(value.toString());
      }

    } else {
      row.add(null);
    }
  }
  return row;
}
 
Author: amitjaspal | Project: solr-storagehandler | Lines: 35 | Source file: SolrSerDe.java

Example 15: es2Json

import org.apache.hadoop.io.MapWritable; // import the class/package the method depends on
public JSONObject es2Json(MapWritable valueWritable) throws IOException, InterruptedException {
    JSONObject obj = new JSONObject();
    for (Writable keyWritable : valueWritable.keySet()) {
        String key = ((Text) keyWritable).toString();
        Writable valWritable = valueWritable.get(keyWritable);
        
        if (valWritable instanceof Text) {
            obj.put(key, valWritable.toString());
        } else if (valWritable instanceof IntWritable) {
            obj.put(key, ((IntWritable)valWritable).get());
        } else if (valWritable instanceof FloatWritable) {
            obj.put(key, ((FloatWritable)valWritable).get());
        } else if (valWritable instanceof LongWritable) {
            obj.put(key, ((LongWritable)valWritable).get());
        } else if (valWritable instanceof DoubleWritable) {
            obj.put(key, ((DoubleWritable)valWritable).get());
        } else if (valWritable instanceof BooleanWritable) {
            obj.put(key, ((BooleanWritable)valWritable).get());
        } else if (valWritable instanceof MapWritable) {
            obj.put(key, es2Json((MapWritable) valWritable));
        } else if (valWritable instanceof WritableArrayWritable) {
            WritableArrayWritable waw = (WritableArrayWritable) valWritable;
            Writable[] writable = waw.get();
            
            JSONArray array = new JSONArray();
            for (int i=0; i < writable.length; ++i) {
                Object o = writable[i];
                if (o instanceof MapWritable) {
                    array.add(es2Json((MapWritable) o));
                } else if (o instanceof Text) {
                    array.add(o.toString());
                } else if (o instanceof IntWritable) {
                    array.add(((IntWritable)o).get());
                } else if (o instanceof FloatWritable) {
                    array.add(((FloatWritable)o).get());
                } else if (o instanceof LongWritable) {
                    array.add(((LongWritable)o).get());
                } else if (o instanceof DoubleWritable) {
                    array.add(((DoubleWritable)o).get());
                }
            }
            obj.put(key, array);
        }
    }
    return obj;
}
 
Author: chaopengio | Project: elasticsearch-mapreduce | Lines: 47 | Source file: Es2Json.java
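
A usage sketch for es2Json, assuming json-simple's JSONObject/JSONArray (which the put/add calls above are consistent with), a no-arg Es2Json constructor, and an enclosing method that declares the checked exceptions es2Json throws; the document contents are invented.

MapWritable location = new MapWritable();
location.put(new Text("lat"), new DoubleWritable(52.37));
location.put(new Text("lon"), new DoubleWritable(4.9));

MapWritable doc = new MapWritable();
doc.put(new Text("name"), new Text("Amsterdam"));
doc.put(new Text("population"), new IntWritable(821752));
doc.put(new Text("location"), location); // recurses into a nested JSONObject

JSONObject json = new Es2Json().es2Json(doc);
System.out.println(json.toJSONString());
// e.g. {"name":"Amsterdam","population":821752,"location":{"lat":52.37,"lon":4.9}}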


Note: The org.apache.hadoop.io.MapWritable.get examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets come from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's License before redistributing or using the code. Do not reproduce this compilation without permission.