

Java MapWritable.entrySet Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.io.MapWritable.entrySet, drawn from open-source projects. If you are wondering what MapWritable.entrySet does, how to use it, or want working examples, the curated code samples below should help. You can also explore further usage examples of org.apache.hadoop.io.MapWritable, the class this method belongs to.


The following shows 10 code examples of the MapWritable.entrySet method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples. A short standalone sketch of the method precedes the collected examples.
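MapWritable implements java.util.Map<Writable, Writable>, so entrySet() returns an ordinary Set<Map.Entry<Writable, Writable>> that can be iterated directly. The following minimal, self-contained sketch illustrates this; the class name and the sample keys and values are illustrative only and are not taken from any of the projects below.

import java.util.Map;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

public class MapWritableEntrySetDemo {
    public static void main(String[] args) {
        MapWritable map = new MapWritable();
        map.put(new Text("apples"), new IntWritable(3));
        map.put(new Text("oranges"), new IntWritable(5));

        // entrySet() behaves like any java.util.Map view:
        // each entry holds a Writable key and a Writable value
        for (Map.Entry<Writable, Writable> entry : map.entrySet()) {
            System.out.println(entry.getKey() + " => " + entry.getValue());
        }
    }
}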

Example 1: write

import org.apache.hadoop.io.MapWritable; // import the package/class the method depends on
@Override
public void write(Writable w) throws IOException {
	MapWritable map = (MapWritable) w;
	SolrInputDocument doc = new SolrInputDocument();
	for (final Map.Entry<Writable, Writable> entry : map.entrySet()) {
		String key = entry.getKey().toString();
		
		if (entry.getValue() instanceof TimestampWritable) {
			Timestamp t = ((TimestampWritable)entry.getValue()).getTimestamp();
			doc.setField(key, dateFormat.format( new Date(t.getTime()) ));
		} else if (entry.getValue() instanceof ShortWritable) {
		    doc.setField(key, ((ShortWritable)entry.getValue()).get());
		} else {
			doc.setField(key, entry.getValue().toString());
		}
		
	}
	log.debug("doc: " + doc);
	table.save(doc);
}
 
Developer: vroyer, Project: hive-solr-search, Lines: 21, Source: SolrWriter.java

Example 2: writeMap

import org.apache.hadoop.io.MapWritable; // import the package/class the method depends on
public void writeMap(MapWritable mw) throws IOException {
  out.writeMapHeader(mw.size());
  for (Map.Entry<Writable, Writable> entry : mw.entrySet()) {
    write(entry.getKey());
    write(entry.getValue());
  }
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: TypedBytesWritableOutput.java

Example 3: readProperties

import org.apache.hadoop.io.MapWritable; // import the package/class the method depends on
public static final Properties readProperties(DataInput in) throws IOException {
  Properties props = new Properties();
  MapWritable propsWritable = new MapWritable();
  propsWritable.readFields(in);
  for (Entry<Writable, Writable> prop : propsWritable.entrySet()) {
    String key = prop.getKey().toString();
    String value = prop.getValue().toString();
    props.put(key, value);
  }
  return props;
}
 
Developer: jianglibo, Project: gora-boot, Lines: 12, Source: WritableUtils.java
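readProperties above expects the stream to contain a MapWritable whose keys and values stringify to property names and values. For completeness, here is a matching serializer sketched under that assumption; writeProperties is an illustrative name, not necessarily part of the project's actual WritableUtils API.

import java.io.DataOutput;
import java.io.IOException;
import java.util.Map;
import java.util.Properties;

import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;

public static final void writeProperties(DataOutput out, Properties props) throws IOException {
  MapWritable propsWritable = new MapWritable();
  for (Map.Entry<Object, Object> prop : props.entrySet()) {
    // store each property as a Text key/value pair so readProperties can round-trip it
    propsWritable.put(new Text(prop.getKey().toString()), new Text(prop.getValue().toString()));
  }
  propsWritable.write(out);
}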

Example 4: map

import org.apache.hadoop.io.MapWritable; // import the package/class the method depends on
@Override
protected void map(LongWritable key, MapWritable value, Context context)
    throws IOException, InterruptedException {
  for (java.util.Map.Entry<Writable, Writable> entry : value.entrySet()) {
    context.write((Text) entry.getKey(), (Text) entry.getValue());
  }
}
 
Developer: Hanmourang, Project: hiped2, Lines: 12, Source: JsonMapReduce.java

Example 5: reduce

import org.apache.hadoop.io.MapWritable; // import the package/class the method depends on
public void reduce(Text key, Iterable<BookMapWritable> values, Context context) throws IOException, InterruptedException {
    BookMapWritable data = new BookMapWritable();
    for (MapWritable dataMap : values) {
        for (Map.Entry<Writable, Writable> entry : dataMap.entrySet()) {
            data.putIfAbsent(entry.getKey(), entry.getValue());
        }
    }

    context.write(key, data);
}
 
Developer: mouse-reeve, Project: book-merger, Lines: 11, Source: BookDataReducer.java

Example 6: reduce

import org.apache.hadoop.io.MapWritable; // import the package/class the method depends on
@Override
public void reduce(IntWritable docId, Iterable<MapWritable> documentsAnalyzed, Context context)
  throws IOException, InterruptedException {
    for (MapWritable documentAnalyzed : documentsAnalyzed) {
        for (MapWritable.Entry<Writable, Writable> termEntry : documentAnalyzed.entrySet()) {
            Text term = (Text) termEntry.getKey();
            IntWritable freq = (IntWritable) termEntry.getValue();
            Integer documentId = docId.get();
            this.invertedIndex.addPosting(term, documentId, freq);
        }
    }
}
 
Developer: tomasdelvechio, Project: YarnExamples, Lines: 13, Source: NutchReduce.java

Example 7: write

import org.apache.hadoop.io.MapWritable; // import the package/class the method depends on
@Override
public void write(Writable wrt) throws IOException {
  MapWritable tuple = (MapWritable) wrt;
  SolrInputDocument doc = new SolrInputDocument();
  for (Map.Entry<Writable, Writable> entry : tuple.entrySet()) {
    doc.setField(entry.getKey().toString(), entry.getValue().toString());
  }
  solrDAO.saveDoc(doc);
}
 
Developer: amitjaspal, Project: solr-storagehandler, Lines: 11, Source: SolrRecordWriter.java

Example 8: toJavaScript

import org.apache.hadoop.io.MapWritable; // import the package/class the method depends on
/**
 * Takes in a {@link MapWritable} and returns a {@link Scriptable} map.
 *
 * @param scope the JavaScript scope
 * @param writable the value to convert
 *
 * @return the {@link Scriptable} map equivalent
 */
@Override
public Object toJavaScript(final Scriptable scope, final MapWritable writable) {
    final Map<Object, Object> writableMap = new HashMap<>();

    for (final Map.Entry<Writable, Writable> mapEntry : writable.entrySet()) {
        writableMap.put(ConversionUtils.writableToJS(mapEntry.getKey(), scope),
                        ConversionUtils.writableToJS(mapEntry.getValue(), scope));
    }

    return JavaScriptUtils.asObject(scope, writableMap);
}
 
Developer: apigee, Project: lembos, Lines: 20, Source: MapWritableConverter.java
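The Javadoc above states the conversion contract: each Writable key and value is converted to its JavaScript equivalent and collected into a Scriptable map. A hedged usage sketch follows, assuming Mozilla Rhino's standard Context/Scriptable API; the no-arg MapWritableConverter constructor is an assumption, not confirmed by the snippet.

import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.mozilla.javascript.Context;
import org.mozilla.javascript.Scriptable;

// enter a Rhino context and build a scope to hold the converted map
Context cx = Context.enter();
try {
    Scriptable scope = cx.initStandardObjects();
    MapWritable writable = new MapWritable();
    writable.put(new Text("lang"), new Text("java"));
    Object jsMap = new MapWritableConverter().toJavaScript(scope, writable); // hypothetical constructor
} finally {
    Context.exit();
}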

Example 9: map

import org.apache.hadoop.io.MapWritable; // import the package/class the method depends on
@Override
public void map(Object key, MapWritable value, Context context)
    throws IOException, InterruptedException {
    for (Map.Entry<Writable, Writable> entry : value.entrySet()) {
        word.set(entry.getValue().toString());
        context.write(word, ONE);
    }
}
 
Developer: nielsbasjes, Project: logparser, Lines: 9, Source: Wordcount.java

Example 10: readDataFromHdfs

import org.apache.hadoop.io.MapWritable; // import the package/class the method depends on
/**
 * Reads data from HDFS.
 *
 * @param conf the Hadoop configuration used to read the data from HDFS
 * @param outputPath the path of the HDFS data
 * @return a map containing the violations present in the data on HDFS
 * @throws IOException if an I/O error occurs
 */
private static Map<String, DataValidationReport> readDataFromHdfs(
		Configuration conf, String outputPath) throws IOException {
	Map<String, DataValidationReport> jsonMap = new HashMap<String, DataValidationReport>();

	
	FileSystem fs = FileSystem.get(conf);
	Path inFile = new Path(outputPath);
	FileStatus[] fss = fs.listStatus(inFile);
	Path path = null;
	Text key = null;
	DataViolationWritable value = null;
	SequenceFile.Reader reader = null;
	DataValidationReport report = null;
	List<FileViolationsWritable> violationList = null;

	
	for (FileStatus status : fss) {
		path = status.getPath();

		
		if (!((path.getName().equals(DataValidationConstants.HADOOP_SUCCESS_FILES)) || (path.getName()
				.equals(DataValidationConstants.HADOOP_LOG_FILES)))) {
			LOGGER.info("Going to read the file [" + path.getName() + "] at path [" + path + "]");
			reader = new SequenceFile.Reader(fs, path, conf);
			DataViolationArrayWritable dvaw = null;
			Map<Integer, Integer> fieldMap = null;
			key = new Text();
			value = new DataViolationWritable();
			while (reader.next(key, value)) {
				int dirtyTuple = value.getDirtyTuple();
				int cleanTuple = value.getCleanTuple();
				int totalViolations = value.getTotalViolations();
				dvaw = value.getDataViolationArrayWritable();
				fieldMap = new HashMap<Integer, Integer>();
				MapWritable mapWritable = value.getFieldMap();

				if (mapWritable != null) {
					for (Map.Entry<Writable, Writable> pairs : mapWritable.entrySet()) {
						int fieldNumber = ((IntWritable) pairs.getKey()).get();
						int fieldViolations = ((IntWritable) pairs.getValue()).get();
						fieldMap.put(fieldNumber, fieldViolations);
					}

				}
				violationList = new ArrayList<FileViolationsWritable>();
			    FileViolationsWritable bean = null;
				Writable[] arr = dvaw.get();

				if (arr != null) {
					for (int i = 0; i < arr.length; i++) {
						bean = (FileViolationsWritable) arr[i];
						violationList.add(bean);
					}
				}

				report = new DataValidationReport();
				report.setDirtyTuple(dirtyTuple);
				report.setCleanTuple(cleanTuple);
				report.setTotalViolations(totalViolations);
				report.setFieldMap(fieldMap);
				report.setViolationList(violationList);

				jsonMap.put(key.toString(), report);
			}
			reader.close();
		}
	}
	return jsonMap;
}
 
Developer: Impetus, Project: jumbune, Lines: 78, Source: DataValidationJobExecutor.java


Note: The org.apache.hadoop.io.MapWritable.entrySet examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce without permission.