This page collects typical usage examples of the Java method org.apache.hadoop.io.MapWritable.entrySet. If you have been wondering what exactly the Java MapWritable.entrySet method does, how to use it, or where to find examples of it in use, the curated method examples here may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.io.MapWritable.
Ten code examples of the MapWritable.entrySet method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
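Before the project-specific examples, here is a minimal self-contained sketch (not taken from any project below) of the typical entrySet iteration pattern; it assumes Text keys and IntWritable values:

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import java.util.Map;

public class EntrySetSketch {
    public static void main(String[] args) {
        MapWritable map = new MapWritable();
        map.put(new Text("apples"), new IntWritable(3));
        map.put(new Text("oranges"), new IntWritable(5));
        // entrySet() exposes the underlying Map<Writable, Writable>, so each
        // entry must be cast (or toString()-ed) to a concrete Writable type.
        for (Map.Entry<Writable, Writable> entry : map.entrySet()) {
            Text key = (Text) entry.getKey();
            IntWritable count = (IntWritable) entry.getValue();
            System.out.println(key + " -> " + count.get());
        }
    }
}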
Example 1: write
import org.apache.hadoop.io.MapWritable; // import the package/class the method depends on
@Override
public void write(Writable w) throws IOException {
    MapWritable map = (MapWritable) w;
    SolrInputDocument doc = new SolrInputDocument();
    for (final Map.Entry<Writable, Writable> entry : map.entrySet()) {
        String key = entry.getKey().toString();
        if (entry.getValue() instanceof TimestampWritable) {
            // Solr expects dates as formatted strings, not Timestamp objects
            Timestamp t = ((TimestampWritable) entry.getValue()).getTimestamp();
            doc.setField(key, dateFormat.format(new Date(t.getTime())));
        } else if (entry.getValue() instanceof ShortWritable) {
            doc.setField(key, ((ShortWritable) entry.getValue()).get());
        } else {
            doc.setField(key, entry.getValue().toString());
        }
    }
    log.debug("doc:" + doc.toString());
    table.save(doc);
}
Example 2: writeMap
import org.apache.hadoop.io.MapWritable; // import the package/class the method depends on
public void writeMap(MapWritable mw) throws IOException {
    // The header carries the entry count so the reader knows how many pairs follow
    out.writeMapHeader(mw.size());
    for (Map.Entry<Writable, Writable> entry : mw.entrySet()) {
        write(entry.getKey());
        write(entry.getValue());
    }
}
Example 3: readProperties
import org.apache.hadoop.io.MapWritable; // import the package/class the method depends on
public static final Properties readProperties(DataInput in) throws IOException {
    Properties props = new Properties();
    MapWritable propsWritable = new MapWritable();
    propsWritable.readFields(in);
    for (Entry<Writable, Writable> prop : propsWritable.entrySet()) {
        String key = prop.getKey().toString();
        String value = prop.getValue().toString();
        props.put(key, value);
    }
    return props;
}
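The write side is symmetrical. Below is a hedged sketch of a matching writeProperties helper (hypothetical; the source project's version is not shown), storing each property as a Text/Text pair:

public static final void writeProperties(Properties props, DataOutput out) throws IOException {
    MapWritable propsWritable = new MapWritable();
    for (String name : props.stringPropertyNames()) {
        // Properties hold plain strings, so Text/Text pairs round-trip cleanly
        propsWritable.put(new Text(name), new Text(props.getProperty(name)));
    }
    // MapWritable serializes its size followed by each key/value pair
    propsWritable.write(out);
}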
Example 4: map
import org.apache.hadoop.io.MapWritable; // import the package/class the method depends on
@Override
protected void map(LongWritable key, MapWritable value, Context context)
        throws IOException, InterruptedException {
    for (java.util.Map.Entry<Writable, Writable> entry : value.entrySet()) {
        context.write((Text) entry.getKey(), (Text) entry.getValue());
    }
}
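The casts above assume every key and value in the incoming MapWritable is a Text. If the input may carry other Writable types, a defensive variant (a sketch, not the original project's code) converts through toString() instead:

for (Map.Entry<Writable, Writable> entry : value.entrySet()) {
    // Avoids a ClassCastException when a key or value is not actually a Text
    context.write(new Text(entry.getKey().toString()),
            new Text(entry.getValue().toString()));
}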
Example 5: reduce
import org.apache.hadoop.io.MapWritable; // import the package/class the method depends on
public void reduce(Text key, Iterable<BookMapWritable> values, Context context) throws IOException, InterruptedException {
    BookMapWritable data = new BookMapWritable();
    for (MapWritable dataMap : values) {
        for (Map.Entry<Writable, Writable> entry : dataMap.entrySet()) {
            // Keep the first value seen for each key across all grouped maps
            data.putIfAbsent(entry.getKey(), entry.getValue());
        }
    }
    context.write(key, data);
}
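Here putIfAbsent is inherited from java.util.Map (a Java 8 default method), which MapWritable implements. BookMapWritable itself is project-specific; a minimal stand-in (an assumption, shown only so the example compiles) could be as simple as:

// Hypothetical stand-in: MapWritable already implements Writable and
// Map<Writable, Writable>, so an empty subclass suffices as a distinct value type
public class BookMapWritable extends MapWritable {
}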
Example 6: reduce
import org.apache.hadoop.io.MapWritable; // import the package/class the method depends on
@Override
public void reduce(IntWritable docId, Iterable<MapWritable> documentsAnalyzed, Context context)
        throws IOException, InterruptedException {
    for (MapWritable documentAnalyzed : documentsAnalyzed) {
        for (MapWritable.Entry<Writable, Writable> termEntry : documentAnalyzed.entrySet()) {
            Text term = (Text) termEntry.getKey();
            IntWritable freq = (IntWritable) termEntry.getValue();
            Integer documentId = docId.get();
            this.invertedIndex.addPosting(term, documentId, freq);
        }
    }
}
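This reducer expects each mapper to emit one MapWritable of term frequencies per document. The original mapper is not shown; a plausible sketch (the class name and record layout are assumptions) would be:

// Hypothetical mapper: tokenizes one document per record and emits its term frequencies
public static class TermFrequencyMapper extends Mapper<IntWritable, Text, IntWritable, MapWritable> {
    @Override
    protected void map(IntWritable docId, Text document, Context context)
            throws IOException, InterruptedException {
        Map<String, Integer> counts = new HashMap<>();
        for (String token : document.toString().toLowerCase().split("\\s+")) {
            counts.merge(token, 1, Integer::sum);
        }
        MapWritable termFreqs = new MapWritable();
        for (Map.Entry<String, Integer> e : counts.entrySet()) {
            termFreqs.put(new Text(e.getKey()), new IntWritable(e.getValue()));
        }
        context.write(docId, termFreqs);
    }
}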
Example 7: write
import org.apache.hadoop.io.MapWritable; // import the package/class the method depends on
@Override
public void write(Writable wrt) throws IOException {
    MapWritable tuple = (MapWritable) wrt;
    SolrInputDocument doc = new SolrInputDocument();
    for (Map.Entry<Writable, Writable> entry : tuple.entrySet()) {
        doc.setField(entry.getKey().toString(), entry.getValue().toString());
    }
    solrDAO.saveDoc(doc);
}
Example 8: toJavaScript
import org.apache.hadoop.io.MapWritable; // import the package/class the method depends on
/**
 * Takes in a {@link MapWritable} and returns a {@link Scriptable} map.
 *
 * @param scope the JavaScript scope
 * @param writable the value to convert
 *
 * @return the {@link Scriptable} map equivalent
 */
@Override
public Object toJavaScript(final Scriptable scope, final MapWritable writable) {
    final Map<Object, Object> writableMap = new HashMap<>();
    for (final Map.Entry<Writable, Writable> mapEntry : writable.entrySet()) {
        writableMap.put(ConversionUtils.writableToJS(mapEntry.getKey(), scope),
                ConversionUtils.writableToJS(mapEntry.getValue(), scope));
    }
    return JavaScriptUtils.asObject(scope, writableMap);
}
Example 9: map
import org.apache.hadoop.io.MapWritable; // import the package/class the method depends on
@Override
public void map(Object key, MapWritable value, Context context)
        throws IOException, InterruptedException {
    for (Map.Entry<Writable, Writable> entry : value.entrySet()) {
        word.set(entry.getValue().toString());
        context.write(word, ONE);
    }
}
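A MapWritable-valued input like this typically comes from a SequenceFile. A hedged driver sketch follows (WordCountMapper and the argument paths are assumed names; IntSumReducer is the stock Hadoop summing reducer):

Configuration conf = new Configuration();
Job job = Job.getInstance(conf, "mapwritable wordcount");
job.setJarByClass(WordCountMapper.class);   // assumed enclosing mapper class
job.setMapperClass(WordCountMapper.class);
job.setReducerClass(IntSumReducer.class);   // org.apache.hadoop.mapreduce.lib.reduce.IntSumReducer
job.setInputFormatClass(SequenceFileInputFormat.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
FileInputFormat.addInputPath(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
System.exit(job.waitForCompletion(true) ? 0 : 1);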
Example 10: readDataFromHdfs
import org.apache.hadoop.io.MapWritable; // import the package/class the method depends on
/**
 * Reads data from HDFS.
 *
 * @param conf the Hadoop configuration used to read the data from HDFS.
 * @param outputPath the path of the HDFS data.
 * @return a map containing the violations present in the data on HDFS.
 * @throws IOException Signals that an I/O exception has occurred.
 */
private static Map<String, DataValidationReport> readDataFromHdfs(
        Configuration conf, String outputPath) throws IOException {
    Map<String, DataValidationReport> jsonMap = new HashMap<String, DataValidationReport>();
    FileSystem fs = FileSystem.get(conf);
    Path inFile = new Path(outputPath);
    FileStatus[] fss = fs.listStatus(inFile);
    Path path = null;
    Text key = null;
    DataViolationWritable value = null;
    SequenceFile.Reader reader = null;
    DataValidationReport report = null;
    List<FileViolationsWritable> violationList = null;
    for (FileStatus status : fss) {
        path = status.getPath();
        // Skip Hadoop bookkeeping files (_SUCCESS, _logs); only real output parts are read
        if (!((path.getName().equals(DataValidationConstants.HADOOP_SUCCESS_FILES))
                || (path.getName().equals(DataValidationConstants.HADOOP_LOG_FILES)))) {
            LOGGER.info("Going to read the file : [" + path.getName() + "] at path [" + path + "]");
            reader = new SequenceFile.Reader(fs, path, conf);
            DataViolationArrayWritable dvaw = null;
            Map<Integer, Integer> fieldMap = null;
            key = new Text();
            value = new DataViolationWritable();
            while (reader.next(key, value)) {
                int dirtyTuple = value.getDirtyTuple();
                int cleanTuple = value.getCleanTuple();
                int totalViolations = value.getTotalViolations();
                dvaw = value.getDataViolationArrayWritable();
                fieldMap = new HashMap<Integer, Integer>();
                MapWritable mapWritable = value.getFieldMap();
                if (mapWritable != null) {
                    // Unbox the per-field violation counts from the MapWritable
                    for (Map.Entry<Writable, Writable> pairs : mapWritable.entrySet()) {
                        int fieldNumber = ((IntWritable) pairs.getKey()).get();
                        int fieldViolations = ((IntWritable) pairs.getValue()).get();
                        fieldMap.put(fieldNumber, fieldViolations);
                    }
                }
                violationList = new ArrayList<FileViolationsWritable>();
                FileViolationsWritable bean = null;
                Writable[] arr = dvaw.get();
                if (arr != null) {
                    for (int i = 0; i < arr.length; i++) {
                        bean = (FileViolationsWritable) arr[i];
                        violationList.add(bean);
                    }
                }
                report = new DataValidationReport();
                report.setDirtyTuple(dirtyTuple);
                report.setCleanTuple(cleanTuple);
                report.setTotalViolations(totalViolations);
                report.setFieldMap(fieldMap);
                report.setViolationList(violationList);
                jsonMap.put(key.toString(), report);
            }
            reader.close();
        }
    }
    return jsonMap;
}
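Two caveats on the example above: the SequenceFile.Reader(fs, path, conf) constructor is deprecated in Hadoop 2+, and reader.close() is skipped if an exception is thrown mid-loop. A sketch of the same read loop using the option-based factory and try-with-resources:

try (SequenceFile.Reader reader = new SequenceFile.Reader(conf,
        SequenceFile.Reader.file(path))) {
    Text key = new Text();
    DataViolationWritable value = new DataViolationWritable();
    while (reader.next(key, value)) {
        // ... same per-record handling as above ...
    }
} // the reader is closed automatically, even if reading fails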