This article collects typical usage examples of the Java method org.elasticsearch.search.SearchHit.sourceAsMap. If you have been wondering what SearchHit.sourceAsMap does, how to call it, or where to find usage examples, the curated code samples below should help. You can also explore the enclosing class, org.elasticsearch.search.SearchHit, for more context.
The following shows 8 code examples of SearchHit.sourceAsMap, sorted by popularity.
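Before the examples, here is a minimal, self-contained sketch of the typical call pattern. It assumes a pre-6.x transport Client (the same API level as the examples below, which still use SearchHitField, hit.id() and hit.type()); the index name "articles" and the field "title" are hypothetical placeholders, not taken from the examples.

import java.util.Map;

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.search.SearchHit;

public class SourceAsMapSketch {

    // Prints one field of every matching document; "articles" and "title"
    // are placeholder names used only for this sketch.
    public static void printTitles(Client client) {
        SearchResponse response = client.prepareSearch("articles")
                .setSize(10)
                .get();

        for (SearchHit hit : response.getHits().getHits()) {
            // sourceAsMap() deserializes the hit's _source JSON
            // into a Map<String, Object>
            Map<String, Object> source = hit.sourceAsMap();
            System.out.println(source.get("title"));
        }
    }
}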
Example 1: ComperableHitResult
import org.elasticsearch.search.SearchHit; // import the package/class the method depends on
public ComperableHitResult(SearchHit hit, String[] fieldsOrder, String seperator) {
    this.hit = hit;
    Map<String, Object> hitAsMap = hit.sourceAsMap();
    this.flattenMap = new HashMap<>();
    List<String> results = new ArrayList<>();
    this.isAllNull = true;

    for (int i = 0; i < fieldsOrder.length; i++) {
        String field = fieldsOrder[i];
        Object result = Util.deepSearchInMap(hitAsMap, field);
        if (result == null) {
            results.add("");
        } else {
            this.isAllNull = false;
            results.add(result.toString());
            this.flattenMap.put(field, result);
        }
    }
    this.comperator = Joiner.on(seperator).join(results);
}
Example 2: getComparisonKey
import org.elasticsearch.search.SearchHit; // import the package/class the method depends on
private String getComparisonKey(List<Map.Entry<Field, Field>> t1ToT2FieldsComparison, SearchHit hit, boolean firstTable, Map<String, List<Object>> optimizationTermsFilterStructure) {
    String key = "";
    Map<String, Object> sourceAsMap = hit.sourceAsMap();
    for (Map.Entry<Field, Field> t1ToT2 : t1ToT2FieldsComparison) {
        //todo: change to our function find if key contains '.'
        String name;
        if (firstTable) name = t1ToT2.getKey().getName();
        else name = t1ToT2.getValue().getName();

        Object data = deepSearchInMap(sourceAsMap, name);
        if (firstTable && useQueryTermsFilterOptimization) {
            updateOptimizationData(optimizationTermsFilterStructure, data, t1ToT2.getValue().getName());
        }
        if (data == null)
            key += "|null|";
        else
            key += "|" + data.toString() + "|";
    }
    return key;
}
Example 3: hitsContains
import org.elasticsearch.search.SearchHit; // import the package/class the method depends on
private boolean hitsContains(SearchHit[] hits, Map<String, Object> matchMap) {
    for (SearchHit hit : hits) {
        Map<String, Object> hitMap = hit.sourceAsMap();
        boolean matchedHit = true;
        for (Map.Entry<String, Object> entry : hitMap.entrySet()) {
            if (!matchMap.containsKey(entry.getKey())) {
                matchedHit = false;
                break;
            }
            if (!equalsWithNullCheck(matchMap.get(entry.getKey()), entry.getValue())) {
                matchedHit = false;
                break;
            }
        }
        if (matchedHit) return true;
    }
    return false;
}
Example 4: getFieldValue
import org.elasticsearch.search.SearchHit; // import the package/class the method depends on
private Object getFieldValue(SearchHit hit, String fieldName) {
    Map<String, Object> sourceAsMap = hit.sourceAsMap();
    if (fieldName.contains(".")) {
        String[] split = fieldName.split("\\.");
        return Util.searchPathInMap(sourceAsMap, split);
    } else if (sourceAsMap.containsKey(fieldName)) {
        return sourceAsMap.get(fieldName);
    }
    return null;
}
Example 5: createHeadersAndFillDocsMap
import org.elasticsearch.search.SearchHit; // import the package/class the method depends on
private List<String> createHeadersAndFillDocsMap(boolean flat, SearchHit[] hits, List<Map<String, Object>> docsAsMap) {
    Set<String> csvHeaders = new HashSet<>();
    for (SearchHit hit : hits) {
        Map<String, Object> doc = hit.sourceAsMap();
        Map<String, SearchHitField> fields = hit.getFields();
        for (SearchHitField searchHitField : fields.values()) {
            doc.put(searchHitField.getName(), searchHitField.value());
        }
        mergeHeaders(csvHeaders, doc, flat);
        if (this.indcludeId) {
            doc.put("_id", hit.id());
        }
        if (this.includeScore) {
            doc.put("_score", hit.score());
        }
        if (this.includeType) {
            doc.put("_type", hit.type());
        }
        docsAsMap.add(doc);
    }
    ArrayList<String> headersList = new ArrayList<>(csvHeaders);
    if (this.indcludeId) {
        headersList.add("_id");
    }
    if (this.includeScore) {
        headersList.add("_score");
    }
    if (this.includeType) {
        headersList.add("_type");
    }
    return headersList;
}
Example 6: createHeadersAndFillDocsMap
import org.elasticsearch.search.SearchHit; // import the package/class the method depends on
private List<String> createHeadersAndFillDocsMap(boolean flat, SearchHit[] hits, List<Map<String, Object>> docsAsMap) {
    Set<String> csvHeaders = new HashSet<>();
    for (SearchHit hit : hits) {
        Map<String, Object> doc = hit.sourceAsMap();
        Map<String, SearchHitField> fields = hit.getFields();
        for (SearchHitField searchHitField : fields.values()) {
            doc.put(searchHitField.getName(), searchHitField.value());
        }
        mergeHeaders(csvHeaders, doc, flat);
        if (this.includeScore) {
            doc.put("_score", hit.score());
        }
        if (this.includeType) {
            doc.put("_type", hit.type());
        }
        if (this.includeId) {
            doc.put("_id", hit.id());
        }
        docsAsMap.add(doc);
    }
    ArrayList<String> headersList = new ArrayList<>(csvHeaders);
    if (this.includeScore) {
        headersList.add("_score");
    }
    if (this.includeType) {
        headersList.add("_type");
    }
    if (this.includeId) {
        headersList.add("_id");
    }
    return headersList;
}
Example 7: notLikeTests
import org.elasticsearch.search.SearchHit; // import the package/class the method depends on
@Test
public void notLikeTests() throws IOException, SqlParseException, SQLFeatureNotSupportedException {
    // can't use String.format because of the %d in the query
    SearchHits response = query("SELECT name FROM " + TEST_INDEX + "/gotCharacters where name.firstname not like '%d' LIMIT 1000");
    Assert.assertEquals(3, response.getTotalHits());
    for (SearchHit hit : response.getHits()) {
        Map<String, Object> sourceAsMap = hit.sourceAsMap();
        String name = ((HashMap<String, Object>) sourceAsMap.get("name")).get("firstname").toString();
        Assert.assertFalse(name + " was in not like %d", name.startsWith("d"));
    }
}
Example 8: onResponse
import org.elasticsearch.search.SearchHit; // import the package/class the method depends on
@Override
public void onResponse(SearchResponse response) {
    long timeTaken = System.currentTimeMillis() - timeStartESQuery;

    Aggregations aggregs = response.getAggregations();
    Terms agg = aggregs.get("partition");

    int numhits = 0;
    int numBuckets = 0;
    int alreadyprocessed = 0;

    synchronized (buffer) {
        // for each bucket of the "partition" terms aggregation
        for (Terms.Bucket entry : agg.getBuckets()) {
            String key = (String) entry.getKey(); // bucket key
            long docCount = entry.getDocCount(); // doc count
            int hitsForThisBucket = 0;

            // filter results so that we don't include URLs that are
            // already being processed
            TopHits topHits = entry.getAggregations().get("docs");
            for (SearchHit hit : topHits.getHits().getHits()) {
                hitsForThisBucket++;

                Map<String, Object> keyValues = hit.sourceAsMap();
                String url = (String) keyValues.get("url");

                LOG.debug("{} -> id [{}], _source [{}]", logIdprefix,
                        hit.getId(), hit.getSourceAsString());

                // already being processed - skip it!
                if (beingProcessed.containsKey(url)) {
                    alreadyprocessed++;
                    continue;
                }

                Metadata metadata = fromKeyValues(keyValues);
                buffer.add(new Values(url, metadata));
            }

            if (hitsForThisBucket > 0)
                numBuckets++;

            numhits += hitsForThisBucket;

            LOG.debug("{} key [{}], hits[{}], doc_count [{}]", logIdprefix,
                    key, hitsForThisBucket, docCount, alreadyprocessed);
        }
        // shuffle the URLs so that we don't get blocks of URLs from the
        // same host or domain
        Collections.shuffle((List) buffer);
    }

    LOG.info(
            "{} ES query returned {} hits from {} buckets in {} msec with {} already being processed",
            logIdprefix, numhits, numBuckets, timeTaken, alreadyprocessed);

    esQueryTimes.addMeasurement(timeTaken);
    eventCounter.scope("already_being_processed").incrBy(alreadyprocessed);
    eventCounter.scope("ES_queries").incrBy(1);
    eventCounter.scope("ES_docs").incrBy(numhits);

    // remove the lock
    isInESQuery.set(false);
}