本文整理汇总了Java中org.apache.lucene.search.ScoreDoc类的典型用法代码示例。如果您正苦于以下问题:Java ScoreDoc类的具体用法?Java ScoreDoc怎么用?Java ScoreDoc使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
ScoreDoc类属于org.apache.lucene.search包,在下文中一共展示了ScoreDoc类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: checkIndexContent
import org.apache.lucene.search.ScoreDoc; //导入依赖的package包/类
/**
 * Asserts that the index holds exactly {@code expectedAmount} documents whose
 * FIELDNAME term equals {@code fieldContent}, and that the top hit carries the
 * expected element id and index type.
 *
 * @param elementId      expected stored id of the top hit
 * @param fieldContent   term searched in FIELDNAME
 * @param expectedAmount number of hits the query must produce
 * @throws IOException if reading the index fails
 */
private void checkIndexContent(final String elementId,
        final String fieldContent, final int expectedAmount) throws IOException {
    final IndexReader reader = IndexManager.getInstance().getIndex().getIndexReader();
    final IndexSearcher searcher = new IndexSearcher(reader);
    // Ask for a few more hits than expected so an over-full index is detected too.
    final TopDocs topDocs = searcher.search(new TermQuery(new Term(FIELDNAME, fieldContent)), expectedAmount + 10);
    assertNotNull(topDocs);
    // assertEquals reports expected vs. actual on failure, unlike assertTrue(a == b).
    assertEquals(expectedAmount, topDocs.totalHits);
    if (expectedAmount > 0) {
        final ScoreDoc scoreDoc = topDocs.scoreDocs[0];
        assertNotNull(scoreDoc);
        final Document doc = reader.document(scoreDoc.doc);
        assertNotNull(doc);
        assertEquals(fieldContent, doc.get(FIELDNAME));
        assertEquals(elementId, doc.get(IIndexElement.FIELD_ID));
        assertEquals(INDEX_TYPE, doc.get(IIndexElement.FIELD_INDEX_TYPE));
    }
}
示例2: doEquals
import org.apache.lucene.search.ScoreDoc; //导入依赖的package包/类
@Override
protected boolean doEquals(Object obj) {
    final InternalTopHits other = (InternalTopHits) obj;
    if (from != other.from || size != other.size) {
        return false;
    }
    if (topDocs.totalHits != other.topDocs.totalHits
            || topDocs.scoreDocs.length != other.topDocs.scoreDocs.length) {
        return false;
    }
    // Compare every hit pairwise; ScoreDoc has no usable equals() of its own.
    for (int d = 0; d < topDocs.scoreDocs.length; d++) {
        if (scoreDocsDiffer(topDocs.scoreDocs[d], other.topDocs.scoreDocs[d])) {
            return false;
        }
    }
    return searchHits.equals(other.searchHits);
}

/** Returns true when the two hits differ in doc id, score, shard index, or sort fields. */
private static boolean scoreDocsDiffer(ScoreDoc a, ScoreDoc b) {
    if (a.doc != b.doc || Double.compare(a.score, b.score) != 0 || a.shardIndex != b.shardIndex) {
        return true;
    }
    if (a instanceof FieldDoc) {
        if (!(b instanceof FieldDoc)) {
            return true;
        }
        final FieldDoc fa = (FieldDoc) a;
        final FieldDoc fb = (FieldDoc) b;
        if (fa.fields.length != fb.fields.length) {
            return true;
        }
        for (int f = 0; f < fa.fields.length; f++) {
            if (!fa.fields[f].equals(fb.fields[f])) {
                return true;
            }
        }
    }
    return false;
}
示例3: doHashCode
import org.apache.lucene.search.ScoreDoc; //导入依赖的package包/类
@Override
protected int doHashCode() {
    // Fold in the paging parameters, every score doc, then the hits themselves,
    // mirroring the field order checked by doEquals.
    int result = from;
    result = 31 * result + size;
    result = 31 * result + topDocs.totalHits;
    for (ScoreDoc doc : topDocs.scoreDocs) {
        result = 31 * result + doc.doc;
        result = 31 * result + Float.floatToIntBits(doc.score);
        result = 31 * result + doc.shardIndex;
        if (doc instanceof FieldDoc) {
            result = 31 * result + Arrays.hashCode(((FieldDoc) doc).fields);
        }
    }
    return 31 * result + searchHits.hashCode();
}
示例4: replayRelatedMatches
import org.apache.lucene.search.ScoreDoc; //导入依赖的package包/类
/**
 * Replays the given root-level hits into the deferred collector for this leaf.
 * Doc ids arrive relative to the top-level reader and are rebased onto this
 * segment before collection.
 */
public void replayRelatedMatches(ScoreDoc[] sd) throws IOException {
    final LeafBucketCollector leafCollector = deferred.getLeafCollector(readerContext);
    leafCollector.setScorer(this);
    currentScore = 0;
    currentDocId = -1;
    // Nothing was collected on this segment.
    if (maxDocId < 0) {
        return;
    }
    for (ScoreDoc scoreDoc : sd) {
        // Doc ids from TopDocCollector are root-level, so rebase onto this segment.
        final int segmentDoc = scoreDoc.doc - readerContext.docBase;
        if (segmentDoc >= 0 && segmentDoc <= maxDocId) {
            currentScore = scoreDoc.score;
            currentDocId = segmentDoc;
            // The bucket ID was stashed in Lucene's shardIndex property for convenience.
            leafCollector.collect(segmentDoc, scoreDoc.shardIndex);
        }
    }
}
示例5: getLastEmittedDocPerShard
import org.apache.lucene.search.ScoreDoc; //导入依赖的package包/类
/**
 * Builds, per shard, the last document emitted to the caller so the next scroll
 * round can resume after it. Entries stay {@code null} for shards that did not
 * contribute any sorted docs.
 */
public ScoreDoc[] getLastEmittedDocPerShard(ReducedQueryPhase reducedQueryPhase,
        ScoreDoc[] sortedScoreDocs, int numShards) {
    final ScoreDoc[] lastEmittedDocPerShard = new ScoreDoc[numShards];
    if (!reducedQueryPhase.isEmpty()) {
        // from is always zero here: scroll requests ignore it.
        long limit = Math.min(reducedQueryPhase.fetchHits, reducedQueryPhase.oneResult.size());
        // With collapsing there can be more hits than sorted docs, so clamp again.
        limit = Math.min(sortedScoreDocs.length, limit);
        for (int idx = 0; idx < limit; idx++) {
            final ScoreDoc scoreDoc = sortedScoreDocs[idx];
            lastEmittedDocPerShard[scoreDoc.shardIndex] = scoreDoc;
        }
    }
    return lastEmittedDocPerShard;
}
示例6: next
import org.apache.lucene.search.ScoreDoc; //导入依赖的package包/类
/**
 * Advances to and returns the next search hit, detaching the previously
 * returned hit from the shared searcher first.
 *
 * @throws NoSuchElementException when the iteration is exhausted
 */
@SneakyThrows
@Override
public LuceneSearchHit next() {
    if (!hasNext()) {
        throw new NoSuchElementException();
    }
    // The previously handed-out hit must stop using the searcher before we move on.
    if (current != null) {
        current.unlinkSearcher();
    }
    final ScoreDoc scoreDoc = topDocs.scoreDocs[++i];
    current = new LuceneSearchHitImpl(index.getName(), searcher, scoreDoc.doc, scoreDoc.score);
    return current;
}
示例7: testSort
import org.apache.lucene.search.ScoreDoc; //导入依赖的package包/类
/**
 * Verifies that sortDocs returns one entry per expected query hit plus one per
 * reduced suggestion option.
 */
public void testSort() throws Exception {
    List<CompletionSuggestion> suggestions = new ArrayList<>();
    // Hoist the random bound: evaluating randomIntBetween(1, 5) in the loop
    // condition re-rolls the limit on every iteration, skewing how many
    // suggestions actually get built.
    final int suggestionCount = randomIntBetween(1, 5);
    for (int i = 0; i < suggestionCount; i++) {
        suggestions.add(new CompletionSuggestion(randomAsciiOfLength(randomIntBetween(1, 5)), randomIntBetween(1, 20)));
    }
    int nShards = randomIntBetween(1, 20);
    int queryResultSize = randomBoolean() ? 0 : randomIntBetween(1, nShards * 2);
    AtomicArray<QuerySearchResultProvider> results = generateQueryResults(nShards, suggestions, queryResultSize, false);
    ScoreDoc[] sortedDocs = searchPhaseController.sortDocs(true, results);
    // Expected length: capped query hits plus every suggestion option that survived the reduce.
    int accumulatedLength = Math.min(queryResultSize, getTotalQueryHits(results));
    for (Suggest.Suggestion<?> suggestion : reducedSuggest(results)) {
        accumulatedLength += suggestion.getEntries().get(0).getOptions().size();
    }
    assertThat(sortedDocs.length, equalTo(accumulatedLength));
}
示例8: getTopShardDocs
import org.apache.lucene.search.ScoreDoc; //导入依赖的package包/类
/**
 * Merges each shard's top docs into one globally sorted ScoreDoc array, capped
 * at the smaller of the requested result size and the total number of hits.
 */
private ScoreDoc[] getTopShardDocs(AtomicArray<QuerySearchResultProvider> results) throws IOException {
    final List<AtomicArray.Entry<QuerySearchResultProvider>> entries = results.asList();
    final TopDocs[] shardTopDocs = new TopDocs[entries.size()];
    for (int shard = 0; shard < entries.size(); shard++) {
        shardTopDocs[shard] = entries.get(shard).value.queryResult().topDocs();
    }
    final int topN = Math.min(results.get(0).queryResult().size(), getTotalQueryHits(results));
    return TopDocs.merge(topN, shardTopDocs).scoreDocs;
}
示例9: main
import org.apache.lucene.search.ScoreDoc; //导入依赖的package包/类
/**
 * Searches the on-disk index for the term "shakespeare" and prints the paths
 * and scores of the top 5 matching documents.
 */
public static void main(String[] args) throws Exception {
    // try-with-resources: the original leaked both the directory and the reader.
    try (FSDirectory directory = FSDirectory.open(Paths.get(INDEX_DIRECTORY));
         IndexReader reader = DirectoryReader.open(directory)) {
        IndexSearcher indexSearcher = new IndexSearcher(reader);
        Analyzer analyzer = new StandardAnalyzer();
        QueryParser queryParser = new QueryParser(FIELD_CONTENTS, analyzer);
        String searchString = "shakespeare";
        Query query = queryParser.parse(searchString);
        TopDocs results = indexSearcher.search(query, 5);
        ScoreDoc[] hits = results.scoreDocs;
        int numTotalHits = results.totalHits;
        System.out.println(numTotalHits + " total matching documents");
        for (int i = 0; i < hits.length; ++i) {
            int docId = hits[i].doc;
            Document d = indexSearcher.doc(docId);
            System.out.println((i + 1) + ". " + d.get("path") + " score=" + hits[i].score);
        }
    }
}
示例10: get
import org.apache.lucene.search.ScoreDoc; //导入依赖的package包/类
/**
 * Gets the ResultDoc at the given location.
 *
 * @param i Index
 * @return The ResultDoc
 * @throws IndexOutOfBoundsException when {@code i} is outside [0, size())
 */
public ResultDoc get(int i) {
    if (i < 0 || i >= size()) {
        throw new IndexOutOfBoundsException("Index " + i + " is out of bounds. Must be greater than or equal to 0 and less than " + size());
    }
    // Fast path: this List may be backed by an already-materialized array.
    if (_resultDocs != null) {
        return _resultDocs[i];
    }
    // Otherwise build the ResultDoc from the raw Lucene hit on demand.
    final ScoreDoc hit = _topDocs.scoreDocs[i];
    return new ResultDoc(_resultDocConfig, hit.doc, hit.score);
}
示例11: synTokenQuery
import org.apache.lucene.search.ScoreDoc; //导入依赖的package包/类
/**
 * Queries the "surfaceFormTokens" field for {@code search} and, for every hit
 * scoring at least {@code minLuceneScore}, records the best score seen so far
 * per concept ID into {@code result}.
 */
private void synTokenQuery(String search, final int numbOfResults, final double minLuceneScore,
        Map<String, Float> result, IndexSearcher searcher) throws ParseException, IOException {
    final QueryParser parser = new QueryParser(Version.LUCENE_46, "surfaceFormTokens",
            new StandardAnalyzer(Version.LUCENE_46));
    // Escape the input so Lucene query-syntax characters are matched literally.
    // (A FuzzyQuery on a String field would be an alternative here.)
    final Query query = parser.parse(QueryParser.escape(search));
    final TopDocs top = searcher.search(query, numbOfResults);
    for (ScoreDoc hit : top.scoreDocs) {
        if (hit.score >= minLuceneScore) {
            final String conceptId = searcher.doc(hit.doc).get("conceptID");
            // Keep only the highest score observed for each concept.
            if (result.getOrDefault(conceptId, 0f) < hit.score) {
                result.put(conceptId, hit.score);
            }
        }
    }
}
示例12: Search
import org.apache.lucene.search.ScoreDoc; //导入依赖的package包/类
/**
 * Runs a query against the "summary" field using the configured analyzer and
 * converts the raw hits through the supplied collector.
 *
 * @param searchString raw query text whose bytes were delivered as ISO-8859-1
 *                     but actually contain UTF-8 content
 * @param luceneResultCollector turns matched ScoreDocs into result objects
 * @return the collected result list
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException if searching fails
 * @throws ParseException if the query text cannot be parsed
 */
public List Search(String searchString,LuceneResultCollector luceneResultCollector) throws CorruptIndexException, IOException, ParseException{
    // Approach 1: parse the text through the configured analyzer.
    System.out.println(this.indexSettings.getAnalyzer().getClass()+"----分词选择");
    final QueryParser parser = new QueryParser(Version.LUCENE_44, "summary", this.indexSettings.getAnalyzer());
    // Re-decode the request: the container handed UTF-8 bytes over as ISO-8859-1.
    final String search = new String(searchString.getBytes("ISO-8859-1"), "UTF-8");
    System.out.println(search+"----------搜索的词语dd");
    final Query query = parser.parse(search);
    // Approach 2 (a raw TermQuery on "title") is intentionally not used here.
    System.out.println(query.toString()+"--------query.tostring");
    final ScoreDoc[] docs = this.indexSearcher.search(query,100).scoreDocs;
    System.out.println("一共有:"+docs.length+"条记录");
    return luceneResultCollector.collect(docs, this.indexSearcher);
}
示例13: search
import org.apache.lucene.search.ScoreDoc; //导入依赖的package包/类
/**
 * Looks up {@code word} in the given field and returns a map from verb to its
 * stored frequency, reading at most {@code maxSearch} hits. Parse and I/O
 * failures are logged and yield whatever was accumulated so far.
 */
public Map<String, Integer> search(String word, String field, int maxSearch) {
    // Lazily build the searcher on first use.
    if (indexSearcher == null) {
        initialize(index);
    }
    final Map<String, Integer> verbFreqs = new HashMap<>();
    final QueryParser queryParser = new QueryParser(Version.LUCENE_36, field, analyzer);
    try {
        final TopDocs topDocs = indexSearcher.search(queryParser.parse(word), maxSearch);
        final ScoreDoc[] hits = topDocs.scoreDocs;
        final int limit = Math.min(maxSearch, hits.length);
        for (int i = 0; i < limit; ++i) {
            final Document document = indexSearcher.doc(hits[i].doc);
            verbFreqs.put(document.get(VERB), Integer.parseInt(document.get(FREQ)));
        }
    } catch (ParseException | IOException e) {
        log.warn("Error searching Lucene index.", e);
    }
    return verbFreqs;
}
示例14: replayRelatedMatches
import org.apache.lucene.search.ScoreDoc; //导入依赖的package包/类
/**
 * Pushes the supplied top-level hits through the deferred collector for this
 * leaf, translating each root-level doc id into a segment-local one.
 */
public void replayRelatedMatches(ScoreDoc[] sd) throws IOException {
    final LeafBucketCollector leafCollector = deferred.getLeafCollector(readerContext);
    leafCollector.setScorer(this);
    currentScore = 0;
    currentDocId = -1;
    if (maxDocId < 0) {
        // This segment collected nothing; no replay needed.
        return;
    }
    for (ScoreDoc scoreDoc : sd) {
        // TopDocCollector doc ids are root-level Reader ids and need rebasing.
        final int localDoc = scoreDoc.doc - readerContext.docBase;
        final boolean belongsToSegment = localDoc >= 0 && localDoc <= maxDocId;
        if (belongsToSegment) {
            currentScore = scoreDoc.score;
            currentDocId = localDoc;
            // The bucket ID was stored in Lucene's shardIndex property for convenience.
            leafCollector.collect(localDoc, scoreDoc.shardIndex);
        }
    }
}
示例15: innerExecuteFetchPhase
import org.apache.lucene.search.ScoreDoc; //导入依赖的package包/类
/**
 * Fetch phase of the search: sorts the merged query results, works out which
 * doc ids must be loaded from which shard, and dispatches one fetch request
 * per shard that has docs to load.
 */
void innerExecuteFetchPhase() throws Exception {
    final boolean useScroll = request.scroll() != null;
    sortedShardList = searchPhaseController.sortDocs(useScroll, queryResults);
    searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);
    // Nothing to fetch: the merged query result already answers the request.
    if (docIdsToLoad.asList().isEmpty()) {
        finishHim();
        return;
    }
    // Per-shard resume points so a scroll's next page starts after the last emitted doc.
    final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(
            request, sortedShardList, firstResults.length());
    // Tracks the number of outstanding shard fetches.
    final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
    for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
        final QuerySearchResult queryResult = queryResults.get(entry.index);
        final DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
        final ShardFetchSearchRequest fetchRequest = createFetchRequest(queryResult, entry, lastEmittedDocPerShard);
        executeFetch(entry.index, queryResult.shardTarget(), counter, fetchRequest, node);
    }
}