This article collects typical usage examples of the org.apache.lucene.util.UnicodeUtil.BIG_TERM constant in Java. If you have been wondering what UnicodeUtil.BIG_TERM is for and how to use it, the curated examples here may help. You can also explore further usage examples of its declaring class, org.apache.lucene.util.UnicodeUtil.
Shown below are 5 code examples of the UnicodeUtil.BIG_TERM constant, sorted by popularity by default.
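For context: UnicodeUtil.BIG_TERM is a BytesRef consisting of 0xFF bytes. Because the byte 0xFF never occurs in valid UTF-8, BIG_TERM compares greater than every real indexed term, which makes it useful as an "infinitely large" sentinel. A minimal demonstration (the class name is ours, not taken from the examples below):

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.UnicodeUtil;

public class BigTermDemo {
    public static void main(String[] args) {
        // 0xFF cannot appear in UTF-8, so BIG_TERM sorts after any UTF-8 encoded term...
        System.out.println(UnicodeUtil.BIG_TERM.compareTo(new BytesRef("zzzz")) > 0); // true
        // ...and after the empty term as well.
        System.out.println(UnicodeUtil.BIG_TERM.compareTo(new BytesRef()) > 0);       // true
    }
}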
Example 1: testSortMetaField
public void testSortMetaField() throws Exception {
    createIndex("test");
    ensureGreen();
    final int numDocs = randomIntBetween(10, 20);
    IndexRequestBuilder[] indexReqs = new IndexRequestBuilder[numDocs];
    for (int i = 0; i < numDocs; ++i) {
        indexReqs[i] = client().prepareIndex("test", "type", Integer.toString(i))
                .setSource();
    }
    indexRandom(true, indexReqs);

    SortOrder order = randomFrom(SortOrder.values());
    SearchResponse searchResponse = client().prepareSearch()
            .setQuery(matchAllQuery())
            .setSize(randomIntBetween(1, numDocs + 5))
            .addSort("_uid", order)
            .execute().actionGet();
    assertNoFailures(searchResponse);
    SearchHit[] hits = searchResponse.getHits().getHits();
    // Seed the comparison with a sentinel: the empty term sorts before every _uid,
    // BIG_TERM after every _uid.
    BytesRef previous = order == SortOrder.ASC ? new BytesRef() : UnicodeUtil.BIG_TERM;
    for (int i = 0; i < hits.length; ++i) {
        final BytesRef uid = new BytesRef(Uid.createUid(hits[i].getType(), hits[i].getId()));
        assertThat(previous, order == SortOrder.ASC ? lessThan(uid) : greaterThan(uid));
        previous = uid;
    }
}
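The reusable idea in this test is seeding a running comparison with a sentinel: the empty BytesRef is a universal lower bound and BIG_TERM a universal upper bound, so the first real value always passes the strict comparison. A self-contained sketch of the same check, with hypothetical names, no test-framework dependencies, and the assumption that the values are non-empty (as _uid values are):

import java.util.List;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.UnicodeUtil;

final class SortOrderCheck {
    // Throws if `values` is not strictly sorted in the requested direction.
    static void checkStrictlySorted(List<BytesRef> values, boolean ascending) {
        BytesRef previous = ascending ? new BytesRef() : UnicodeUtil.BIG_TERM;
        for (BytesRef current : values) {
            int cmp = previous.compareTo(current);
            if (ascending ? cmp >= 0 : cmp <= 0) {
                throw new AssertionError("out of order: " + previous + " vs " + current);
            }
            previous = current;
        }
    }
}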
Example 2: decrementKey
@Override
public TermDocIndexKey decrementKey(TermDocIndexKey previousKey) {
    int termIndex = previousKey.index;
    BytesRef docId = previousKey.docId;
    do {
        // Walk doc ids downward within the current term.
        while ((docId = decrementDocId(termIndex, docId)) != null) {
            int docIndex = acceptDoc(termIndex, docId);
            if (docIndex >= 0) {
                localDocIndex = docIndex;
                return termDocIndexKey = new TermDocIndexKey(termIndex, docId);
            }
        }
        // Term exhausted: reset to BIG_TERM so the scan over the previous term
        // starts from above its largest doc id.
        docId = UnicodeUtil.BIG_TERM;
    } while ((termIndex = decrementTermIndex(termIndex)) >= 0);
    localDocIndex = -1;
    return termDocIndexKey = null;
}
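Here BIG_TERM plays the role of "one past the largest doc id": when the current term's docs are exhausted, the cursor steps to the previous term and restarts its downward doc scan from below the sentinel. A simplified, hypothetical model of that loop (a NavigableMap stands in for the real term/doc index, and the original's acceptDoc filtering is omitted):

import java.util.Map;
import java.util.NavigableMap;
import java.util.NavigableSet;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.UnicodeUtil;

final class ReverseCursor {
    // Returns the previous (termIndex, docId) pair, or null when iteration is exhausted.
    static Map.Entry<Integer, BytesRef> previousPair(
            NavigableMap<Integer, NavigableSet<BytesRef>> docsByTerm,
            int termIndex, BytesRef docId) {
        while (termIndex >= 0) {
            // Greatest doc id strictly below the current position within this term.
            BytesRef prev = docsByTerm.get(termIndex).lower(docId);
            if (prev != null) {
                return Map.entry(termIndex, prev);
            }
            docId = UnicodeUtil.BIG_TERM; // restart from above the largest doc id of the previous term
            termIndex--;
        }
        return null;
    }
}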
Example 3: targetKeyInit
@Override
public TermDocIndexKey targetKeyInit(boolean ascending) throws IOException {
    int termIndex = getTargetKeyIndexInit(ascending);
    if (termIndex < 0) {
        return null;
    }
    int rawTargetIdx = getTargetKeyIndex();
    BytesRef initTargetDoc = targetDoc;
    if (rawTargetIdx < termIndex) {
        // Target term lies before the chosen term: start from below the smallest doc id.
        initTargetDoc = null;
    } else if (rawTargetIdx > termIndex) {
        // Target term lies after the chosen term: start from above the largest doc id.
        initTargetDoc = UnicodeUtil.BIG_TERM;
    }
    TermDocIndexKey ret = new TermDocIndexKey(termIndex, initTargetDoc);
    int docIndex = acceptDoc(termIndex, initTargetDoc);
    if (docIndex >= 0) {
        localDocIndex = docIndex;
        return termDocIndexKey = ret;
    } else if (ascending) {
        return incrementKey(ret);
    } else {
        return decrementKey(ret);
    }
}
Example 4: highlightFields
/**
 * Highlights the top-N passages from multiple fields.
 * <p>
 * Conceptually, this behaves as a more efficient form of:
 * <pre class="prettyprint">
 * Map m = new HashMap();
 * for (String field : fields) {
 *   m.put(field, highlight(field, query, searcher, topDocs, maxPassages));
 * }
 * return m;
 * </pre>
 *
 * @param fields field names to highlight.
 *        Must have a stored string value and also be indexed with offsets.
 * @param query query to highlight.
 * @param searcher searcher that was previously used to execute the query.
 * @param topDocs TopDocs containing the summary result documents to highlight.
 * @param maxPassages The maximum number of top-N ranked passages per-field used to
 *        form the highlighted snippets.
 * @return Map keyed on field name, containing the array of formatted snippets
 *         corresponding to the documents in <code>topDocs</code>.
 *         If no highlights were found for a document, its value is <code>null</code>.
 * @throws IOException if an I/O error occurred during processing
 * @throws IllegalArgumentException if <code>field</code> was indexed without
 *         {@link IndexOptions#DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS}
 */
public Map<String,String[]> highlightFields(String fields[], Query query, IndexSearcher searcher, TopDocs topDocs, int maxPassages) throws IOException {
    final IndexReader reader = searcher.getIndexReader();
    final ScoreDoc scoreDocs[] = topDocs.scoreDocs;
    query = rewrite(query);
    SortedSet<Term> queryTerms = new TreeSet<Term>();
    query.extractTerms(queryTerms);

    int docids[] = new int[scoreDocs.length];
    for (int i = 0; i < docids.length; i++) {
        docids[i] = scoreDocs[i].doc;
    }
    IndexReaderContext readerContext = reader.getContext();
    List<AtomicReaderContext> leaves = readerContext.leaves();
    BreakIterator bi = (BreakIterator) breakIterator.clone();

    // sort for sequential io
    Arrays.sort(docids);
    Arrays.sort(fields);

    // pull stored data:
    LimitedStoredFieldVisitor visitor = new LimitedStoredFieldVisitor(fields, maxLength);
    String contents[][] = new String[fields.length][docids.length];
    for (int i = 0; i < docids.length; i++) {
        searcher.doc(docids[i], visitor);
        for (int j = 0; j < fields.length; j++) {
            contents[j][i] = visitor.getValue(j).toString();
        }
        visitor.reset();
    }

    Map<String,String[]> highlights = new HashMap<String,String[]>();
    for (int i = 0; i < fields.length; i++) {
        String field = fields[i];
        // Term sorts by field first, then text: the range [ (field, ""), (field, BIG_TERM) )
        // covers exactly the query terms belonging to this field.
        Term floor = new Term(field, "");
        Term ceiling = new Term(field, UnicodeUtil.BIG_TERM);
        SortedSet<Term> fieldTerms = queryTerms.subSet(floor, ceiling);
        // TODO: should we have some reasonable defaults for term pruning? (e.g. stopwords)
        Term terms[] = fieldTerms.toArray(new Term[fieldTerms.size()]);
        Map<Integer,String> fieldHighlights = highlightField(field, contents[i], bi, terms, docids, leaves, maxPassages);

        String[] result = new String[scoreDocs.length];
        for (int j = 0; j < scoreDocs.length; j++) {
            result[j] = fieldHighlights.get(scoreDocs[j].doc);
        }
        highlights.put(field, result);
    }
    return highlights;
}
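The BIG_TERM usage here is the floor/ceiling trick for slicing one field's terms out of a sorted set: Term orders by field name first and term bytes second, so the empty term is a lower bound and BIG_TERM an upper bound within a field. A standalone sketch (the field names and terms are made up):

import java.util.SortedSet;
import java.util.TreeSet;
import org.apache.lucene.index.Term;
import org.apache.lucene.util.UnicodeUtil;

public class FieldTermSlice {
    public static void main(String[] args) {
        SortedSet<Term> queryTerms = new TreeSet<Term>();
        queryTerms.add(new Term("body", "search"));
        queryTerms.add(new Term("title", "lucene"));
        queryTerms.add(new Term("title", "index"));

        // Half-open range [ (title, ""), (title, BIG_TERM) ): exactly the "title" terms.
        SortedSet<Term> titleTerms = queryTerms.subSet(
                new Term("title", ""), new Term("title", UnicodeUtil.BIG_TERM));
        System.out.println(titleTerms); // [title:index, title:lucene]
    }
}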
Example 5: MissingStringLastComparatorSource
public MissingStringLastComparatorSource() {
    // Substitute BIG_TERM for missing values so documents without the field sort last.
    this(UnicodeUtil.BIG_TERM);
}
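This no-arg constructor from Solr delegates BIG_TERM as the stand-in value for documents that have no value in the sort field, so they compare after every real term and land last in an ascending sort. A hedged usage sketch (the "category" field is an assumption, and the searcher is supplied by the caller rather than built here):

import java.io.IOException;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;

final class MissingLastExample {
    // Ascending sort on "category"; docs missing the field appear after all docs that have one.
    static TopDocs searchMissingLast(IndexSearcher searcher) throws IOException {
        Sort sort = new Sort(new SortField("category", new MissingStringLastComparatorSource()));
        return searcher.search(new MatchAllDocsQuery(), 10, sort);
    }
}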