本文整理汇总了Java中org.apache.lucene.index.LeafReader.postings方法的典型用法代码示例。如果您正苦于以下问题：Java LeafReader.postings方法的具体用法？Java LeafReader.postings怎么用？Java LeafReader.postings使用的例子？那么，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.lucene.index.LeafReader
的用法示例。
在下文中一共展示了LeafReader.postings方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: printAnnotations
import org.apache.lucene.index.LeafReader; //导入方法依赖的package包/类
/**
 * Prints every annotation payload for {@code term} in the given segment.
 * Each payload is decoded as two VInts: a start offset followed by a length.
 *
 * @param reader segment reader to pull postings from
 * @param term   the annotation term whose payloads are printed
 * @throws IOException if postings cannot be read
 */
public static void printAnnotations(LeafReader reader, Term term) throws IOException {
    System.out.println("Annotations for " + term);
    final ByteArrayDataInput in = new ByteArrayDataInput();
    // postings() returns null when the term does not occur in this segment;
    // the original code dereferenced it unconditionally and could NPE.
    final PostingsEnum postings = reader.postings(term, PostingsEnum.PAYLOADS);
    if (postings == null) {
        System.out.println("  (term not present in this segment)");
        return;
    }
    for (int docID = postings.nextDoc(); docID != DocIdSetIterator.NO_MORE_DOCS; docID = postings.nextDoc()) {
        final int freq = postings.freq();
        System.out.println("  doc=" + docID + ", freq=" + freq);
        for (int i = 0; i < freq; i++) {
            postings.nextPosition();
            final BytesRef payload = postings.getPayload();
            // Payload layout: VInt start, VInt length (see indexing side).
            in.reset(payload.bytes, payload.offset, payload.length);
            System.out.println("    start=" + in.readVInt() + ", length=" + in.readVInt());
        }
    }
}
示例2: executeNeedleTests
import org.apache.lucene.index.LeafReader; //导入方法依赖的package包/类
/**
 * Indexes {@code numFieldValues} multi-valued field instances containing the
 * analyzer-specific needle, then walks the needle's postings and checks that
 * the substring rebuilt from offsets equals the needle for every visited hit.
 */
private void executeNeedleTests(Analyzer analyzer) throws Exception {
    final String needle = getNeedle(analyzer);
    final int numFieldValues = 23;
    final Directory directory = buildNeedleIndex(needle, analyzer, numFieldValues);
    final IndexReader reader = DirectoryReader.open(directory);
    final LeafReader leaf = reader.leaves().get(0).reader();
    final PostingsEnum postings = leaf.postings(new Term(FIELD, needle), PostingsEnum.ALL);
    int numTests = 0;
    try {
        for (int docId = postings.nextDoc();
                docId != DocIdSetIterator.NO_MORE_DOCS;
                docId = postings.nextDoc()) {
            final int freq = postings.freq();
            final String[] fieldValues = leaf.document(postings.docID()).getValues(FIELD);
            // NOTE(review): intentionally visits freq-1 positions per doc (the
            // original loop was "while (++advanced < frq)"); this matches the
            // final numFieldValues - 1 assertion below.
            for (int visited = 1; visited < freq; visited++) {
                postings.nextPosition();
                final String rebuilt = SimpleAnalyzerUtil.substringFromMultiValuedFields(
                        postings.startOffset(), postings.endOffset(),
                        fieldValues, analyzer.getOffsetGap(FIELD), " | ");
                assertEquals(needle, rebuilt);
                numTests++;
            }
        }
    } finally {
        reader.close();
        directory.close();
    }
    assertEquals("number of tests", numFieldValues - 1, numTests);
}
示例3: main
import org.apache.lucene.index.LeafReader; //导入方法依赖的package包/类
@SuppressWarnings("resource")
public static void main(String[] args) throws Exception {
    // Index one document whose token stream emits "a" then "b" with a
    // position increment of 0, i.e. "b" is stacked on the same position as "a".
    final Directory dir = new RAMDirectory();
    final IndexWriterConfig conf = new IndexWriterConfig(new WhitespaceAnalyzer());
    final IndexWriter writer = new IndexWriter(dir, conf);
    final Document doc = new Document();
    doc.add(new TextField("f", new TokenStream() {
        final PositionIncrementAttribute pos = addAttribute(PositionIncrementAttribute.class);
        final CharTermAttribute term = addAttribute(CharTermAttribute.class);
        boolean first = true, done = false;

        @Override
        public boolean incrementToken() throws IOException {
            if (done) {
                return false;
            }
            if (first) {
                term.setEmpty().append("a");
                pos.setPositionIncrement(1);
                first = false;
            } else {
                term.setEmpty().append("b");
                pos.setPositionIncrement(0); // overlap "b" with "a"
                done = true;
            }
            return true;
        }
    }));
    writer.addDocument(doc);
    writer.close();

    final DirectoryReader reader = DirectoryReader.open(dir);
    final IndexSearcher searcher = new IndexSearcher(reader);
    final LeafReader ar = reader.leaves().get(0).reader();

    // Dump each term of field "f" with its first doc and position.
    final TermsEnum te = ar.terms("f").iterator();
    BytesRef scratch;
    while ((scratch = te.next()) != null) {
        System.out.println(scratch.utf8ToString());
        // BUG FIX: the flag-less postings(Term) overload only guarantees
        // doc/freq data; calling nextPosition() on it is undefined. Request
        // POSITIONS explicitly so the position read below is legal.
        final PostingsEnum dape =
                ar.postings(new Term("f", scratch.utf8ToString()), PostingsEnum.POSITIONS);
        System.out.println(" doc=" + dape.nextDoc() + ", pos=" + dape.nextPosition());
    }
    System.out.println();

    // Phrase queries: with overlapping tokens, "a b" only matches once some
    // slop is allowed; compare exact, ~1 and ~3.
    final PhraseQuery pqNoSlop = buildPhraseQuery(0);
    System.out.println("searching for \"a b\"; num results = " + searcher.search(pqNoSlop, 10).totalHits);
    final PhraseQuery pqSlop1 = buildPhraseQuery(1);
    System.out.println("searching for \"a b\"~1; num results = " + searcher.search(pqSlop1, 10).totalHits);
    final PhraseQuery pqSlop3 = buildPhraseQuery(3);
    System.out.println("searching for \"a b\"~3; num results = " + searcher.search(pqSlop3, 10).totalHits);

    // Span queries over the same overlapping terms, unordered vs ordered.
    final SpanNearQuery snqUnOrdered =
            new SpanNearQuery(new SpanQuery[] { new SpanTermQuery(new Term("f", "a")),
                    new SpanTermQuery(new Term("f", "b")) }, 1, false);
    System.out.println("searching for SpanNearUnordered('a', 'b'), slop=1; num results = "
            + searcher.search(snqUnOrdered, 10).totalHits);
    final SpanNearQuery snqOrdered = new SpanNearQuery(new SpanQuery[] { new SpanTermQuery(new Term("f", "a")),
            new SpanTermQuery(new Term("f", "b")) }, 1, true);
    System.out.println("searching for SpanNearOrdered('a', 'b'), slop=1; num results = "
            + searcher.search(snqOrdered, 10).totalHits);
    reader.close();
}