This article collects typical usage examples of the Java class org.apache.lucene.search.PhraseQuery.Builder. If you are wondering what PhraseQuery.Builder is for, or how to use it, the curated code examples below should help. You can also read more about the enclosing class, org.apache.lucene.search.PhraseQuery.
The following sections show 11 code examples of PhraseQuery.Builder, sorted by popularity by default.
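Before the extracted examples, here is a minimal, self-contained sketch of the builder pattern they all share; the field name and terms are illustrative and not taken from any example below.

import org.apache.lucene.index.Term;
import org.apache.lucene.search.PhraseQuery;

// Build the phrase "quick fox" on the field "body", allowing one intervening
// position (slop = 1). In recent Lucene versions a PhraseQuery is immutable once
// built, which is why several examples below rebuild a query through the Builder
// just to change its slop.
PhraseQuery.Builder builder = new PhraseQuery.Builder();
builder.setSlop(1);
builder.add(new Term("body", "quick"));  // position 0
builder.add(new Term("body", "fox"));    // position 1
PhraseQuery query = builder.build();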
Example 1: applySlop
import org.apache.lucene.search.PhraseQuery; // import the package/class the method depends on
private Query applySlop(Query q, int slop) {
    if (q instanceof PhraseQuery) {
        // Rebuild the phrase query with the requested slop, keeping terms and positions.
        PhraseQuery pq = (PhraseQuery) q;
        PhraseQuery.Builder builder = new PhraseQuery.Builder();
        builder.setSlop(slop);
        final Term[] terms = pq.getTerms();
        final int[] positions = pq.getPositions();
        for (int i = 0; i < terms.length; ++i) {
            builder.add(terms[i], positions[i]);
        }
        pq = builder.build();
        pq.setBoost(q.getBoost());
        return pq;
    } else if (q instanceof MultiPhraseQuery) {
        ((MultiPhraseQuery) q).setSlop(slop);
        return q;
    } else {
        return q;
    }
}
Example 2: analyzePhrase
import org.apache.lucene.search.PhraseQuery; // import the package/class the method depends on
/**
 * Creates a simple phrase query from the cached token stream contents.
 */
protected Query analyzePhrase(String field, TokenStream stream, int slop) throws IOException {
    PhraseQuery.Builder builder = new PhraseQuery.Builder();
    builder.setSlop(slop);
    TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class);
    PositionIncrementAttribute posIncrAtt = stream.getAttribute(PositionIncrementAttribute.class);
    int position = -1;
    stream.reset();
    while (stream.incrementToken()) {
        if (enablePositionIncrements) {
            position += posIncrAtt.getPositionIncrement();
        } else {
            position += 1;
        }
        builder.add(new Term(field, termAtt.getBytesRef()), position);
    }
    return builder.build();
}
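For context, a hedged sketch of how a caller inside the same class might drive analyzePhrase; the analyzer variable, field name, and sample text are illustrative assumptions rather than part of the original source.

// Assumes an Analyzer instance named `analyzer` is available and that the
// enclosing class defines the enablePositionIncrements flag used above.
try (TokenStream stream = analyzer.tokenStream("body", "quick brown fox")) {
    Query phrase = analyzePhrase("body", stream, 0);
    stream.end();
    // `phrase` now requires the analyzed terms to appear at consecutive positions.
}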
Example 3: applySlop
import org.apache.lucene.search.PhraseQuery; // import the package/class the method depends on
private Query applySlop(Query q, int slop) {
    if (q instanceof PhraseQuery) {
        PhraseQuery.Builder builder = new PhraseQuery.Builder();
        builder.setSlop(slop);
        PhraseQuery pq = (PhraseQuery) q;
        org.apache.lucene.index.Term[] terms = pq.getTerms();
        int[] positions = pq.getPositions();
        for (int i = 0; i < terms.length; ++i) {
            builder.add(terms[i], positions[i]);
        }
        q = builder.build();
    } else if (q instanceof MultiPhraseQuery) {
        MultiPhraseQuery mpq = (MultiPhraseQuery) q;
        if (slop != mpq.getSlop()) {
            q = new MultiPhraseQuery.Builder(mpq).setSlop(slop).build();
        }
    }
    return q;
}
Example 4: addSlopToPhrase
import org.apache.lucene.search.PhraseQuery; // import the package/class the method depends on
/**
 * Rebuilds a phrase query with a slop value.
 */
private PhraseQuery addSlopToPhrase(PhraseQuery query, int slop) {
    PhraseQuery.Builder builder = new PhraseQuery.Builder();
    builder.setSlop(slop);
    final Term[] terms = query.getTerms();
    final int[] positions = query.getPositions();
    for (int i = 0; i < terms.length; ++i) {
        builder.add(terms[i], positions[i]);
    }
    return builder.build();
}
Example 5: createChainProximityQueryClauses
import org.apache.lucene.search.PhraseQuery; // import the package/class the method depends on
private void createChainProximityQueryClauses(BooleanQuery.Builder builder, String field, CachingTokenFilter stream,
        TermToBytesRefAttribute termAtt, OffsetAttribute offsetAtt) throws IOException {
    // Sliding window of the last two terms and their start offsets.
    Term[] termArr = new Term[2];
    long[] offsetArr = new long[2];
    for (int i = 0; i < 2; i++) {
        termArr[i] = null;
        offsetArr[i] = 0;
    }
    while (stream.incrementToken()) {
        Term t = new Term(field, BytesRef.deepCopyOf(termAtt.getBytesRef()));
        if (termArr[0] == null) {
            termArr[0] = t;
            offsetArr[0] = offsetAtt.startOffset();
        } else if (termArr[1] == null) {
            termArr[1] = t;
            offsetArr[1] = offsetAtt.startOffset();
        } else {
            // shift
            termArr[0] = termArr[1];
            offsetArr[0] = offsetArr[1];
            // fill
            termArr[1] = t;
            offsetArr[1] = offsetAtt.startOffset();
        }
        if (termArr[0] != null && termArr[1] != null) {
            long offsetDiff = offsetArr[1] - offsetArr[0];
            if (offsetDiff > 0) {
                // One SHOULD clause per adjacent term pair, with slop derived from the offset gap.
                PhraseQuery.Builder pq = new PhraseQuery.Builder();
                pq.setSlop((int) offsetDiff + 1);
                pq.add(termArr[0]);
                pq.add(termArr[1]);
                builder.add(pq.build(), BooleanClause.Occur.SHOULD);
            }
        }
    }
}
Example 6: search
import org.apache.lucene.search.PhraseQuery; // import the package/class the method depends on
@Override
public List<Song> search(IndexType indexType, String searchString, FieldName... fieldsToSearchIn) {
    try {
        Directory directory = getIndex(indexType);
        IndexReader indexReader = DirectoryReader.open(directory);
        IndexSearcher indexSearcher = new IndexSearcher(indexReader);
        // One phrase query per field, combined with SHOULD so a match in any field qualifies.
        BooleanQuery.Builder outerBooleanQueryBuilder = new BooleanQuery.Builder();
        for (FieldName field : fieldsToSearchIn) {
            PhraseQuery.Builder phraseQueryBuilder = new PhraseQuery.Builder();
            for (String searchTerm : searchString.toLowerCase().split(TERM_SPLIT_REGEX)) {
                phraseQueryBuilder.add(new Term(field.name(), searchTerm));
            }
            PhraseQuery phraseQuery = phraseQueryBuilder.build();
            outerBooleanQueryBuilder.add(phraseQuery, Occur.SHOULD);
        }
        BooleanQuery outerBooleanQuery = outerBooleanQueryBuilder.build();
        TopDocs hits = indexSearcher.search(outerBooleanQuery, Integer.MAX_VALUE);
        LOG.debug("{} hits for filter \"{}\"", hits.totalHits, outerBooleanQuery);
        List<Song> ret = new LinkedList<>();
        for (ScoreDoc scoreDocument : hits.scoreDocs) {
            Document document = indexSearcher.doc(scoreDocument.doc);
            String uuid = document.get(FieldName.UUID.name());
            ret.add(songByUuid.get(uuid));
        }
        return ret;
    } catch (Exception e) {
        LOG.warn("problem while searching", e);
        return new ArrayList<>(0);
    }
}
Example 7: addSlopToPhrase
import org.apache.lucene.search.PhraseQuery; // import the package/class the method depends on
/**
 * Rebuilds a phrase query with a slop value.
 */
private PhraseQuery addSlopToPhrase(PhraseQuery query, int slop) {
    PhraseQuery.Builder builder = new PhraseQuery.Builder();
    builder.setSlop(slop);
    org.apache.lucene.index.Term[] terms = query.getTerms();
    int[] positions = query.getPositions();
    for (int i = 0; i < terms.length; ++i) {
        builder.add(terms[i], positions[i]);
    }
    return builder.build();
}
Example 8: testMultiWordSynonyms
import org.apache.lucene.search.PhraseQuery; // import the package/class the method depends on
@Test
public void testMultiWordSynonyms() throws Exception {
    QueryParser dumb = new QueryParser("field", new Analyzer1());
    dumb.setSplitOnWhitespace(false);
    TermQuery guinea = new TermQuery(new Term("field", "guinea"));
    TermQuery pig = new TermQuery(new Term("field", "pig"));
    TermQuery cavy = new TermQuery(new Term("field", "cavy"));
    // A multi-word synonym source will form a graph query for synonyms that
    // formed the graph token stream.
    BooleanQuery.Builder synonym = new BooleanQuery.Builder();
    synonym.add(guinea, BooleanClause.Occur.SHOULD);
    synonym.add(pig, BooleanClause.Occur.SHOULD);
    BooleanQuery guineaPig = synonym.build();
    // With the phrase operator, a multi-word synonym source will form a
    // graph query with inner phrase queries.
    PhraseQuery.Builder phraseSynonym = new PhraseQuery.Builder();
    phraseSynonym.add(new Term("field", "guinea"));
    phraseSynonym.add(new Term("field", "pig"));
    PhraseQuery guineaPigPhrase = phraseSynonym.build();
    // Custom behavior: the synonyms are expanded unless the quote operator is used.
    QueryParser smart = new SmartQueryParser();
    smart.setSplitOnWhitespace(false);
    Assert.assertEquals(guineaPigPhrase, smart.parse("\"guinea pig\""));
}
Example 9: testPhraseQueryPositionIncrements
import org.apache.lucene.search.PhraseQuery; // import the package/class the method depends on
@Test
public void testPhraseQueryPositionIncrements() throws Exception {
    CharacterRunAutomaton stopStopList = new CharacterRunAutomaton(new RegExp("[sS][tT][oO][pP]").toAutomaton());
    QueryParser qp = getParserConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false, stopStopList));
    qp.setEnablePositionIncrements(true);
    // With position increments enabled, the removed stop word leaves a gap:
    // "2" is added at position 2 instead of 1.
    PhraseQuery.Builder phraseQuery = new PhraseQuery.Builder();
    phraseQuery.add(new Term("field", "1"));
    phraseQuery.add(new Term("field", "2"), 2);
    Assert.assertEquals(phraseQuery.build(), getQuery("\"1 stop 2\"", qp));
}
Example 10: testPhraseQueryPositionIncrements
import org.apache.lucene.search.PhraseQuery; // import the package/class the method depends on
public void testPhraseQueryPositionIncrements() throws Exception {
    CharacterRunAutomaton stopStopList =
            new CharacterRunAutomaton(new RegExp("[sS][tT][oO][pP]").toAutomaton());
    CommonQueryParserConfiguration qp =
            getParserConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false, stopStopList));
    qp.setEnablePositionIncrements(true);
    // The stop word is removed by the analyzer, so "2" lands at position 2 rather than 1.
    PhraseQuery.Builder phraseQuery = new PhraseQuery.Builder();
    phraseQuery.add(new Term("field", "1"));
    phraseQuery.add(new Term("field", "2"), 2);
    assertEquals(phraseQuery.build(), getQuery("\"1 stop 2\"", qp));
}
Example 11: createPairedProximityQueryClauses
import org.apache.lucene.search.PhraseQuery; // import the package/class the method depends on
private void createPairedProximityQueryClauses(BooleanQuery.Builder builder, String field, CachingTokenFilter stream,
        TermToBytesRefAttribute termAtt, OffsetAttribute offsetAtt) throws IOException {
    Term[] termArr = new Term[2];
    long[] offsetArr = new long[2];
    for (int i = 0; i < 2; i++) {
        termArr[i] = null;
        offsetArr[i] = 0;
    }
    int count = 0;
    while (stream.incrementToken()) {
        Term t = new Term(field, BytesRef.deepCopyOf(termAtt.getBytesRef()));
        if (count % 2 == 0) {
            termArr[0] = t;
            offsetArr[0] = offsetAtt.startOffset();
        } else {
            termArr[1] = t;
            offsetArr[1] = offsetAtt.startOffset();
            long offsetDiff = offsetArr[1] - offsetArr[0];
            if (offsetDiff > 0) {
                // Non-overlapping term pairs become SHOULD phrase clauses,
                // with slop derived from the character offset gap.
                PhraseQuery.Builder pq = new PhraseQuery.Builder();
                pq.setSlop((int) offsetDiff + 1);
                pq.add(termArr[0]);
                pq.add(termArr[1]);
                builder.add(pq.build(), BooleanClause.Occur.SHOULD);
            }
            termArr[0] = null;
            termArr[1] = null;
        }
        count++;
    }
    // If an odd term is left over, add it as a plain term clause.
    if (termArr[0] != null) {
        builder.add(new TermQuery(termArr[0]), BooleanClause.Occur.SHOULD);
        termArr[0] = null;
    }
}
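For context, a hedged sketch of how such a proximity helper might be driven from within the same class; the analyzer variable, field name, and sample text are illustrative assumptions, not part of the original source.

// Assumes an Analyzer instance named `analyzer` is in scope. Standard analyzers
// expose both TermToBytesRefAttribute and OffsetAttribute on their token streams.
BooleanQuery.Builder clauses = new BooleanQuery.Builder();
try (TokenStream ts = analyzer.tokenStream("content", "quick brown fox jumps")) {
    CachingTokenFilter stream = new CachingTokenFilter(ts);
    TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class);
    OffsetAttribute offsetAtt = stream.getAttribute(OffsetAttribute.class);
    stream.reset();
    createPairedProximityQueryClauses(clauses, "content", stream, termAtt, offsetAtt);
    stream.end();
}
Query proximityQuery = clauses.build();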
}