本文整理汇总了Java中org.apache.lucene.queryparser.classic.QueryParser类的典型用法代码示例。如果您正苦于以下问题:Java QueryParser类的具体用法?Java QueryParser怎么用?Java QueryParser使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
QueryParser类属于org.apache.lucene.queryparser.classic包,在下文中一共展示了QueryParser类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: search
import org.apache.lucene.queryparser.classic.QueryParser; //导入依赖的package包/类
/**
 * Runs a full-text search against the Lucene store for entities of the given type.
 *
 * <p>The query text is split on the literal token "AND"; each fragment is parsed
 * independently and all successfully-parsed fragments are combined with MUST
 * clauses. NOTE(review): {@code split("AND")} also splits inside words that
 * contain "AND" (e.g. "BRAND") — confirm this is intended.</p>
 *
 * @param <T> the entity type being searched
 * @param clazz the class of the entities to search
 * @param queryText the raw query text
 * @return result rows holding [score, explanation, entity] per match
 * @throws ParseException if the query text cannot be parsed
 */
public final <T extends BaseEntity> List<Object[]> search(final Class<T> clazz, final String queryText) throws ParseException {
    final FullTextEntityManager ftem = Search.getFullTextEntityManager(entityManager);
    final SearchFactory factory = ftem.getSearchFactory();
    final QueryParser parser = new MultiFieldQueryParser(getClassLuceneFields(clazz), factory.getAnalyzer(clazz));
    final BooleanQuery.Builder builder = new BooleanQuery.Builder();
    // Parse each AND-fragment separately; fragments that fail to parse come back
    // null from parseQuery and are simply skipped.
    for (final String fragment : queryText.split("AND")) {
        final Query parsed = parseQuery(fragment, parser);
        if (parsed != null) {
            builder.add(parsed, BooleanClause.Occur.MUST);
        }
    }
    final FullTextQuery jpaQuery = ftem.createFullTextQuery(builder.build(), clazz);
    jpaQuery.setProjection(ProjectionConstants.SCORE, ProjectionConstants.EXPLANATION, ProjectionConstants.THIS);
    return (List<Object[]>) jpaQuery.getResultList();
}
示例2: search
import org.apache.lucene.queryparser.classic.QueryParser; //导入依赖的package包/类
/**
 * Given a field to search, the names of the fields to return results in and a
 * query string, return search results up to the limit.
 *
 * <p>NOTE(review): the returned map is keyed by {@code String[]} — array
 * equals/hashCode are identity-based, so entries can be enumerated but not
 * looked up by content. Confirm callers only iterate.</p>
 *
 * @param searchfieldName the field to run the query against
 * @param returnFieldName the stored fields whose values are returned per hit
 * @param queryStr the raw query string (escaped before parsing)
 * @param limit maximum number of hits to collect
 * @return per-hit stored-field values mapped to the hit score (with confidences)
 * @throws ParseException if the escaped query still fails to parse
 * @throws IOException on index access failure
 */
public HashMap<String[], Float> search(String searchfieldName,
        String[] returnFieldName, String queryStr, int limit)
        throws ParseException, IOException {
    final HashMap<String[], Float> results = new HashMap<String[], Float>();
    if (queryStr == null || queryStr.length() == 0) {
        return results;
    }
    // Escape user input so query-syntax metacharacters are treated literally.
    final String escaped = QueryParser.escape(queryStr);
    final Query query = new QueryParser(Version.LUCENE_40, searchfieldName,
            analyser).parse(escaped);
    final TopScoreDocCollector collector = TopScoreDocCollector.create(
            limit, true);
    searcher.search(query, collector);
    for (final ScoreDoc hit : collector.topDocs().scoreDocs) {
        final Document document = searcher.doc(hit.doc);
        final String[] values = new String[returnFieldName.length];
        for (int j = 0; j < values.length; j++) {
            values[j] = document.get(returnFieldName[j]);
        }
        results.put(values, hit.score);
    }
    return results;
}
示例3: testTopLevel
import org.apache.lucene.queryparser.classic.QueryParser; //导入依赖的package包/类
/**
 * Top-level top_hits aggregation: randomly either a match-all query with an
 * explicit descending sort, or a boosted query that induces the same order.
 * Either way the three documents must come back as ids 3, 2, 1.
 */
public void testTopLevel() throws Exception {
    final Aggregation result;
    if (randomBoolean()) {
        result = testCase(new MatchAllDocsQuery(), topHits("_name").sort("string", SortOrder.DESC));
    } else {
        final Query boosted = new QueryParser("string", new KeywordAnalyzer()).parse("d^1000 c^100 b^10 a^1");
        result = testCase(boosted, topHits("_name"));
    }
    final SearchHits searchHits = ((TopHits) result).getHits();
    assertEquals(3L, searchHits.getTotalHits());
    final String[] expectedIds = {"3", "2", "1"};
    for (int i = 0; i < expectedIds.length; i++) {
        assertEquals(expectedIds[i], searchHits.getAt(i).getId());
        assertEquals("type", searchHits.getAt(i).getType());
    }
}
示例4: deleteIndexesByField
import org.apache.lucene.queryparser.classic.QueryParser; //导入依赖的package包/类
/**
 * Deletes every document matching a leading-wildcard "*" query on the given
 * field, i.e. all documents that have any value for that field.
 * No-op when the field name is null or empty.
 */
@Override
public void deleteIndexesByField(String field) {
    if (isNullOrEmpty(field)) {
        return;
    }
    try {
        QueryParser parser = new QueryParser(Version.LUCENE_4_9, field, analyzer);
        // "*" alone is a leading wildcard, which the parser rejects by default.
        parser.setAllowLeadingWildcard(true);
        Query matchAnyValue;
        try {
            matchAnyValue = parser.parse("*");
        } catch (ParseException e) {
            throw new IndexingException(errorMessage(
                    "could not remove full-text index for value " + field, 0));
        }
        // Guard the writer: delete and commit must not interleave with other writers.
        synchronized (this) {
            indexWriter.deleteDocuments(matchAnyValue);
            commit();
        }
    } catch (IOException ioe) {
        throw new IndexingException(errorMessage(
                "could not remove full-text index for value " + field, 0));
    } catch (VirtualMachineError vme) {
        handleVirtualMachineError(vme);
    }
}
示例5: main
import org.apache.lucene.queryparser.classic.QueryParser; //导入依赖的package包/类
/**
 * Demo entry point: searches the index under {@code INDEX_DIRECTORY} for
 * "shakespeare" in {@code FIELD_CONTENTS} and prints the top 5 hits with
 * their paths and scores.
 *
 * @param args unused
 * @throws Exception on any index-open, parse or search failure
 */
public static void main(String[] args) throws Exception {
    // Fix: the reader was never closed; try-with-resources releases the
    // underlying directory/file handles even when a search step throws.
    try (IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(INDEX_DIRECTORY)))) {
        IndexSearcher indexSearcher = new IndexSearcher(reader);
        Analyzer analyzer = new StandardAnalyzer();
        QueryParser queryParser = new QueryParser(FIELD_CONTENTS, analyzer);
        String searchString = "shakespeare";
        Query query = queryParser.parse(searchString);
        TopDocs results = indexSearcher.search(query, 5);
        ScoreDoc[] hits = results.scoreDocs;
        int numTotalHits = results.totalHits;
        System.out.println(numTotalHits + " total matching documents");
        for (int i = 0; i < hits.length; ++i) {
            int docId = hits[i].doc;
            Document d = indexSearcher.doc(docId);
            System.out.println((i + 1) + ". " + d.get("path") + " score=" + hits[i].score);
        }
    }
}
示例6: search
import org.apache.lucene.queryparser.classic.QueryParser; //导入依赖的package包/类
/**
 * Searches the given index field for the supplied query string and returns a
 * page of results.
 *
 * @param index the field name to search (currently the title field)
 * @param queryString the raw query string
 * @param page zero-based page of results to fetch
 * @return the page of results, or {@code null} if the search failed
 */
public SearchResult search(String index, String queryString, int page) {
    SearchResult searchResult = null;
    // Fix: the reader leaked when any step threw before reader.close();
    // try-with-resources closes it on every path.
    try (IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(
            Properties.getProperties().getProperty(Values.INDEX_LOCATION, Values.DEFAULT_INDEX_LOCATION))))) {
        IndexSearcher searcher = new IndexSearcher(reader);
        Analyzer analyzer = new StandardAnalyzer();
        // Search over the titles only for the moment
        QueryParser parser = new QueryParser(index, analyzer);
        Query query = parser.parse(queryString);
        searchResult = this.doPagingSearch(reader, searcher, query, queryString, page);
    }
    catch (Exception ex) {
        // Fix: errors were silently swallowed. Keep the best-effort contract
        // (return null) but surface the failure instead of hiding it.
        System.err.println("search failed for query \"" + queryString + "\": " + ex);
    }
    return searchResult;
}
示例7: synTokenQuery
import org.apache.lucene.queryparser.classic.QueryParser; //导入依赖的package包/类
/**
 * Queries the "surfaceFormTokens" field for the given surface form and records,
 * per conceptID, the best score seen so far into {@code result}.
 *
 * @param search surface form to look up (escaped before parsing)
 * @param numbOfResults maximum number of hits to retrieve
 * @param minLuceneScore hits scoring below this threshold are ignored
 * @param result accumulator mapping conceptID to its best score
 * @param searcher searcher over the concept index
 * @throws ParseException if the escaped query cannot be parsed
 * @throws IOException on index access failure
 */
private void synTokenQuery(String search, final int numbOfResults, final double minLuceneScore,
        Map<String, Float> result, IndexSearcher searcher) throws ParseException, IOException {
    final QueryParser parser = new QueryParser(Version.LUCENE_46, "surfaceFormTokens",
            new StandardAnalyzer(Version.LUCENE_46));
    final Query query = parser.parse(QueryParser.escape(search));
    final TopDocs top = searcher.search(query, numbOfResults);
    for (final ScoreDoc hit : top.scoreDocs) {
        if (hit.score < minLuceneScore) {
            continue;
        }
        final String conceptId = searcher.doc(hit.doc).get("conceptID");
        // Keep only the highest score observed for each concept.
        if (result.getOrDefault(conceptId, 0f) < hit.score) {
            result.put(conceptId, hit.score);
        }
    }
}
示例8: FbEntitySearcher
import org.apache.lucene.queryparser.classic.QueryParser; //导入依赖的package包/类
/**
 * Builds a searcher over a Freebase entity index.
 *
 * @param indexDir directory containing the Lucene index
 * @param numOfDocs maximum number of documents to retrieve per query
 * @param searchingStrategy "exact" (keyword analysis) or "inexact" (standard analysis)
 * @throws IOException if the index cannot be opened
 */
public FbEntitySearcher(String indexDir, int numOfDocs, String searchingStrategy) throws IOException {
    LogInfo.begin_track("Constructing Searcher");
    final boolean exact = searchingStrategy.equals("exact");
    if (!exact && !searchingStrategy.equals("inexact"))
        throw new RuntimeException("Bad searching strategy: " + searchingStrategy);
    this.searchStrategy = searchingStrategy;
    // Exact matching keeps the text as a single token; inexact tokenizes it.
    queryParser = new QueryParser(
            Version.LUCENE_44,
            FbIndexField.TEXT.fieldName(),
            exact ? new KeywordAnalyzer() : new StandardAnalyzer(Version.LUCENE_44));
    LogInfo.log("Opening index dir: " + indexDir);
    final IndexReader indexReader = DirectoryReader.open(SimpleFSDirectory.open(new File(indexDir)));
    indexSearcher = new IndexSearcher(indexReader);
    LogInfo.log("Opened index with " + indexReader.numDocs() + " documents.");
    this.numOfDocs = numOfDocs;
    LogInfo.end_track();
}
示例9: Search
import org.apache.lucene.queryparser.classic.QueryParser; //导入依赖的package包/类
/**
 * Runs a search over the "summary" field and collects the matching documents.
 *
 * @param searchString the raw query string (re-decoded from ISO-8859-1 bytes as UTF-8 below)
 * @param luceneResultCollector converts raw Lucene hits into result objects
 * @return the collected results (raw {@code List}; element type depends on the collector)
 * @throws IOException if the index cannot be read
 * @throws CorruptIndexException if the index is corrupt
 * @throws ParseException if the query string cannot be parsed
 */
public List Search(String searchString,LuceneResultCollector luceneResultCollector) throws CorruptIndexException, IOException, ParseException{
// Approach 1: parse the raw string with QueryParser.
System.out.println(this.indexSettings.getAnalyzer().getClass()+"----分词选择");
QueryParser q = new QueryParser(Version.LUCENE_44, "summary", this.indexSettings.getAnalyzer());
// Re-decode the input: presumably it arrives as ISO-8859-1 bytes that are
// really UTF-8 (typical servlet-container behavior) — TODO confirm upstream encoding.
String search = new String(searchString.getBytes("ISO-8859-1"),"UTF-8");
System.out.println(search+"----------搜索的词语dd");
Query query = q.parse(search);
// Approach 2 (unused): an exact TermQuery on the "title" field.
/*
Term t = new Term("title", searchString);
TermQuery query = new TermQuery(t);
*/
System.out.println(query.toString()+"--------query.tostring");
// Take at most the top 100 hits.
ScoreDoc[] docs = this.indexSearcher.search(query,100).scoreDocs;
System.out.println("一共有:"+docs.length+"条记录");
List result = luceneResultCollector.collect(docs, this.indexSearcher);
return result;
}
示例10: getQuery
import org.apache.lucene.queryparser.classic.QueryParser; //导入依赖的package包/类
/**
 * Builds a Lucene {@link Query} from the text content of the given DOM element.
 *
 * <p>When a shared parser instance is configured it is used under a lock (the
 * classic QueryParser is not safe for concurrent use); otherwise a fresh parser
 * is created for the element's "fieldName" attribute (falling back to the
 * default field).</p>
 *
 * @param e DOM element whose text is the query and whose attributes carry
 *          optional "fieldName" and "boost" settings
 * @return the parsed query with its boost applied
 * @throws ParserException if the query text cannot be parsed
 */
@Override
public Query getQuery(Element e) throws ParserException {
    final String text = DOMUtils.getText(e);
    try {
        final Query parsed;
        if (unSafeParser == null) {
            final String fieldName = DOMUtils.getAttribute(e, "fieldName", defaultField);
            // Create new parser
            final QueryParser parser = createQueryParser(fieldName, analyzer);
            parsed = parser.parse(text);
        } else {
            // synchronize on unsafe parser
            synchronized (unSafeParser) {
                parsed = unSafeParser.parse(text);
            }
        }
        parsed.setBoost(DOMUtils.getAttribute(e, "boost", 1.0f));
        return parsed;
    } catch (ParseException e1) {
        throw new ParserException(e1.getMessage());
    }
}
示例11: parseQuery
import org.apache.lucene.queryparser.classic.QueryParser; //导入依赖的package包/类
/**
 * Parses one query fragment into a Lucene {@link Query}.
 *
 * <p>The standard query parser does not handle integers, ranges or a leading
 * NOT the way this application expects, so this method layers a small
 * abstraction on top of the normal query language. Integer queries are
 * wrapped in [].</p>
 *
 * <p>It allows a few things:</p>
 * <ol>
 * <li>Prefixing queries with NOT, treated like *:* AND NOT</li>
 * <li>Integers greater than: [name:&gt;integer], e.g. [times:&gt;100]</li>
 * <li>Integers less than: [name:&lt;integer], e.g. [times:&lt;100]</li>
 * <li>Integer ranges: [name:[small_integer TO large_integer]], e.g. [times:[1 TO 10]]</li>
 * <li>Mixing range queries and normal queries, e.g. "a:1 AND [times:&gt;1]"</li>
 * </ol>
 *
 * @param queryText our query string (a single fragment; the caller splits on AND)
 * @param parser query parser used for plain, non-numeric fragments
 * @return query element; may be null when the fragment fails to parse
 *         (callers filter out nulls)
 */
private Query parseQuery(final String queryText, final QueryParser parser) {
final Mutable<Query> query = Mutable.of(null);
final Mutable<String> queryMessage = Mutable.of(queryText.trim());
// Ensure that NOT prefixes are instead *:* AND NOT
PatternMatcher.of(startsWithNotPattern, queryMessage.get())
.then(startsWithNotMatcher ->
queryMessage.mutate(queryMessage.get().replaceFirst("NOT", "*:* AND NOT")));
PatternMatcher.of(queryNumericPattern, queryMessage.get())
// Numeric fragment: extract field name and bounds, build an inclusive long range
.then(queryNumericMatcher -> {
final String fieldName = queryNumericMatcher.group(1).trim();
final String fieldQuery = queryNumericMatcher.group(2).trim();
final Long[] ranges = getRangeValues(fieldQuery);
// query.mutate(LongPoint.newRangeQuery(fieldName, ranges[0], ranges[1]));
query.mutate(NumericRangeQuery.newLongRange(fieldName, ranges[0], ranges[1], true, true));
})
// This is a normal string query
.otherwise(queryNumericMatcher -> query.mutate(wrappedQueryParserParse(queryMessage.get(), parser)));
return query.get();
}
示例12: search
import org.apache.lucene.queryparser.classic.QueryParser; //导入依赖的package包/类
/**
 * Search sample: finds documents whose title field matches "Solr" and prints
 * id, title, author and score for each of the top 10 hits.
 *
 * @param directory the index directory.
 * @throws IOException in case of I/O failure.
 * @throws ParseException in case of Query parse exception.
 */
public static void search(Directory directory) throws IOException, ParseException {
    final IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(directory));
    final Query query = new QueryParser("title", new StandardAnalyzer()).parse("title:Solr");
    final TopDocs matches = searcher.search(query, 10);
    System.out.println("Search returned " + matches.totalHits + " matches.");
    Arrays.stream(matches.scoreDocs)
            .map(hit -> luceneDoc(hit, searcher))
            .forEach(book -> {
                System.out.println("-------------------------------------");
                System.out.println("ID:\t" + book.get("id"));
                System.out.println("TITLE:\t" + book.get("title"));
                System.out.println("AUTHOR:\t" + book.get("author"));
                System.out.println("SCORE:\t" + book.get("score"));
            });
}
示例13: findAll
import org.apache.lucene.queryparser.classic.QueryParser; //导入依赖的package包/类
/**
 * Executes a query for all documents in the index and verifies that exactly
 * the three expected ids come back.
 *
 * @throws Exception never, otherwise the test fails.
 */
@Test
public void findAll() throws Exception {
    final IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(directory));
    final Query query = new QueryParser("title", new StandardAnalyzer()).parse("Solr");
    final TopDocs matches = searcher.search(query, 10);
    assertEquals(3, matches.totalHits);
    final Set<String> expected = new HashSet<String>(Arrays.asList("1", "2", "3"));
    final Set<String> actual = Arrays.stream(matches.scoreDocs)
            .map(hit -> luceneDoc(hit.doc, searcher))
            .map(document -> document.get("id"))
            .collect(Collectors.toCollection(HashSet::new));
    assertEquals(expected, actual);
}
示例14: findByAuthorSurname
import org.apache.lucene.queryparser.classic.QueryParser; //导入依赖的package包/类
/**
 * Search all books of a given author: exactly one document by "Gazzarini"
 * exists, and its id must be "1".
 *
 * @throws Exception never, otherwise the test fails.
 */
@Test
public void findByAuthorSurname() throws Exception {
    final IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(directory));
    final Query query = new QueryParser("author", new StandardAnalyzer()).parse("Gazzarini");
    final TopDocs matches = searcher.search(query, 10);
    assertEquals(1, matches.totalHits);
    // Exactly one hit (asserted above), so the first scored doc is the match.
    final int docId = matches.scoreDocs[0].doc;
    assertEquals("1", luceneDoc(docId, searcher).get("id"));
}
示例15: initialize
import org.apache.lucene.queryparser.classic.QueryParser; //导入依赖的package包/类
/**
 * Wires up the component from UIMA configuration parameters: the sentence
 * chunker, the hit count, the query analyzer/parser, and the query-string
 * constructor.
 */
@Override
public void initialize(UimaContext context) throws ResourceInitializationException {
    super.initialize(context);
    // Sentence chunker = tokenizer factory + sentence model, both overridable via config.
    TokenizerFactory tokenizers = UimaContextHelper.createObjectFromConfigParameter(context,
            "tokenizer-factory", "tokenizer-factory-params", IndoEuropeanTokenizerFactory.class,
            TokenizerFactory.class);
    SentenceModel sentences = UimaContextHelper.createObjectFromConfigParameter(context,
            "sentence-model", "sentence-model-params", IndoEuropeanSentenceModel.class,
            SentenceModel.class);
    chunker = new SentenceChunker(tokenizers, sentences);
    // Number of hits to retrieve per query (default 200).
    hits = UimaContextHelper.getConfigParameterIntValue(context, "hits", 200);
    // Query analyzer and a parser over the "text" field.
    analyzer = UimaContextHelper.createObjectFromConfigParameter(context, "query-analyzer",
            "query-analyzer-params", StandardAnalyzer.class, Analyzer.class);
    parser = new QueryParser("text", analyzer);
    // Strategy object that turns questions into Lucene query strings.
    queryStringConstructor = UimaContextHelper.createObjectFromConfigParameter(context,
            "query-string-constructor", "query-string-constructor-params",
            BooleanBagOfPhraseQueryStringConstructor.class, QueryStringConstructor.class);
}