This article collects typical usage examples of the QueryParser.escape method from the Java class org.apache.lucene.queryparser.classic.QueryParser. If you have been wondering what QueryParser.escape does, how to use it, or where to find real-world examples, the curated code samples below should help. You can also explore other usage examples of the enclosing class, org.apache.lucene.queryparser.classic.QueryParser.
The following shows 12 code examples of QueryParser.escape, sorted by popularity by default.
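Before diving in, a minimal sketch of what the method does may be useful (the class name EscapeDemo is made up for illustration): QueryParser.escape backslash-escapes every character that is part of the classic query syntax (\ + - ! ( ) : ^ [ ] " { } ~ * ? | & /), so that arbitrary user input can be embedded in a query string without being interpreted as syntax.

import org.apache.lucene.queryparser.classic.QueryParser;

public class EscapeDemo {
    public static void main(String[] args) {
        // Special characters are prefixed with a backslash; everything else
        // passes through unchanged.
        String safe = QueryParser.escape("title:(C++)?");
        System.out.println(safe); // prints: title\:\(C\+\+\)\?
    }
}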
Example 1: search
import org.apache.lucene.queryparser.classic.QueryParser; // import for the class this method depends on
/**
 * Given a search field to search, the names of the fields to return results in,
 * and a query string, return search results up to the limit.
 *
 * @param searchfieldName the field to search in
 * @param returnFieldName the fields whose stored values are returned per hit
 * @param queryStr the raw (unescaped) query string
 * @param limit the maximum number of hits
 * @return search results (with confidences)
 * @throws ParseException
 * @throws IOException
 */
public HashMap<String[], Float> search(String searchfieldName,
        String[] returnFieldName, String queryStr, int limit)
        throws ParseException, IOException {
    if (queryStr == null || queryStr.length() == 0)
        return new HashMap<String[], Float>();
    final String clean = QueryParser.escape(queryStr);
    final Query q = new QueryParser(Version.LUCENE_40, searchfieldName,
            analyser).parse(clean);
    final TopScoreDocCollector collector = TopScoreDocCollector.create(
            limit, true);
    searcher.search(q, collector);
    final ScoreDoc[] hits = collector.topDocs().scoreDocs;
    final HashMap<String[], Float> results = new HashMap<String[], Float>();
    for (int i = 0; i < hits.length; ++i) {
        final int docId = hits[i].doc;
        final Document d = searcher.doc(docId);
        String[] rvalues = new String[returnFieldName.length];
        for (int j = 0; j < rvalues.length; j++) {
            rvalues[j] = d.get(returnFieldName[j]);
        }
        results.put(rvalues, hits[i].score);
    }
    return results;
}
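A hedged usage sketch (the index object and the "uri"/"label" field names are hypothetical). One caveat worth noting: String[] keys rely on identity hashing, so the returned map supports iteration but not lookup by field values.

// Hypothetical call site; searcher/analyser are fields of the enclosing class.
HashMap<String[], Float> results = index.search("label",
        new String[] { "uri", "label" }, userInput, 10);
for (Map.Entry<String[], Float> e : results.entrySet()) {
    System.out.println(Arrays.toString(e.getKey()) + " -> " + e.getValue());
}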
Example 2: synTokenQuery
import org.apache.lucene.queryparser.classic.QueryParser; // import for the class this method depends on
private void synTokenQuery(String search, final int numbOfResults, final double minLuceneScore,
        Map<String, Float> result, IndexSearcher searcher) throws ParseException, IOException {
    QueryParser parser = new QueryParser(Version.LUCENE_46, "surfaceFormTokens",
            new StandardAnalyzer(Version.LUCENE_46));
    search = QueryParser.escape(search);
    Query q = parser.parse(search);
    /*
     * Works only in String field!!
     */
    // Query q = new FuzzyQuery(new Term("surfaceFormTokens",
    // QueryParser.escape(search)), 2);
    TopDocs top = searcher.search(q, numbOfResults);
    for (ScoreDoc doc : top.scoreDocs) {
        if (doc.score >= minLuceneScore) {
            final String key = searcher.doc(doc.doc).get("conceptID");
            if (result.getOrDefault(key, 0f) < doc.score) {
                result.put(key, doc.score);
            }
        }
    }
}
Example 3: prepareLQuery
import org.apache.lucene.queryparser.classic.QueryParser; // import for the class this method depends on
public String prepareLQuery(String rawquery, String colTitle) {
    rawquery = rawquery.replaceAll(",", " ");
    rawquery = rawquery.trim().replaceAll("_", " "); // StandardAnalyzer treats _ as a new word
    rawquery = rawquery.replaceAll("\\s+", " ");
    String[] token = rawquery.split(" ");
    String newTerm = "";
    // prepare query for colName
    for (int i = 0; i < token.length; i++) {
        if (token[i] == null || "".equals(token[i]))
            continue;
        if (newTerm.equals("") == false)
            newTerm += " AND ";
        newTerm += colTitle + ":"
                // + QueryParser.escape(token[i]) + "~0.6 "; // fuzzy matching, float similarity
                + QueryParser.escape(token[i]) + "~3 "; // fuzzy matching
    }
    return newTerm;
}
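Traced by hand from the code above, an illustrative call (the input values are made up) produces roughly:

String q = prepareLQuery("foo_bar, baz", "title");
// q == "title:foo~3  AND title:bar~3  AND title:baz~3 "
// (each token is escaped before the ~3 fuzzy suffix is appended; the doubled
// spaces come from the trailing space concatenated before " AND ")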
Example 4: searching
import org.apache.lucene.queryparser.classic.QueryParser; // import for the class this method depends on
/**
 * Runs the actual search against the index.
 * @param propertiesPath path to the .properties file
 * @param query the search query: either a query string or a path to a query file
 * @throws IOException
 * @throws ParseException
 * @throws InterruptedException
 * @throws ParserConfigurationException
 * @throws TransformerException
 */
public static void searching(String propertiesPath, String query) throws IOException, ParseException, InterruptedException, ParserConfigurationException, TransformerException {
    SpokendocBaseline spokendoc = new SpokendocBaseline(propertiesPath);
    // search from a query file and write the results as XML
    if (new File(query).exists()) {
        System.out.println("Search from queries file...");
        searchFromFile(spokendoc, query);
    }
    // search from a single query string and print to standard output
    else {
        System.out.println("Search from query string");
        System.out.println("Query: " + query);
        // normalize the query string
        if (spokendoc.normalization) {
            query = Util.normalizeString(query);
        }
        String tokenizedString = Util.joinWithSplitter(Tokenizer.tokenize(query, spokendoc.tokenizerPath), " ");
        tokenizedString = QueryParser.escape(tokenizedString);
        System.out.println(tokenizedString);
        // note: searchFromString normalizes, tokenizes, and escapes the query
        // again internally (see Example 8); tokenizedString is only printed here
        TopDocs results = searchFromString(spokendoc, query);
        printResult(spokendoc, results);
    }
}
Example 5: getFuzzyCandidates
import org.apache.lucene.queryparser.classic.QueryParser; // import for the class this method depends on
public List<LuceneCandidate> getFuzzyCandidates(String search, final int numbOfResults,
        final double minLuceneScore) {
    search = QueryParser.escape(search);
    String fuzzySearch = "";
    for (String s : search.split(" ")) {
        fuzzySearch += s + "~ ";
    }
    // System.out.println("fuzzy Search = " + fuzzySearch);
    // String fuzzySearch = search + "~";
    return getNonFuzzyCandidates(fuzzySearch.trim(), numbOfResults, minLuceneScore);
}
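Because escape leaves whitespace untouched, splitting the escaped string on spaces is safe. An illustrative trace (input made up):

String s = QueryParser.escape("new york-city"); // -> "new york\-city"
// appending "~" per token yields: "new~ york\-city~"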
Example 6: normalizeQuery
import org.apache.lucene.queryparser.classic.QueryParser; // import for the class this method depends on
/**
 * Normalizes a query by escaping special Lucene characters. Used to normalize automatically
 * generated queries (e.g. in bootstrapping) that may contain special characters.
 *
 * @param query the Lucene query to be normalized
 * @param quoteIfSpace whether to wrap the query in quotes when it contains whitespace
 * @return a normalized version of the query
 */
public static String normalizeQuery(String query, boolean quoteIfSpace)
{
    query = QueryParser.escape(QueryParser.escape(query.trim()));
    if (quoteIfSpace && query.matches(".*\\s.*")) {
        query = "\"" + query + "\"";
    }
    return query;
}
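An illustrative hand trace of the double escape (strings shown as raw text, not Java literals; the input is made up):

String q = normalizeQuery("a+b c", true);
// first escape:   a\+b c
// second escape:  a\\\+b c   (the backslash added in pass one is itself escaped)
// quoteIfSpace:   "a\\\+b c"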
Example 7: searchConcepts
import org.apache.lucene.queryparser.classic.QueryParser; // import for the class this method depends on
@Override
public List<Concept> searchConcepts(Query query) {
    QueryParser parser = getQueryParser();
    // BooleanQuery finalQuery = new BooleanQuery();
    Builder finalQueryBuilder = new BooleanQuery.Builder();
    try {
        if (query.isIncludeSynonyms() || query.isIncludeAbbreviations() || query.isIncludeAcronyms()) {
            // BooleanQuery subQuery = new BooleanQuery();
            Builder subQueryBuilder = new BooleanQuery.Builder();
            subQueryBuilder.add(LuceneUtils.getBoostedQuery(parser, query.getInput(), 10.0f),
                    Occur.SHOULD);
            String escapedQuery = QueryParser.escape(query.getInput());
            if (query.isIncludeSynonyms()) {
                subQueryBuilder.add(parser.parse(Concept.SYNONYM + ":" + escapedQuery), Occur.SHOULD);
            }
            if (query.isIncludeAbbreviations()) {
                subQueryBuilder.add(parser.parse(Concept.ABREVIATION + ":" + escapedQuery), Occur.SHOULD);
            }
            if (query.isIncludeAcronyms()) {
                subQueryBuilder.add(parser.parse(Concept.ACRONYM + ":" + escapedQuery), Occur.SHOULD);
            }
            finalQueryBuilder.add(subQueryBuilder.build(), Occur.MUST);
        } else {
            finalQueryBuilder.add(parser.parse(query.getInput()), Occur.MUST);
        }
    } catch (ParseException e) {
        logger.log(Level.WARNING, "Failed to parse query", e);
    }
    addCommonConstraints(finalQueryBuilder, query);
    IndexHits<Node> hits = null;
    BooleanQuery finalQuery = finalQueryBuilder.build();
    try (Transaction tx = graph.beginTx()) {
        hits = graph.index().getNodeAutoIndexer().getAutoIndex().query(finalQuery);
        tx.success();
    }
    return limitHits(hits, query);
}
Example 8: searchFromString
import org.apache.lucene.queryparser.classic.QueryParser; // import for the class this method depends on
/**
 * Runs a single search from a query string.
 * @param spokendoc an instance of {@link SpokendocBaseline}
 * @param queryString the query string to search for
 * @return the TopDocs object with the search results
 * @throws IOException
 * @throws InterruptedException
 * @throws ParseException
 */
private static TopDocs searchFromString(SpokendocBaseline spokendoc, String queryString)
        throws IOException, InterruptedException, ParseException {
    if (spokendoc.normalization) {
        queryString = Util.normalizeString(queryString);
    }
    String tokenizedString = Util.joinWithSplitter(Tokenizer.tokenize(queryString, spokendoc.tokenizerPath), " ");
    tokenizedString = QueryParser.escape(tokenizedString);
    QueryParser parser = spokendoc.getQueryParser("content");
    Query query = parser.parse(tokenizedString);
    IndexSearcher searcher = spokendoc.getIndexSearcher();
    TopDocs results = searcher.search(query, null, 1000);
    return results;
}
Example 9: formatAtomicQueryText
import org.apache.lucene.queryparser.classic.QueryParser; // import for the class this method depends on
@Override
public String formatAtomicQueryText(String text, String originalText) {
    return QueryParser.escape(text);
}
Example 10: normalizeQuoteName
import org.apache.lucene.queryparser.classic.QueryParser; // import for the class this method depends on
private static String normalizeQuoteName(String name) {
    return "\"" + QueryParser.escape(name) + "\"";
}
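An illustrative trace (input made up; result shown as raw text, not a Java literal): escape backslash-escapes any embedded syntax characters, then the whole term is wrapped in quotes to form a phrase query.

String q = normalizeQuoteName("Tom & Jerry");
// q == "Tom \& Jerry"   (including the surrounding double quotes)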
Example 11: main
import org.apache.lucene.queryparser.classic.QueryParser; // import for the class this method depends on
public static void main(String[] args) {
    try {
        Builder builder = Settings.builder();
        //builder.put("client.transport.sniff", true);
        builder.put("cluster.name", "elasticsearch");
        Settings settings = builder.build();
        String stringQuery = "{\"match_all\" : { }}";
        String q = QueryParser.escape(stringQuery);
        System.out.println(q);
        //System.out.println(new MatchAllQueryBuilder());
        Client esClient = new PreBuiltTransportClient(settings).addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9300));
        QueryBuilder queryBuilder = QueryBuilders.wrapperQuery(stringQuery);
        String[] fields = new String[2];
        fields[0] = "name";
        fields[1] = "name1";
        SearchResponse scrollResp = esClient.prepareSearch("test")
                .setTypes("test")
                .setQuery(queryBuilder).setFetchSource(fields, null)
                .setSize(250) //.addSort("name", SortOrder.ASC)
                .get();
        SearchHit[] searchHits = scrollResp.getHits().getHits();
        for (SearchHit searchHit : searchHits) {
            System.out.println(searchHit.getSourceAsMap());
        }
        /*int i = 0;
        while (true) {
            System.out.println(scrollResp);
            System.out.println(scrollResp.getHits().totalHits());
            //System.out.println(scrollResp.getHits().getHits().length);
            //for (SearchHit hit : scrollResp.getHits().getHits()) {
            //    System.out.println(hit.sourceAsMap());
            //    System.out.println(i++);
            //}
            scrollResp = esClient.prepareSearchScroll(scrollResp.getScrollId()).setScroll(new TimeValue(60000)).execute().actionGet();
            if (scrollResp.getHits().getHits().length == 0) {
                break;
            }
        }*/
    } catch (UnknownHostException e) {
        e.printStackTrace();
    }
}
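Worth noting about this example: escape targets Lucene's classic query syntax, not JSON, so applying it to the Elasticsearch query string escapes the braces, quotes, and colons. The escaped value is only printed; the unescaped stringQuery is what wrapperQuery actually sends. Traced by hand (result shown as raw text):

String q = QueryParser.escape("{\"match_all\" : { }}");
// q, printed: \{\"match_all\" \: \{ \}\}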
Example 12: mapQuery
import org.apache.lucene.queryparser.classic.QueryParser; // import for the class this method depends on
public String mapQuery(MappingClass mapping, Object obj) {
    String queryString = "";
    String indexF = mapping.getIndexField();
    int type = mapping.getMappingType();
    // float fuzzyval = 0.600f; // default fuzzy value (float similarity is deprecated)
    int fuzzyval = 4; // default fuzzy value
    String lv = null; String hv = null;
    if (obj == null) return queryString;
    String term = obj.toString();
    switch (type) {
        case 0: // not applicable
            break;
        case 1: // exact match
            term = term.trim();
            queryString = indexF + ":\"" + term + "\"";
            break;
        case 2:
        case 3: // may contain multiple words
            term = term.trim();
            term = term.replaceAll(",", " ");
            term = term.replaceAll("\\s+", " ");
            String[] token = term.split(" ");
            String newTerm = "";
            for (int i = 0; i < token.length; i++) {
                if (token[i] == null || "".equals(token[i]))
                    continue;
                if (newTerm.equals("") == false && type == 3)
                    newTerm += " AND ";
                if (newTerm.equals("") == false && type == 2)
                    newTerm += " OR ";
                newTerm += indexF + ":"
                        + QueryParser.escape(token[i]) + "~" + fuzzyval + " "; // fuzzy matching
            }
            queryString = newTerm;
            break;
        case 4: // range-bound query
            lv = mapping.getLowerrange().toString();
            hv = mapping.getUpperrange().toString();
            String ls = boundValue(obj, lv, 0);
            String hs = boundValue(obj, hv, 0);
            newTerm = indexF + ":[" + ls + " TO " + hs + "]";
            queryString = newTerm;
            break;
        case 5: // starts with
            int l = mapping.getStartswith();
            newTerm = term.substring(0, l);
            queryString = indexF + ":" + QueryParser.escape(newTerm.trim()) + "*";
            break;
        case 6: // ends with
            l = mapping.getEndsswith();
            newTerm = term.substring(term.length() - l, term.length());
            queryString = indexF + ":*" + QueryParser.escape(newTerm.trim());
            break;
        case 7: // range-bound query
            lv = mapping.getLowerrange().toString();
            hv = mapping.getUpperrange().toString();
            ls = boundValue(obj, lv, 1);
            hs = boundValue(obj, hv, 1);
            newTerm = indexF + ":[" + ls + " TO " + hs + "]";
            queryString = newTerm;
            break;
        default:
            break;
    }
    return queryString;
}
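For orientation, an illustrative hand trace of the multi-word fuzzy branch (type 3), with a made-up MappingClass whose index field is "city":

String q = mapQuery(mapping, "New York, NY");
// tokens: [New, York, NY]
// q == "city:New~4  AND city:York~4  AND city:NY~4 "
// (tokens are escaped before the ~4 fuzzy suffix; the extra spaces come from
// the trailing-space concatenation, as in Example 3)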