

Java QueryParser.escape Method Code Examples

This article collects typical usage examples of the Java method org.apache.lucene.queryparser.classic.QueryParser.escape. If you are wondering what QueryParser.escape does, how to call it, or where to find working examples of it, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.lucene.queryparser.classic.QueryParser.


The 12 code examples of QueryParser.escape shown below are drawn from open-source projects and are ordered by popularity by default.
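
A minimal, self-contained sketch (not taken from any of the projects below) may help before diving into the examples: QueryParser.escape backslash-escapes Lucene query-syntax characters so that raw user input can be parsed as literal terms. The field name "content" and the sample input below are placeholders, and the two-argument QueryParser constructor is the Lucene 5+ form; the 4.x builds used in several of the examples take an extra Version argument.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.Query;

public class EscapeDemo {
    public static void main(String[] args) throws ParseException {
        // Raw user input containing characters that QueryParser treats as syntax.
        String userInput = "C++ (2022)? ~50%";

        // escape() puts a backslash before each special character
        // (+ - ! ( ) { } [ ] ^ " ~ * ? : \ / and the && || pairs),
        // so the input is read as plain text rather than operators or wildcards.
        String escaped = QueryParser.escape(userInput);
        System.out.println(escaped); // C\+\+ \(2022\)\? \~50%

        // The escaped string now parses without throwing ParseException.
        QueryParser parser = new QueryParser("content", new StandardAnalyzer());
        Query q = parser.parse(escaped);
        System.out.println(q);
    }
}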

Example 1: search

import org.apache.lucene.queryparser.classic.QueryParser; // import the package/class the method depends on
/**
 * Given the name of the field to search, the names of the fields to return
 * results from and a query string, return search results up to the limit.
 * 
 * @param searchfieldName the indexed field to run the query against
 * @param returnFieldName the stored fields whose values are returned for each hit
 * @param queryStr the raw (unescaped) query string
 * @param limit the maximum number of hits to return
 * @return search results (with confidences)
 * @throws ParseException
 * @throws IOException
 */
public HashMap<String[], Float> search(String searchfieldName,
		String[] returnFieldName, String queryStr, int limit)
		throws ParseException, IOException {
	if (queryStr == null || queryStr.length() == 0)
		return new HashMap<String[], Float>();
	final String clean = QueryParser.escape(queryStr);
	final Query q = new QueryParser(Version.LUCENE_40, searchfieldName,
			analyser).parse(clean);
	final TopScoreDocCollector collector = TopScoreDocCollector.create(
			limit, true);

	searcher.search(q, collector);
	final ScoreDoc[] hits = collector.topDocs().scoreDocs;
	final HashMap<String[], Float> results = new HashMap<String[], Float>();
	for (int i = 0; i < hits.length; ++i) {
		final int docId = hits[i].doc;
		final Document d = searcher.doc(docId);
		String[] rvalues = new String[returnFieldName.length];
		for(int j=0;j<rvalues.length;j++){
			rvalues[j]=d.get(returnFieldName[j]);
		}
		results.put(rvalues, hits[i].score);
	}
	return results;
}
 
Developer: openimaj, Project: openimaj, Lines of code: 38, Source file: QuickSearcher.java

Example 2: synTokenQuery

import org.apache.lucene.queryparser.classic.QueryParser; // import the package/class the method depends on
private void synTokenQuery(String search, final int numbOfResults, final double minLuceneScore,
		Map<String, Float> result, IndexSearcher searcher) throws ParseException, IOException {

	QueryParser parser = new QueryParser(Version.LUCENE_46, "surfaceFormTokens",
			new StandardAnalyzer(Version.LUCENE_46));

	search = QueryParser.escape(search);

	Query q = parser.parse(search);
	/*
	 * Works only on a String field!
	 */
	// Query q = new FuzzyQuery(new Term("surfaceFormTokens",
	// QueryParser.escape(search)), 2);

	TopDocs top = searcher.search(q, numbOfResults);

	for (ScoreDoc doc : top.scoreDocs) {
		if (doc.score >= minLuceneScore) {
			final String key = searcher.doc(doc.doc).get("conceptID");
			if (result.getOrDefault(key, 0f) < doc.score) {
				result.put(key, doc.score);
			}
		}
	}
}
 
Developer: ag-sc, Project: JLink, Lines of code: 27, Source file: LuceneRetrieval.java

Example 3: prepareLQuery

import org.apache.lucene.queryparser.classic.QueryParser; // import the package/class the method depends on
public String prepareLQuery(String rawquery, String colTitle) {
    rawquery = rawquery.replaceAll(",", " ");
    rawquery = rawquery.trim().replaceAll("_", " "); // StandardAnalyzer treats _ as new word
    rawquery = rawquery.replaceAll("\\s+", " ");
    String[] token = rawquery.split(" ");
    String newTerm = "";

    // prepare query for colName
    for (int i = 0; i < token.length; i++) {
        if (token[i] == null || "".equals(token[i]))
            continue;

        if (newTerm.equals("") == false )
            newTerm += " AND ";

        newTerm += colTitle + ":"
                // + QueryParser.escape(token[i]) + "~0.6 "; // For Fuzzy Logic
                + QueryParser.escape(token[i]) + "~3 "; // For Fuzzy Logic
    }
    return newTerm;
}
 
Developer: arrahtech, Project: osdq-core, Lines of code: 22, Source file: SimilarityCheckLucene.java

Example 4: searching

import org.apache.lucene.queryparser.classic.QueryParser; // import the package/class the method depends on
/**
 * Performs the actual search against the index.
 * @param propertiesPath path to the .properties file
 * @param query the search query: either a query string or the path to a query file
 * @throws IOException
 * @throws ParseException
 * @throws InterruptedException
 * @throws ParserConfigurationException
 * @throws TransformerException
 */
public static void searching(String propertiesPath, String query) throws IOException, ParseException, InterruptedException, ParserConfigurationException, TransformerException {
	SpokendocBaseline spokendoc = new SpokendocBaseline(propertiesPath);

	// search from a query file and write the results to XML
	if (new File(query).exists()){
   		System.out.println("Search from queries file...");
	    searchFromFile(spokendoc, query);
	}
	// search from a single-line query and print to standard output
	else{
   		System.out.println("Search from query string");
        System.out.println("Query: " + query);
        // normalize the query string
        if (spokendoc.normalization) {
            query = Util.normalizeString(query);
		}
   		String tokenizedString = Util.joinWithSplitter(Tokenizer.tokenize(query, spokendoc.tokenizerPath), " ");
   		tokenizedString = QueryParser.escape(tokenizedString);
   		System.out.println(tokenizedString);
           TopDocs results = searchFromString(spokendoc, query);
        printResult(spokendoc, results);
	}
}
 
Developer: Kesin11, Project: Spokendoc-Baseline, Lines of code: 34, Source file: Search.java

Example 5: getFuzzyCandidates

import org.apache.lucene.queryparser.classic.QueryParser; // import the package/class the method depends on
public List<LuceneCandidate> getFuzzyCandidates(String search, final int numbOfResults,
		final double minLuceneScore) {

	search = QueryParser.escape(search);

	String fuzzySearch = "";
	for (String s : search.split(" ")) {
		fuzzySearch += s + "~ ";
	}
	// System.out.println("fuzzy Search = " + fuzzySearch);
	// String fuzzySearch = search + "~";
	return getNonFuzzyCandidates(fuzzySearch.trim(), numbOfResults, minLuceneScore);
}
 
Developer: ag-sc, Project: JLink, Lines of code: 14, Source file: LuceneRetrieval.java

Example 6: normalizeQuery

import org.apache.lucene.queryparser.classic.QueryParser; // import the package/class the method depends on
/**
 * Normalizes a query by escaping special Lucene characters. Used to normalize automatically
 * generated queries (e.g. in bootstrapping) that may contain special characters.
 *
 * @param	query	the Lucene query to be normalized
 * @param	quoteIfSpace	whether to wrap the query in double quotes when it contains whitespace
 * @return	a normalized version of the query
 */
public static String normalizeQuery(String query, boolean quoteIfSpace)
{
	query = QueryParser.escape(QueryParser.escape(query.trim()));
	if (quoteIfSpace && query.matches(".*\\s.*")) {
		query = "\"" + query + "\"";
	}
	return query;
}
 
Developer: infolis, Project: infoLink, Lines of code: 16, Source file: RegexUtils.java

Example 7: searchConcepts

import org.apache.lucene.queryparser.classic.QueryParser; // import the package/class the method depends on
@Override
public List<Concept> searchConcepts(Query query) {
  QueryParser parser = getQueryParser();
  // BooleanQuery finalQuery = new BooleanQuery();
  Builder finalQueryBuilder = new BooleanQuery.Builder();
  try {
    if (query.isIncludeSynonyms() || query.isIncludeAbbreviations() || query.isIncludeAcronyms()) {
      // BooleanQuery subQuery = new BooleanQuery();
      Builder subQueryBuilder = new BooleanQuery.Builder();
      subQueryBuilder.add(LuceneUtils.getBoostedQuery(parser, query.getInput(), 10.0f),
          Occur.SHOULD);
      String escapedQuery = QueryParser.escape(query.getInput());
      if (query.isIncludeSynonyms()) {
        subQueryBuilder.add(parser.parse(Concept.SYNONYM + ":" + escapedQuery), Occur.SHOULD);
      }
      if (query.isIncludeAbbreviations()) {
        subQueryBuilder.add(parser.parse(Concept.ABREVIATION + ":" + escapedQuery), Occur.SHOULD);
      }
      if (query.isIncludeAcronyms()) {
        subQueryBuilder.add(parser.parse(Concept.ACRONYM + ":" + escapedQuery), Occur.SHOULD);
      }
      finalQueryBuilder.add(subQueryBuilder.build(), Occur.MUST);
    } else {
      finalQueryBuilder.add(parser.parse(query.getInput()), Occur.MUST);
    }
  } catch (ParseException e) {
    logger.log(Level.WARNING, "Failed to parse query", e);
  }
  addCommonConstraints(finalQueryBuilder, query);
  IndexHits<Node> hits = null;
  BooleanQuery finalQuery = finalQueryBuilder.build();

  try (Transaction tx = graph.beginTx()) {
    hits = graph.index().getNodeAutoIndexer().getAutoIndex().query(finalQuery);
    tx.success();
  }
  return limitHits(hits, query);
}
 
Developer: SciGraph, Project: SciGraph, Lines of code: 39, Source file: VocabularyNeo4jImpl.java

Example 8: searchFromString

import org.apache.lucene.queryparser.classic.QueryParser; // import the package/class the method depends on
/**
 * Runs a single search from a query string.
 * @param spokendoc an instance of {@link SpokendocBaseline}
 * @param queryString the search query string
 * @return a TopDocs object with the search results
 * @throws IOException
 * @throws InterruptedException
 * @throws ParseException
 */
private static TopDocs searchFromString(SpokendocBaseline spokendoc, String queryString)
		throws IOException, InterruptedException, ParseException {
	if (spokendoc.normalization) {
		queryString = Util.normalizeString(queryString);
	}
	String tokenizedString = Util.joinWithSplitter(Tokenizer.tokenize(queryString, spokendoc.tokenizerPath), " ");
	tokenizedString = QueryParser.escape(tokenizedString);

	QueryParser parser = spokendoc.getQueryParser("content");
    Query query = parser.parse(tokenizedString);
	IndexSearcher searcher = spokendoc.getIndexSearcher();
    TopDocs results = searcher.search(query, null, 1000);
    return results;
}
 
Developer: Kesin11, Project: Spokendoc-Baseline, Lines of code: 24, Source file: Search.java

Example 9: formatAtomicQueryText

import org.apache.lucene.queryparser.classic.QueryParser; // import the package/class the method depends on
@Override
public String formatAtomicQueryText(String text, String originalText) {
  return QueryParser.escape(text);
}
 
Developer: oaqa, Project: bioasq, Lines of code: 5, Source file: LuceneQueryStringConstructor.java

Example 10: normalizeQuoteName

import org.apache.lucene.queryparser.classic.QueryParser; // import the package/class the method depends on
private static String normalizeQuoteName(String name) {
  return "\"" + QueryParser.escape(name) + "\"";
}
 
Developer: oaqa, Project: bioasq, Lines of code: 4, Source file: LuceneInMemoryPassageScorer.java

Example 11: main

import org.apache.lucene.queryparser.classic.QueryParser; // import the package/class the method depends on
public static void main(String[] args) {

		try {
			Builder builder = Settings.builder();
			//builder.put("client.transport.sniff", true);
			builder.put("cluster.name", "elasticsearch");

			Settings settings = builder.build();
			
			String stringQuery = "{\"match_all\" : {  }}";
			String q = QueryParser.escape(stringQuery);
			System.out.println(q);
			//System.out.println(new MatchAllQueryBuilder());
			Client esClient = new PreBuiltTransportClient(settings).addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9300));
			
			
					
			
			QueryBuilder  queryBuilder= QueryBuilders.wrapperQuery(stringQuery);
			
			String[] fields = new String[2];
			fields[0]="name";
			fields[1]="name1";
			SearchResponse scrollResp  = esClient.prepareSearch("test")
			        .setTypes("test")			        
			        .setQuery(queryBuilder).setFetchSource(fields, null)
			        .setSize(250)//.addSort("name", SortOrder.ASC)
			        .get();
			
			SearchHit[] searchHits = scrollResp.getHits().getHits();
			for(SearchHit searchHit:searchHits){
				System.out.println(searchHit.getSourceAsMap());
			}
			
			/*int i=0;
			while (true) {
				System.out.println(scrollResp);
				System.out.println(scrollResp.getHits().totalHits());
				//System.out.println(scrollResp.getHits().getHits().length);
				//for (SearchHit hit : scrollResp.getHits().getHits()) {
				//	System.out.println(hit.sourceAsMap());
				//	System.out.println(i++);
				//}
				scrollResp = esClient.prepareSearchScroll(scrollResp.getScrollId()).setScroll(new TimeValue(60000)).execute().actionGet();
				if (scrollResp.getHits().getHits().length == 0) {
					break;
				}
			}*/

		} catch (UnknownHostException e) {
			e.printStackTrace();
		}

	}
 
Developer: raghavendar-ts, Project: ElasticTab-Elasticsearch-to-Excel-Report, Lines of code: 56, Source file: Test.java

Example 12: mapQuery

import org.apache.lucene.queryparser.classic.QueryParser; // import the package/class the method depends on
public String mapQuery(MappingClass mapping, Object obj) {
	
    String queryString = "";
	String indexF = mapping.getIndexField();
	int type = mapping.getMappingType();

    // float fuzzyval = 0.600f; // default fuzzy value
    int fuzzyval = 4; // default fuzzy edit distance (the float similarity form is deprecated)
    String lv = null; String hv = null;
    if (obj == null) return queryString;

   	String term = obj.toString();

    switch (type) {
    	case 0:	 // not applicable
    		break;
        case 1: // Exact match
            term = term.trim(); // trim() returns a new string, so assign it back
            queryString = indexF + ":\"" + term + "\"";
            break;
        case 2:
        case 3: // It may have multi-words
            term = term.trim(); // trim() returns a new string, so assign it back
            term = term.replaceAll(",", " ");
            term = term.replaceAll("\\s+", " ");
            String[] token = term.split(" ");
            String newTerm = "";
            for (int i = 0; i < token.length; i++) {
                if (token[i] == null || "".equals(token[i]))
                    continue;

                if (newTerm.equals("") == false && type == 3)
                    newTerm += " AND ";
                if (newTerm.equals("") == false && type == 2)
                    newTerm += " OR ";
                newTerm += indexF + ":"
                        + QueryParser.escape(token[i]) + "~"+fuzzyval+ " "; // For Fuzzy Logic
            }
            queryString = newTerm;
            break;
        case 4:// It may have range Bound query
            lv = mapping.getLowerrange().toString();
            hv = mapping.getUpperrange().toString();
            String ls = boundValue(obj,lv,0);
            String hs = boundValue(obj,hv,0);
            newTerm = indexF + ":[" + ls+ " TO " + hs+ "]";

            queryString = newTerm;
            break;
        case 5: //starts with
        	int l = mapping.getStartswith();
        	newTerm  = term.substring(0, l);
        	queryString = indexF + ":"+ QueryParser.escape(newTerm.trim()) + "*";
        	break;
        case 6: //ends with
        	l = mapping.getEndsswith();
        	newTerm = term.substring(term.length() - l, term.length());
        	queryString = indexF + ":*"+ QueryParser.escape(newTerm.trim()) ;
        	break;
        case 7:// It may have range Bound query
            lv = mapping.getLowerrange().toString();
            hv = mapping.getUpperrange().toString();
            ls = boundValue(obj,lv,1);
            hs = boundValue(obj,hv,1);
            newTerm = indexF + ":[" + ls+ " TO " + hs+ "]";

            queryString = newTerm;
            break;
        default:
            break;

    }
    
    return queryString;
}
 
Developer: arrahtech, Project: osdq-core, Lines of code: 76, Source file: EntityResolutionLucene.java


Note: The org.apache.lucene.queryparser.classic.QueryParser.escape examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective authors, and copyright in the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code; do not republish without permission.