当前位置: 首页>>代码示例>>Java>>正文


Java IndexSearcher.doc方法代码示例

本文整理汇总了Java中org.apache.lucene.search.IndexSearcher.doc方法的典型用法代码示例。如果您正苦于以下问题:Java IndexSearcher.doc方法的具体用法?Java IndexSearcher.doc怎么用?Java IndexSearcher.doc使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.lucene.search.IndexSearcher的用法示例。


在下文中一共展示了IndexSearcher.doc方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: testSimpleNumericOps

import org.apache.lucene.search.IndexSearcher; //导入方法依赖的package包/类
/**
 * Verifies that a stored legacy numeric field round-trips: the document is
 * retrievable both by its "_id" term and by the prefix-coded numeric term,
 * and the stored value reads back as the string "2".
 */
public void testSimpleNumericOps() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));

    Document document = new Document();
    document.add(new TextField("_id", "1", Field.Store.YES));
    document.add(new LegacyIntField("test", 2, LegacyIntField.TYPE_STORED));
    indexWriter.addDocument(document);

    IndexReader reader = DirectoryReader.open(indexWriter);
    try {
        IndexSearcher searcher = new IndexSearcher(reader);
        TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
        Document doc = searcher.doc(topDocs.scoreDocs[0].doc);
        IndexableField f = doc.getField("test");
        assertThat(f.stringValue(), equalTo("2"));

        // Look the same document up again via the prefix-coded numeric term.
        BytesRefBuilder bytes = new BytesRefBuilder();
        LegacyNumericUtils.intToPrefixCoded(2, 0, bytes);
        topDocs = searcher.search(new TermQuery(new Term("test", bytes.get())), 1);
        doc = searcher.doc(topDocs.scoreDocs[0].doc);
        f = doc.getField("test");
        assertThat(f.stringValue(), equalTo("2"));
    } finally {
        // Fix: reader, writer and directory were previously leaked whenever an
        // assertion failed before indexWriter.close(); release them deterministically.
        reader.close();
        indexWriter.close();
        dir.close();
    }
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:26,代码来源:SimpleLuceneTests.java

示例2: main

import org.apache.lucene.search.IndexSearcher; //导入方法依赖的package包/类
/**
 * Searches the index at INDEX_DIRECTORY for "shakespeare" in the contents
 * field and prints the top five hit paths with their scores.
 */
public static void main(String[] args) throws Exception {
	IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(INDEX_DIRECTORY)));
	try {
		IndexSearcher indexSearcher = new IndexSearcher(reader);

		Analyzer analyzer = new StandardAnalyzer();
		QueryParser queryParser = new QueryParser(FIELD_CONTENTS, analyzer);
		String searchString = "shakespeare";
		Query query = queryParser.parse(searchString);

		TopDocs results = indexSearcher.search(query, 5);
		ScoreDoc[] hits = results.scoreDocs;

		int numTotalHits = results.totalHits;
		System.out.println(numTotalHits + " total matching documents");

		for(int i=0;i<hits.length;++i) {
			int docId = hits[i].doc;
			Document d = indexSearcher.doc(docId);
			System.out.println((i + 1) + ". " + d.get("path") + " score=" + hits[i].score);
		}
	} finally {
		// Fix: the reader (and the underlying directory handle) was never closed.
		reader.close();
	}
}
 
开发者ID:PacktPublishing,项目名称:Java-Data-Science-Cookbook,代码行数:22,代码来源:SearchFiles.java

示例3: run

import org.apache.lucene.search.IndexSearcher; //导入方法依赖的package包/类
/**
 * Aggregates (key, value, frequency) triples from every document in the
 * index into per-key FieldLists and writes the top {@code maxEntries}
 * sorted lists to {@code outputPath}, one per line.
 *
 * @throws IOException if the index cannot be read or the output written
 */
private void run() throws IOException {
    IndexReader indexReader = IndexReader.open(FSDirectory.open(new File(indexPath)));
    try {
        IndexSearcher indexSearcher = new IndexSearcher(indexReader);
        List<FieldList> fields = new ArrayList<>();
        Map<String, FieldList> fieldMap = new HashMap<>();
        for (int i = 0; i < indexSearcher.maxDoc(); i++) {
            Document document = indexSearcher.doc(i);
            String key = document.get(keyField);
            // Fix: Document.get(...) returns null when the field is absent;
            // the old key.isEmpty() check would throw a NullPointerException.
            if (key == null || key.isEmpty()) {
                continue;
            }
            String value = document.get(valueField);
            int freq = Integer.parseInt(document.get(freqField));
            FieldList list = fieldMap.get(key);
            if (list == null) {
                list = new FieldList(key);
                fieldMap.put(key, list);
                fields.add(list);
            }
            list.add(value, freq);
        }
        try (PrintWriter writer = new PrintWriter(outputPath)) {
            fields.stream().sorted().limit(maxEntries).forEach(list -> writer.println(list.toString()));
        }
    } finally {
        // Fix: the index reader was previously never closed.
        indexReader.close();
    }
}
 
开发者ID:clearwsd,项目名称:clearwsd,代码行数:25,代码来源:LuceneIndexReader.java

示例4: search

import org.apache.lucene.search.IndexSearcher; //导入方法依赖的package包/类
/**
 * Searches each indexing context for artifacts whose dependency-class field
 * references {@code className}, appending a ClassUsage per match to
 * {@code results}.
 */
static void search(String className, Indexer indexer, Collection<IndexingContext> contexts, List<? super ClassUsage> results) throws IOException {
    // Class references are stored as CRC32/base64 hashes of the slashed name.
    String searchString = crc32base64(className.replace('.', '/'));
    Query refClassQuery = indexer.constructQuery(ClassDependencyIndexCreator.FLD_NB_DEPENDENCY_CLASS.getOntology(), new StringSearchExpression(searchString));
    TopScoreDocCollector collector = TopScoreDocCollector.create(NexusRepositoryIndexerImpl.MAX_RESULT_COUNT, null);
    for (IndexingContext context : contexts) {
        IndexSearcher searcher = context.acquireIndexSearcher();
        try {
            searcher.search(refClassQuery, collector);
            ScoreDoc[] hits = collector.topDocs().scoreDocs;
            LOG.log(Level.FINER, "for {0} ~ {1} found {2} hits", new Object[] {className, searchString, hits.length});
            for (ScoreDoc hit : hits) {
                Document d = searcher.doc(hit.doc);
                String fldValue = d.get(ClassDependencyIndexCreator.NB_DEPENDENCY_CLASSES);
                LOG.log(Level.FINER, "{0} uses: {1}", new Object[] {className, fldValue});
                Set<String> refClasses = parseField(searchString, fldValue, d.get(ArtifactInfo.NAMES));
                if (refClasses.isEmpty()) {
                    continue; // hash collision or stale entry: no real references
                }
                ArtifactInfo ai = IndexUtils.constructArtifactInfo(d, context);
                if (ai == null) {
                    continue;
                }
                ai.setRepository(context.getRepositoryId());
                List<NBVersionInfo> version = NexusRepositoryIndexerImpl.convertToNBVersionInfo(Collections.singleton(ai));
                if (!version.isEmpty()) {
                    results.add(new ClassUsage(version.get(0), refClasses));
                }
            }
        } finally {
            // Always hand the searcher back to the owning context.
            context.releaseIndexSearcher(searcher);
        }
    }
}
 
开发者ID:apache,项目名称:incubator-netbeans,代码行数:33,代码来源:ClassDependencyIndexCreator.java

示例5: testOrdering

import org.apache.lucene.search.IndexSearcher; //导入方法依赖的package包/类
/**
 * Here, we verify that the order that we add fields to a document counts, and not the lexi order
 * of the field. This means that heavily accessed fields that use field selector should be added
 * first (with load and break).
 */
public void testOrdering() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));

    Document document = new Document();
    document.add(new TextField("_id", "1", Field.Store.YES));
    document.add(new TextField("#id", "1", Field.Store.YES));
    indexWriter.addDocument(document);

    IndexReader reader = DirectoryReader.open(indexWriter);
    try {
        IndexSearcher searcher = new IndexSearcher(reader);
        TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
        final ArrayList<String> fieldsOrder = new ArrayList<>();
        // Visit the stored fields and record the order in which they come back.
        searcher.doc(topDocs.scoreDocs[0].doc, new StoredFieldVisitor() {
            @Override
            public Status needsField(FieldInfo fieldInfo) throws IOException {
                fieldsOrder.add(fieldInfo.name);
                return Status.YES;
            }
        });

        // Insertion order ("_id" first), not lexicographic order ("#id" < "_id").
        assertThat(fieldsOrder.size(), equalTo(2));
        assertThat(fieldsOrder.get(0), equalTo("_id"));
        assertThat(fieldsOrder.get(1), equalTo("#id"));
    } finally {
        // Fix: reader, writer and directory were previously leaked whenever an
        // assertion failed before indexWriter.close().
        reader.close();
        indexWriter.close();
        dir.close();
    }
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:33,代码来源:SimpleLuceneTests.java

示例6: toBean

import org.apache.lucene.search.IndexSearcher; //导入方法依赖的package包/类
/**
 * Converts Lucene hits into IfcProductRecordText beans by loading each
 * stored document and copying its oid/type/name/detail fields.
 * On an index read failure, returns whatever was converted so far.
 */
@Override
public List<IfcProductRecordText> toBean(IndexSearcher indexSearcher, Query query, ScoreDoc[] hits) {
	List<IfcProductRecordText> hitRecords = new ArrayList<IfcProductRecordText>();
	try {
		for (ScoreDoc scoreDoc : hits) {
			Document hitDoc = indexSearcher.doc(scoreDoc.doc);

			IfcProductRecordText record = new IfcProductRecordText();
			record.setOid(hitDoc.get(Key_Oid));
			record.setType(hitDoc.get(Key_Type));
			record.setName(hitDoc.get(Key_Name));
			record.setDetail(hitDoc.get(Key_Detail));

			hitRecords.add(record);
		}
	} catch (IOException e) {
		// Best-effort conversion: keep the records built before the failure.
		e.printStackTrace();
	}
	return hitRecords;
}
 
开发者ID:shenan4321,项目名称:BIMplatform,代码行数:29,代码来源:IfcProductRecordTextSearch.java

示例7: doPagingSearch

import org.apache.lucene.search.IndexSearcher; //导入方法依赖的package包/类
/**
 * Runs a paged search: fetches up to 20 pages worth of hits, then resolves
 * the hits on the requested page back to content rows from the database.
 */
public SearchResult doPagingSearch(IndexReader reader, IndexSearcher searcher, Query query, String queryString, int page) throws IOException {
    // Pull enough hits to cover every page we are willing to show.
    TopDocs results = searcher.search(query, 20 * this.PAGELIMIT); // 20 pages worth of documents
    ScoreDoc[] hits = results.scoreDocs;
    int numTotalHits = results.totalHits;

    int start = this.PAGELIMIT * page;
    int end = Math.min(numTotalHits, (this.PAGELIMIT * (page + 1)));

    int noPages = numTotalHits / this.PAGELIMIT;
    if (noPages > 20) {
        noPages = 19; // cap the pager at 20 entries (0..19)
    }
    List<Integer> pages = this.calculatePages(numTotalHits, noPages);

    List<ContentDTO> contentDTOList = new ArrayList<>();
    for (int index = start; index < end; index++) {
        String filepath = searcher.doc(hits[index].doc).get(Values.PATH);

        // The indexed path field holds the database id of the content row.
        ContentDTO content = this.contentDAO.getById(Helpers.tryParseInt(filepath, -1));
        if (content != null) {
            contentDTOList.add(content);
        }
    }

    return new SearchResult(numTotalHits, page, pages, queryString, contentDTOList);
}
 
开发者ID:boyter,项目名称:freemoz,代码行数:30,代码来源:Searcher.java

示例8: PubmedIndexProperties

import org.apache.lucene.search.IndexSearcher; //导入方法依赖的package包/类
/**
 * Loads the global-properties document from the index, or initializes a
 * fresh one carrying the marker field when the index has none yet.
 */
public PubmedIndexProperties(IndexSearcher indexSearcher) throws IOException {
	TopDocs topDocs = indexSearcher.search(new TermQuery(GLOBAL_PROPERTIES_TERM), 1);
	if (topDocs.totalHits >= 1) {
		// Reuse the stored properties document.
		this.doc = indexSearcher.doc(topDocs.scoreDocs[0].doc);
	}
	else {
		// No properties stored yet: start a new document with the marker field.
		this.doc = new Document();
		this.doc.add(new Field(GLOBAL_PROPERTIES_FIELD, GLOBAL_PROPERTIES_VALUE, Field.Store.YES, Field.Index.NOT_ANALYZED));
	}
}
 
开发者ID:Bibliome,项目名称:bibliome-java-utils,代码行数:14,代码来源:PubmedIndexProperties.java

示例9: luceneDoc

import org.apache.lucene.search.IndexSearcher; //导入方法依赖的package包/类
/**
 * Returns a native Lucene Document. 
 * 
 * @param match the Document reference with the Lucene internal ID. 
 * @param searcher the {@link IndexSearcher}, which we'll use for executing searches.
 * @return a native Lucene Document.
 */
public static Document luceneDoc(ScoreDoc match, IndexSearcher searcher) {
	final int internalId = match.doc;
	try {
		return searcher.doc(internalId);
	} catch (final Exception cause) {
		// Surface index access failures as unchecked exceptions.
		throw new RuntimeException(cause);
	}
}
 
开发者ID:agazzarini,项目名称:as-full-text-search-server,代码行数:15,代码来源:LuceneBasicFlowExample.java

示例10: luceneDoc

import org.apache.lucene.search.IndexSearcher; //导入方法依赖的package包/类
/**
 * Loads the stored document with the given Lucene-internal id, rethrowing
 * any failure as an unchecked exception.
 */
private Document luceneDoc(int id, IndexSearcher searcher) {
	try {
		return searcher.doc(id);
	} catch (final Exception cause) {
		// Keep callers free of checked exceptions for this test helper.
		throw new RuntimeException(cause);
	}
}
 
开发者ID:agazzarini,项目名称:as-full-text-search-server,代码行数:8,代码来源:LuceneBasicFlowExampleTestCase.java

示例11: mergeDocuments

import org.apache.lucene.search.IndexSearcher; //导入方法依赖的package包/类
/**
 * Builds a new document that concatenates the SUBJECT and ID fields of the
 * top hit from each of the two given hit arrays.
 */
private static Document mergeDocuments(IndexSearcher searcher, ScoreDoc[] topHitsDocsIdFirst,
		ScoreDoc[] topHitsDocsIdSecond) throws IOException {
	Document first = searcher.doc(topHitsDocsIdFirst[0].doc);
	Document second = searcher.doc(topHitsDocsIdSecond[0].doc);

	Document merged = new Document();
	concatenateTickets(first, second, merged, SUBJECT);
	concatenateTickets(first, second, merged, ID);
	return merged;
}
 
开发者ID:SAP,项目名称:cloud-c4c-ticket-duplicate-finder-ext,代码行数:11,代码来源:IndexService.java

示例12: search

import org.apache.lucene.search.IndexSearcher; //导入方法依赖的package包/类
/**
 * Searches the "desc" field of the index at {@code indexDir} for {@code q}
 * using the SmartChineseAnalyzer, printing each hit's city, description and
 * a highlighted best fragment.
 *
 * @param indexDir path of the Lucene index directory
 * @param q        the raw query string
 */
public static void search(String indexDir, String q) throws Exception {
  // Fix: the directory was never closed and the reader leaked whenever
  // parsing or searching threw before reader.close(); use try-with-resources.
  try (Directory dir = FSDirectory.open(Paths.get(indexDir));
       IndexReader reader = DirectoryReader.open(dir)) {
    IndexSearcher is = new IndexSearcher(reader);
    // Analyzer analyzer=new StandardAnalyzer(); // 标准分词器
    SmartChineseAnalyzer analyzer = new SmartChineseAnalyzer();
    QueryParser parser = new QueryParser("desc", analyzer);
    Query query = parser.parse(q);

    long start = System.currentTimeMillis();
    TopDocs hits = is.search(query, 10);
    long end = System.currentTimeMillis();
    System.out.println("匹配 " + q + " ,总共花费" + (end - start) + "毫秒" + "查询到" + hits.totalHits + "个记录");

    QueryScorer scorer = new QueryScorer(query);
    Fragmenter fragmenter = new SimpleSpanFragmenter(scorer);
    SimpleHTMLFormatter simpleHTMLFormatter = new SimpleHTMLFormatter("<b><font color='red'>", "</font></b>");
    Highlighter highlighter = new Highlighter(simpleHTMLFormatter, scorer);
    highlighter.setTextFragmenter(fragmenter);
    for (ScoreDoc scoreDoc : hits.scoreDocs) {
      Document doc = is.doc(scoreDoc.doc);
      System.out.println(doc.get("city"));
      System.out.println(doc.get("desc"));
      String desc = doc.get("desc");
      if (desc != null) {
        TokenStream tokenStream = analyzer.tokenStream("desc", new StringReader(desc));
        System.out.println(highlighter.getBestFragment(tokenStream, desc));
      }
    }
  }
}
 
开发者ID:MiniPa,项目名称:cjs_ssms,代码行数:32,代码来源:SearcherTest.java

示例13: main

import org.apache.lucene.search.IndexSearcher; //导入方法依赖的package包/类
/**
 * Demo query: parses "text" against the "fieldname" field of the local
 * index and prints up to ten hits in index order.
 */
public static void main(String []args) throws IOException, ParseException {
        Analyzer analyzer = new StandardAnalyzer();

        // Fix: the reader and directory leaked whenever parsing or searching
        // threw before the manual close() calls; use try-with-resources.
        try (Directory directory = FSDirectory.open(Paths.get("/Users/admin/lucene"));
             DirectoryReader ireader = DirectoryReader.open(directory)) {

            IndexSearcher indexSearcher = new IndexSearcher(ireader);

            QueryParser parser = new QueryParser("fieldname", analyzer);
            Query query = parser.parse("text");

            ScoreDoc[] hits = indexSearcher.search(query, 10, Sort.INDEXORDER).scoreDocs;
            for (int i = 0; i < hits.length; i++) {
                Document hitDoc = indexSearcher.doc(hits[i].doc);
                System.out.println(hitDoc.toString());
            }
        }
    }
 
开发者ID:felayman,项目名称:elasticsearch-full,代码行数:26,代码来源:SampleSearchDemo.java

示例14: main

import org.apache.lucene.search.IndexSearcher; //导入方法依赖的package包/类
/**
 * Demo fuzzy query: matches terms similar to "国" in the "fieldname" field
 * and prints each hit with its score.
 */
public static void main(String []args) throws IOException {
    // Fix: the reader and directory leaked whenever searching threw before
    // the manual close() calls; use try-with-resources. The unused
    // StandardAnalyzer local was also removed.
    try (Directory directory = FSDirectory.open(Paths.get("/Users/admin/lucene"));
         DirectoryReader ireader = DirectoryReader.open(directory)) {

        IndexSearcher indexSearcher = new IndexSearcher(ireader);

        Term term = new Term("fieldname","国");
        FuzzyQuery query = new FuzzyQuery(term);

        ScoreDoc[] hits = indexSearcher.search(query, 10, Sort.INDEXORDER).scoreDocs;
        for (int i = 0; i < hits.length; i++) {
            Document hitDoc = indexSearcher.doc(hits[i].doc);
            System.out.println(hitDoc.toString()+","+hits[i].score);
        }
    }
}
 
开发者ID:felayman,项目名称:elasticsearch-full,代码行数:23,代码来源:FuzzyQueryDemo.java

示例15: search

import org.apache.lucene.search.IndexSearcher; //导入方法依赖的package包/类
/**
 * Searches the note index for {@code query} and maps every hit to a
 * NoteSearchResult.
 *
 * @param query               the raw query text
 * @param isCaseSensitive     search the case-sensitive title field when true
 * @param wholeWordsOnly      quote the query as an exact phrase when true
 * @param isRegularExpression wrap the query in '/' regex delimiters when true
 * @return all matching notes (the search is unbounded: numDocs() + 1 hits requested)
 */
public List <NoteSearchResult> search (String query, boolean isCaseSensitive, boolean wholeWordsOnly, boolean isRegularExpression) throws IOException, ParseException
{
	if (isRegularExpression) {
		query = "/" + query + "/";
	}
	else if (wholeWordsOnly) {
		query = "\"" + query + "\"";
	}
	
	List <NoteSearchResult> searchResults = new ArrayList <NoteSearchResult>();
	
	// Fix: the directory and reader leaked whenever parsing or searching
	// threw before the manual close() calls; use try-with-resources.
	try (Directory index = FSDirectory.open (Paths.get(indexDirectoryPath));
	     IndexReader indexReader = DirectoryReader.open (index)) {
		
		PerFieldAnalyzerWrapper analyzer = new PerFieldAnalyzerWrapper (new StandardAnalyzer (), fieldAnalyzerLookup);
		
		Query parsedQuery = new QueryParser(isCaseSensitive ? "title_cs" : "title", analyzer).parse (query);
		
		IndexSearcher indexSearcher = new IndexSearcher (indexReader);
		
		TopDocs documents = indexSearcher.search(parsedQuery, indexReader.numDocs() + 1);
		
		for (ScoreDoc hit : documents.scoreDocs) {
			Document d = indexSearcher.doc (hit.doc);
			
			searchResults.add(documentToNote(d));
		}
	}
	
	return searchResults;
}
 
开发者ID:ser316asu,项目名称:Reinickendorf_SER316,代码行数:38,代码来源:NoteIndexer.java


注:本文中的org.apache.lucene.search.IndexSearcher.doc方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。