

Java TermQuery Class Code Examples

This article collects typical usage examples of the Java class org.apache.lucene.search.TermQuery. If you are unsure what the TermQuery class is for, how to use it, or what it looks like in practice, the curated examples below should help.


TermQuery belongs to the org.apache.lucene.search package. A total of 15 TermQuery code examples are shown below, sorted by popularity by default.
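Before the project examples, here is a minimal, self-contained sketch of the basic TermQuery pattern. It is not taken from any of the projects below; it assumes a Lucene 5.x/6.x classpath, and the field name "title" and the indexed value are invented for illustration: index a document with an untokenized field, then match it with an exact-term query.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.RAMDirectory;

public class TermQueryDemo {
    public static void main(String[] args) throws Exception {
        RAMDirectory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));

        // StringField indexes the value as a single untokenized term,
        // which is exactly what TermQuery matches against.
        Document doc = new Document();
        doc.add(new StringField("title", "lucene", Field.Store.YES));
        writer.addDocument(doc);
        writer.close();

        IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(dir));
        // Matches documents whose "title" field contains the exact term "lucene".
        TermQuery query = new TermQuery(new Term("title", "lucene"));
        TopDocs hits = searcher.search(query, 10);
        System.out.println("hits: " + hits.totalHits);
    }
}

Because TermQuery performs no analysis on its input, the term must match the indexed token exactly; for analyzed text fields, the project examples below typically build the term from already-normalized values (for example, Example 7 lower-cases the marketplace id before wrapping it in a Term).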

Example 1: testRamDirectory

import org.apache.lucene.search.TermQuery; // import the required package/class
public void testRamDirectory() throws IOException {
    long start = System.currentTimeMillis();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(new WhitespaceAnalyzer()).setOpenMode(IndexWriterConfig
            .OpenMode.CREATE);
    RAMDirectory ramDirectory = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(ramDirectory, indexWriterConfig);
    for (int i = 0; i < 10000000; i++) {
        indexWriter.addDocument(addDocument(i));
    }
    indexWriter.commit();
    indexWriter.close();
    long end = System.currentTimeMillis();
    log.error("RamDirectory consumes {}s!", (end - start) / 1000);
    start = System.currentTimeMillis();
    IndexSearcher indexSearcher = new IndexSearcher(DirectoryReader.open(ramDirectory));
    int total = 0;
    for (int i = 0; i < 10000000; i++) {
        TermQuery key1 = new TermQuery(new Term("key1", "key" + i));
        TopDocs search = indexSearcher.search(key1, 10);
        total += search.totalHits;
    }
    System.out.println(total);
    end = System.currentTimeMillis();
    log.error("RamDirectory search consumes {}ms!", (end - start));
}
 
Developer ID: shijiebei2009, Project: RedisDirectory, Lines: 26, Source: TestLucene.java

Example 2: testMMapDirectory

import org.apache.lucene.search.TermQuery; // import the required package/class
public void testMMapDirectory() throws IOException {
    long start = System.currentTimeMillis();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(new WhitespaceAnalyzer()).setOpenMode(IndexWriterConfig
            .OpenMode.CREATE);
    FSDirectory open = FSDirectory.open(Paths.get("E:/testlucene"));
    IndexWriter indexWriter = new IndexWriter(open, indexWriterConfig);
    for (int i = 0; i < 10000000; i++) {
        indexWriter.addDocument(addDocument(i));
    }
    indexWriter.commit();
    indexWriter.close();
    long end = System.currentTimeMillis();
    log.error("MMapDirectory consumes {}s!", (end - start) / 1000);
    start = System.currentTimeMillis();
    IndexSearcher indexSearcher = new IndexSearcher(DirectoryReader.open(open));
    int total = 0;
    for (int i = 0; i < 10000000; i++) {
        TermQuery key1 = new TermQuery(new Term("key1", "key" + i));
        TopDocs search = indexSearcher.search(key1, 10);
        total += search.totalHits;
    }
    System.out.println(total);
    end = System.currentTimeMillis();
    log.error("MMapDirectory search consumes {}ms!", (end - start));
}
 
Developer ID: shijiebei2009, Project: RedisDirectory, Lines: 26, Source: TestLucene.java

Example 3: testSimpleNumericOps

import org.apache.lucene.search.TermQuery; // import the required package/class
public void testSimpleNumericOps() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));

    Document document = new Document();
    document.add(new TextField("_id", "1", Field.Store.YES));
    document.add(new LegacyIntField("test", 2, LegacyIntField.TYPE_STORED));
    indexWriter.addDocument(document);

    IndexReader reader = DirectoryReader.open(indexWriter);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
    Document doc = searcher.doc(topDocs.scoreDocs[0].doc);
    IndexableField f = doc.getField("test");
    assertThat(f.stringValue(), equalTo("2"));

    BytesRefBuilder bytes = new BytesRefBuilder();
    LegacyNumericUtils.intToPrefixCoded(2, 0, bytes);
    topDocs = searcher.search(new TermQuery(new Term("test", bytes.get())), 1);
    doc = searcher.doc(topDocs.scoreDocs[0].doc);
    f = doc.getField("test");
    assertThat(f.stringValue(), equalTo("2"));

    indexWriter.close();
}
 
Developer ID: justor, Project: elasticsearch_my, Lines: 26, Source: SimpleLuceneTests.java

Example 4: binaryNameQuery

import org.apache.lucene.search.TermQuery; // import the required package/class
static Query binaryNameQuery (final String resourceName) {
    final BooleanQuery query = new BooleanQuery ();
    int index = resourceName.lastIndexOf(BinaryName.PKG_SEPARATOR);  // NOI18N
    String pkgName, sName;
    if (index < 0) {
        pkgName = "";   // NOI18N
        sName = resourceName;
    }
    else {
        pkgName = resourceName.substring(0,index);
        sName = resourceName.substring(index+1);
    }
    sName = sName + WILDCARD_QUERY_WILDCARD;   //Type of type element (Enum, Class, Interface, Annotation)
    query.add (new TermQuery (new Term (FIELD_PACKAGE_NAME, pkgName)),BooleanClause.Occur.MUST);
    query.add (new WildcardQuery (new Term (FIELD_BINARY_NAME, sName)),BooleanClause.Occur.MUST);
    return query;
}
 
Developer ID: apache, Project: incubator-netbeans, Lines: 18, Source: DocumentUtil.java

Example 5: loadQueries

import org.apache.lucene.search.TermQuery; // import the required package/class
private int loadQueries(IndexShard shard) {
    shard.refresh("percolator_load_queries");
    // NOTE: we acquire the searcher via the engine directly here since this is executed right
    // before the shard is marked as POST_RECOVERY
    try (Engine.Searcher searcher = shard.engine().acquireSearcher("percolator_load_queries")) {
        Query query = new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorService.TYPE_NAME));
        QueriesLoaderCollector queryCollector = new QueriesLoaderCollector(PercolatorQueriesRegistry.this, logger, mapperService, indexFieldDataService);
        IndexSearcher indexSearcher = new IndexSearcher(searcher.reader());
        indexSearcher.setQueryCache(null);
        indexSearcher.search(query, queryCollector);
        Map<BytesRef, Query> queries = queryCollector.queries();
        for (Map.Entry<BytesRef, Query> entry : queries.entrySet()) {
            Query previousQuery = percolateQueries.put(entry.getKey(), entry.getValue());
            shardPercolateService.addedQuery(entry.getKey(), previousQuery, entry.getValue());
        }
        return queries.size();
    } catch (Exception e) {
        throw new PercolatorException(shardId.index(), "failed to load queries from percolator index", e);
    }
}
 
Developer ID: baidu, Project: Elasticsearch, Lines: 21, Source: PercolatorQueriesRegistry.java

Example 6: createFQNQuery

import org.apache.lucene.search.TermQuery; // import the required package/class
private static BooleanQuery createFQNQuery(final String resourceName) {
    String pkgName;
    String sName;
    int index = resourceName.lastIndexOf(BinaryName.PKG_SEPARATOR);
    if (index < 0) {
        pkgName = "";       //NOI18N
        sName = resourceName;
    } else {
        pkgName = resourceName.substring(0, index);
        sName = resourceName.substring(index+1);
    }
    final BooleanQuery snQuery = new BooleanQuery();
    snQuery.add (new WildcardQuery (new Term (DocumentUtil.FIELD_BINARY_NAME, sName + DocumentUtil.WILDCARD_QUERY_WILDCARD)),Occur.SHOULD);
    snQuery.add (new PrefixQuery (new Term (DocumentUtil.FIELD_BINARY_NAME, sName + '$')),Occur.SHOULD);   //NOI18N
    if (pkgName.length() == 0) {
        return snQuery;
    }
    final BooleanQuery fqnQuery = new BooleanQuery();
    fqnQuery.add(new TermQuery(new Term(DocumentUtil.FIELD_PACKAGE_NAME,pkgName)), Occur.MUST);
    fqnQuery.add(snQuery, Occur.MUST);
    return fqnQuery;
}
 
Developer ID: apache, Project: incubator-netbeans, Lines: 23, Source: DocumentUtil.java

Example 7: getLuceneQuery

import org.apache.lucene.search.TermQuery; // import the required package/class
/**
 * Returns the Lucene query for the given locale and query text.
 * 
 * @param searchString
 *            the text query for the Lucene query parser
 * @param mId
 *            the marketplace id
 * @param locale
 *            the locale for the analyzer to use
 * @param isDefaultLocaleHandling
 * @return the Lucene query for the given locale and query text
 * @throws ParseException
 *             in case the query cannot be parsed
 */
private org.apache.lucene.search.Query getLuceneQuery(String searchString,
        String mId, String locale, boolean isDefaultLocaleHandling)
        throws SyntaxError, QueryNodeException {

    // construct wildcard query for the actual search part
    org.apache.lucene.search.Query textQuery = LuceneQueryBuilder
            .getServiceQuery(searchString, locale, DEFAULT_LOCALE,
                    isDefaultLocaleHandling);

    // build mId part
    TermQuery mIdQuery = new TermQuery(new Term(ProductClassBridge.MP_ID,
            QueryParser.escape(mId).toLowerCase()));

    // now construct final query
    BooleanQuery query = new BooleanQuery();
    query.add(mIdQuery, Occur.MUST);
    query.add(textQuery, Occur.MUST);
    return query;
}
 
Developer ID: servicecatalog, Project: oscm, Lines: 34, Source: SearchServiceBean.java

Example 8: getNewsOfNewsGroup

import org.apache.lucene.search.TermQuery; // import the required package/class
/**
 * Returns all {@link News} of the given news group except the {@link News} with the given id.
 * @param newsGroupId the news group id
 * @param exceptId the news which should not be returned
 * @return a {@link List} with all {@link News} of the requested news group except the {@link News} with the exceptId.
 */
private List<News> getNewsOfNewsGroup(final long newsGroupId, final long exceptId) {
	final BooleanQuery query = new BooleanQuery();

	QueryUtil.addTypeConf(query, NewsIndexType.getInstance());

	final NumericRangeQuery<Long> groupQuery = NumericRangeQuery.newLongRange(
			NewsIndexType.FIELD_NEWSGROUPID, newsGroupId, newsGroupId, true, true);
	query.add(groupQuery, Occur.MUST);

	// exclude news
	query.add(new TermQuery(new Term(IIndexElement.FIELD_ID, String.valueOf(exceptId))), Occur.MUST_NOT);

	final SearchOptions options = new SearchOptions();
	options.setSort(new Sort(ESortField.PUBLISH_DATE.getSortField(ESortOrder.DESC)));

	final DocumentsSearchResult result = IndexSearch.getInstance().search(query, options);
	return NewsIndexType.docsToNews(result.getResults());
}
 
Developer ID: XMBomb, Project: InComb, Lines: 25, Source: NewsDao.java

Example 9: checkIndexContent

import org.apache.lucene.search.TermQuery; // import the required package/class
private void checkIndexContent(final String elementId,
		final String fieldContent, final int expectedAmount) throws IOException {
	final IndexReader reader = IndexManager.getInstance().getIndex().getIndexReader();
	final IndexSearcher searcher = new IndexSearcher(reader);
	final TopDocs topDocs = searcher.search(new TermQuery(new Term(FIELDNAME, fieldContent)), expectedAmount + 10);

	assertNotNull(topDocs);
	assertTrue(topDocs.totalHits == expectedAmount);

	if(expectedAmount > 0) {
		final ScoreDoc scoreDoc = topDocs.scoreDocs[0];
		assertNotNull(scoreDoc);

		final Document doc = reader.document(scoreDoc.doc);
		assertNotNull(doc);
		assertEquals(fieldContent, doc.get(FIELDNAME));
		assertEquals(elementId, doc.get(IIndexElement.FIELD_ID));
		assertEquals(INDEX_TYPE, doc.get(IIndexElement.FIELD_INDEX_TYPE));
	}
}
 
Developer ID: XMBomb, Project: InComb, Lines: 21, Source: IndexingThreadTest.java

Example 10: findPosts

import org.apache.lucene.search.TermQuery; // import the required package/class
private List<Post> findPosts() {
	try {
		FullTextSession fullTextSession = getFullTextSession((Session) entityManager.getDelegate());
		Builder builder = new Builder();
		String[] fields = new String[] { "message.text", "topic.subject" };
		MultiFieldQueryParser parser = new MultiFieldQueryParser(fields, new StandardAnalyzer());
		builder.add(parser.parse(POST_TEXT), MUST);
		builder.add(new TermQuery(new Term("topic.forum.id", "0")), MUST);
		builder.add(new TermQuery(new Term("topic.forum.category.id", "0")), MUST);
		builder.add(new WildcardQuery(new Term("poster.userId", "root")), MUST);
		addPostTimeQuery(builder);
		FullTextQuery fullTextQuery = fullTextSession.createFullTextQuery(builder.build(), Post.class);
		fullTextQuery.setSort(getSort());
		fullTextQuery.setFirstResult(0);
		fullTextQuery.setMaxResults(15);
		@SuppressWarnings("unchecked")
		List<Post> posts = fullTextQuery.list();
		return posts;
	} catch (ParseException e) {
		logger.severe("error");
		return null;
	}
}
 
Developer ID: PacktPublishing, Project: Mastering-Java-EE-Development-with-WildFly, Lines: 24, Source: SearchTestCase.java

Example 11: testVectorHighlighter

import org.apache.lucene.search.TermQuery; // import the required package/class
public void testVectorHighlighter() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));

    Document document = new Document();
    document.add(new TextField("_id", "1", Field.Store.YES));
    FieldType vectorsType = new FieldType(TextField.TYPE_STORED);
    vectorsType.setStoreTermVectors(true);
    vectorsType.setStoreTermVectorPositions(true);
    vectorsType.setStoreTermVectorOffsets(true);
    document.add(new Field("content", "the big bad dog", vectorsType));
    indexWriter.addDocument(document);

    IndexReader reader = DirectoryReader.open(indexWriter);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);

    assertThat(topDocs.totalHits, equalTo(1));

    FastVectorHighlighter highlighter = new FastVectorHighlighter();
    String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))),
            reader, topDocs.scoreDocs[0].doc, "content", 30);
    assertThat(fragment, notNullValue());
    assertThat(fragment, equalTo("the big <b>bad</b> dog"));
}
 
Developer ID: justor, Project: elasticsearch_my, Lines: 26, Source: VectorHighlighterTests.java

Example 12: testCrossFieldMultiMatchQuery

import org.apache.lucene.search.TermQuery; // import the required package/class
public void testCrossFieldMultiMatchQuery() throws IOException {
    QueryShardContext queryShardContext = indexService.newQueryShardContext(
            randomInt(20), null, () -> { throw new UnsupportedOperationException(); });
    queryShardContext.setAllowUnmappedFields(true);
    Query parsedQuery = multiMatchQuery("banon").field("name.first", 2).field("name.last", 3).field("foobar").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).toQuery(queryShardContext);
    try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) {
        Query rewrittenQuery = searcher.searcher().rewrite(parsedQuery);

        BooleanQuery.Builder expected = new BooleanQuery.Builder();
        expected.add(new TermQuery(new Term("foobar", "banon")), BooleanClause.Occur.SHOULD);
        Query tq1 = new BoostQuery(new TermQuery(new Term("name.first", "banon")), 2);
        Query tq2 = new BoostQuery(new TermQuery(new Term("name.last", "banon")), 3);
        expected.add(new DisjunctionMaxQuery(Arrays.<Query>asList(tq1, tq2), 0f), BooleanClause.Occur.SHOULD);
        assertEquals(expected.build(), rewrittenQuery);
    }
}
 
Developer ID: justor, Project: elasticsearch_my, Lines: 17, Source: MultiMatchQueryTests.java

Example 13: doToQuery

import org.apache.lucene.search.TermQuery; // import the required package/class
@Override
protected Query doToQuery(QueryShardContext context) throws IOException {
    DocumentMapper childDocMapper = context.getMapperService().documentMapper(type);
    if (childDocMapper == null) {
        if (ignoreUnmapped) {
            return new MatchNoDocsQuery();
        } else {
            throw new QueryShardException(context, "[" + NAME + "] no mapping found for type [" + type + "]");
        }
    }
    ParentFieldMapper parentFieldMapper = childDocMapper.parentFieldMapper();
    if (parentFieldMapper.active() == false) {
        throw new QueryShardException(context, "[" + NAME + "] _parent field has no parent type configured");
    }
    String fieldName = ParentFieldMapper.joinField(parentFieldMapper.type());

    BooleanQuery.Builder query = new BooleanQuery.Builder();
    query.add(new DocValuesTermsQuery(fieldName, id), BooleanClause.Occur.MUST);
    // Need to take child type into account, otherwise a child doc of different type with the same id could match
    query.add(new TermQuery(new Term(TypeFieldMapper.NAME, type)), BooleanClause.Occur.FILTER);
    return query.build();
}
 
Developer ID: justor, Project: elasticsearch_my, Lines: 23, Source: ParentIdQueryBuilder.java

Example 14: assertDocDoesNotExist

import org.apache.lucene.search.TermQuery; // import the required package/class
/**
 * Asserts that the doc in the index operation really doesn't exist
 */
private boolean assertDocDoesNotExist(final Index index, final boolean allowDeleted) throws IOException {
    final VersionValue versionValue = versionMap.getUnderLock(index.uid());
    if (versionValue != null) {
        if (versionValue.delete() == false || allowDeleted == false) {
            throw new AssertionError("doc [" + index.type() + "][" + index.id() + "] exists in version map (version " + versionValue + ")");
        }
    } else {
        try (Searcher searcher = acquireSearcher("assert doc doesn't exist")) {
            final long docsWithId = searcher.searcher().count(new TermQuery(index.uid()));
            if (docsWithId > 0) {
                throw new AssertionError("doc [" + index.type() + "][" + index.id() + "] exists [" + docsWithId + "] times in index");
            }
        }
    }
    return true;
}
 
Developer ID: justor, Project: elasticsearch_my, Lines: 20, Source: InternalEngine.java

Example 15: run

import org.apache.lucene.search.TermQuery; // import the required package/class
@Override
public void run() {
  int i = 0;
  while (i < 10000) {
    try {
      if (data.size() <= i) {
        sleep(1);
        continue;
      }
      final String key = "key" + i;
      final String val = "value" + i;
      final List<Document> documents = index.searchForDocuments(new TermQuery(new Term(key, val)), 10, new Sort(new SortField(key, SortField.Type.STRING)));
      if (documents.size() != 1) {
        throw new RuntimeException("Invalid number of matching documents for " + key + ", found " + documents);
      }
      ++i;
    } catch (IOException ioe) {
      error = ioe;
      break;
    } catch (InterruptedException e) {
    } catch (AlreadyClosedException ace) {
      error = ace;
      break;
    }
  }
}
 
Developer ID: dremio, Project: dremio-oss, Lines: 27, Source: TestLuceneIndexer.java


Note: The org.apache.lucene.search.TermQuery class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are taken from open-source projects contributed by their respective authors, and copyright remains with the original authors; consult each project's License before redistributing or using the code. Do not republish without permission.