

Java DisjunctionMaxQuery.add Method Code Examples

This article collects typical usage examples of the DisjunctionMaxQuery.add method from the Java package org.apache.lucene.search. If you are wondering what DisjunctionMaxQuery.add does, how to call it, or what real-world uses of it look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.lucene.search.DisjunctionMaxQuery.


The following presents 10 code examples of the DisjunctionMaxQuery.add method, sorted by popularity by default.
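
Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the pattern they all share: construct a DisjunctionMaxQuery with a tie-breaker multiplier, then call add once per disjunct. It assumes the older, mutable Lucene API (4.x/5.x) used throughout this article; in Lucene 6 and later the class is immutable and the disjuncts are passed to the constructor instead of add. The field names and the 0.1f tie breaker are illustrative only.

import org.apache.lucene.index.Term;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

public class DisMaxUsageSketch {

  // Scores each document by its best-matching field, plus 10% of the
  // scores of the other matching fields (the tie-breaker multiplier).
  public static Query byBestField(String text) {
    DisjunctionMaxQuery dmq = new DisjunctionMaxQuery(0.1f); // tie-breaker multiplier
    dmq.add(new TermQuery(new Term("title", text)));         // each add() contributes one disjunct
    dmq.add(new TermQuery(new Term("body", text)));
    return dmq;
  }
}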

Example 1: getQuery

import org.apache.lucene.search.DisjunctionMaxQuery; //import the package/class the method depends on
@Override
public Query getQuery(Element e) throws ParserException {
  float tieBreaker = DOMUtils.getAttribute(e, "tieBreaker", 0.0f); 
  DisjunctionMaxQuery dq = new DisjunctionMaxQuery(tieBreaker);
  dq.setBoost(DOMUtils.getAttribute(e, "boost", 1.0f));

  NodeList nl = e.getChildNodes();
  for (int i = 0; i < nl.getLength(); i++) {
    Node node = nl.item(i);
    if (node instanceof Element) { // all elements are disjuncts.
      Element queryElem = (Element) node;
      Query q = factory.getQuery(queryElem);
      dq.add(q);
    }
  }

  return dq;
}
 
Developer: lamsfoundation, Project: lams, Lines: 19, Source: DisjunctionMaxQueryBuilder.java

Example 2: construct

import org.apache.lucene.search.DisjunctionMaxQuery; //import the package/class the method depends on
@Override
protected Query construct(LindenQuery lindenQuery, LindenConfig config) throws Exception {
  if (!lindenQuery.isSetDisMaxQuery()) {
    return null;
  }

  LindenDisMaxQuery disMaxQuery = lindenQuery.getDisMaxQuery();
  DisjunctionMaxQuery disjunctionMaxQuery = new DisjunctionMaxQuery((float) disMaxQuery.getTie());
  for (LindenQuery subLindenQuery : disMaxQuery.getQueries()) {
    Query query = QueryConstructor.constructQuery(subLindenQuery, config);
    if (query != null) {
      disjunctionMaxQuery.add(query);
    }
  }
  return disjunctionMaxQuery;
}
 
Developer: XiaoMi, Project: linden, Lines: 17, Source: DisMaxQueryConstructor.java

Example 3: dmq

import org.apache.lucene.search.DisjunctionMaxQuery; //import the package/class the method depends on
protected Query dmq( float tieBreakerMultiplier, Query... queries ){
  DisjunctionMaxQuery query = new DisjunctionMaxQuery( tieBreakerMultiplier );
  for( Query q : queries ){
    query.add( q );
  }
  return query;
}
 
Developer: europeana, Project: search, Lines: 8, Source: AbstractTestCase.java

Example 4: getFieldQuery

import org.apache.lucene.search.DisjunctionMaxQuery; //import the package/class the method depends on
/**
 * Delegates to the super class unless the field has been specified
 * as an alias -- in which case we recurse on each of
 * the aliased fields, and the results are composed into a
 * DisjunctionMaxQuery.  (so yes: aliases which point at other
 * aliases should work)
 */
@Override
protected Query getFieldQuery(String field, String queryText, boolean quoted)
  throws SyntaxError {

  if (aliases.containsKey(field)) {

    Alias a = aliases.get(field);
    DisjunctionMaxQuery q = new DisjunctionMaxQuery(a.tie);

    /* we might not get any valid queries from delegation,
     * in which case we should return null
     */
    boolean ok = false;

    for (String f : a.fields.keySet()) {

      Query sub = getFieldQuery(f,queryText,quoted);
      if (null != sub) {
        if (null != a.fields.get(f)) {
          sub.setBoost(a.fields.get(f));
        }
        q.add(sub);
        ok = true;
      }
    }
    return ok ? q : null;

  } else {
    try {
      return super.getFieldQuery(field, queryText, quoted);
    } catch (Exception e) {
      return null;
    }
  }
}
 
Developer: europeana, Project: search, Lines: 43, Source: SolrPluginUtils.java

Example 5: getConcepts

import org.apache.lucene.search.DisjunctionMaxQuery; //import the package/class the method depends on
@Override
public String[] getConcepts(String label) throws IOException {
  List<String> concepts = new ArrayList<String>();
  
  // convert the query to lower-case
  String queryString = label.toLowerCase();
  
  AllDocCollector collector = new AllDocCollector();
  
  DisjunctionMaxQuery query = new DisjunctionMaxQuery(0.0f);
  query.add(new TermQuery(new Term(FIELD_PREF_LABEL, queryString)));
  query.add(new TermQuery(new Term(FIELD_ALT_LABEL, queryString)));
  query.add(new TermQuery(new Term(FIELD_HIDDEN_LABEL, queryString)));
  query.add(new TermQuery(new Term(FIELD_PREFMALE_LABEL, queryString)));
  query.add(new TermQuery(new Term(FIELD_PREFFEMALE_LABEL, queryString)));
  query.add(new TermQuery(new Term(FIELD_PREFNEUTER_LABEL, queryString)));
  query.add(new TermQuery(new Term(FIELD_ALTMALE_LABEL, queryString)));
  query.add(new TermQuery(new Term(FIELD_ALTFEMALE_LABEL, queryString)));
  query.add(new TermQuery(new Term(FIELD_ALTNEUTER_LABEL, queryString)));
  searcher.search(query, collector);
  
  for (Integer hit : collector.getDocs()) {
    Document doc = searcher.doc(hit);
    String conceptURI = doc.getValues(FIELD_URI)[0];
    concepts.add(conceptURI);
  }
  
  return concepts.toArray(new String[concepts.size()]);
}
 
Developer: KepaJRodriguez, Project: lucene-skos-ehri, Lines: 30, Source: SKOSEngineImpl.java

Example 6: testWildcardInDisjunctionMax

import org.apache.lucene.search.DisjunctionMaxQuery; //import the package/class the method depends on
public void testWildcardInDisjunctionMax() throws Exception {
  Directory dir = newDirectory();
  // use simpleanalyzer for more natural tokenization (else "test." is a token)
  final Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
  IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
  iwc.setMergePolicy(newLogMergePolicy());
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
  
  FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
  offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
  Field body = new Field("body", "", offsetsType);
  Document doc = new Document();
  doc.add(body);
  
  body.setStringValue("This is a test.");
  iw.addDocument(doc);
  body.setStringValue("Test a one sentence document.");
  iw.addDocument(doc);
  
  IndexReader ir = iw.getReader();
  iw.close();
  
  IndexSearcher searcher = newSearcher(ir);
  PostingsHighlighter highlighter = new PostingsHighlighter() {
    @Override
    protected Analyzer getIndexAnalyzer(String field) {
      return analyzer;
    }
  };
  DisjunctionMaxQuery query = new DisjunctionMaxQuery(0);
  query.add(new WildcardQuery(new Term("body", "te*")));
  TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
  assertEquals(2, topDocs.totalHits);
  String snippets[] = highlighter.highlight("body", query, searcher, topDocs);
  assertEquals(2, snippets.length);
  assertEquals("This is a <b>test</b>.", snippets[0]);
  assertEquals("<b>Test</b> a one sentence document.", snippets[1]);
  
  ir.close();
  dir.close();
}
 
Developer: europeana, Project: search, Lines: 42, Source: TestMultiTermHighlighting.java

Example 7: testOverlapWithOffset

import org.apache.lucene.search.DisjunctionMaxQuery; //import the package/class the method depends on
public void testOverlapWithOffset() throws IOException, InvalidTokenOffsetsException {
  final String TEXT = "the fox did not jump";
  final Directory directory = newDirectory();
  final IndexWriter indexWriter = new IndexWriter(directory,
      newIndexWriterConfig(null));
  try {
    final Document document = new Document();
    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
    customType.setStoreTermVectors(true);
    customType.setStoreTermVectorOffsets(true);
    document.add(new Field(FIELD, new OverlappingTokenStream(), customType));
    indexWriter.addDocument(document);
  } finally {
    indexWriter.close();
  }
  final IndexReader indexReader = DirectoryReader.open(directory);
  assertEquals(1, indexReader.numDocs());
  final IndexSearcher indexSearcher = newSearcher(indexReader);
  try {
    final DisjunctionMaxQuery query = new DisjunctionMaxQuery(1);
    query.add(new SpanTermQuery(new Term(FIELD, "{fox}")));
    query.add(new SpanTermQuery(new Term(FIELD, "fox")));
      // final Query phraseQuery = new SpanNearQuery(new SpanQuery[] {
      // new SpanTermQuery(new Term(FIELD, "{fox}")),
      // new SpanTermQuery(new Term(FIELD, "fox")) }, 0, true);

    TopDocs hits = indexSearcher.search(query, 1);
    assertEquals(1, hits.totalHits);
    final Highlighter highlighter = new Highlighter(
        new SimpleHTMLFormatter(), new SimpleHTMLEncoder(),
        new QueryScorer(query));
    final TokenStream tokenStream = TokenSources
        .getTokenStream(
            indexReader.getTermVector(0, FIELD),
            false);
    assertEquals("<B>the fox</B> did not jump",
        highlighter.getBestFragment(tokenStream, TEXT));
  } finally {
    indexReader.close();
    directory.close();
  }
}
 
Developer: europeana, Project: search, Lines: 43, Source: TokenSourcesTest.java

Example 8: testOverlapWithPositionsAndOffset

import org.apache.lucene.search.DisjunctionMaxQuery; //import the package/class the method depends on
public void testOverlapWithPositionsAndOffset()
    throws IOException, InvalidTokenOffsetsException {
  final String TEXT = "the fox did not jump";
  final Directory directory = newDirectory();
  final IndexWriter indexWriter = new IndexWriter(directory,
      newIndexWriterConfig(null));
  try {
    final Document document = new Document();
    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
    customType.setStoreTermVectors(true);
    customType.setStoreTermVectorOffsets(true);
    customType.setStoreTermVectorPositions(true);
    document.add(new Field(FIELD, new OverlappingTokenStream(), customType));
    indexWriter.addDocument(document);
  } finally {
    indexWriter.close();
  }
  final IndexReader indexReader = DirectoryReader.open(directory);
  try {
    assertEquals(1, indexReader.numDocs());
    final IndexSearcher indexSearcher = newSearcher(indexReader);
    final DisjunctionMaxQuery query = new DisjunctionMaxQuery(1);
    query.add(new SpanTermQuery(new Term(FIELD, "{fox}")));
    query.add(new SpanTermQuery(new Term(FIELD, "fox")));
    // final Query phraseQuery = new SpanNearQuery(new SpanQuery[] {
    // new SpanTermQuery(new Term(FIELD, "{fox}")),
    // new SpanTermQuery(new Term(FIELD, "fox")) }, 0, true);

    TopDocs hits = indexSearcher.search(query, 1);
    assertEquals(1, hits.totalHits);
    final Highlighter highlighter = new Highlighter(
        new SimpleHTMLFormatter(), new SimpleHTMLEncoder(),
        new QueryScorer(query));
    final TokenStream tokenStream = TokenSources
        .getTokenStream(
            indexReader.getTermVector(0, FIELD),
            false);
    assertEquals("<B>the fox</B> did not jump",
        highlighter.getBestFragment(tokenStream, TEXT));
  } finally {
    indexReader.close();
    directory.close();
  }
}
 
Developer: europeana, Project: search, Lines: 45, Source: TokenSourcesTest.java

Example 9: testOverlapWithOffset

import org.apache.lucene.search.DisjunctionMaxQuery; //import the package/class the method depends on
public void testOverlapWithOffset() throws IOException, InvalidTokenOffsetsException {
  final String TEXT = "the fox did not jump";
  final Directory directory = newDirectory();
  final IndexWriter indexWriter = new IndexWriter(directory,
      newIndexWriterConfig(TEST_VERSION_CURRENT, null));
  try {
    final Document document = new Document();
    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
    customType.setStoreTermVectors(true);
    customType.setStoreTermVectorOffsets(true);
    document.add(new Field(FIELD, new OverlappingTokenStream(), customType));
    indexWriter.addDocument(document);
  } finally {
    indexWriter.close();
  }
  final IndexReader indexReader = DirectoryReader.open(directory);
  assertEquals(1, indexReader.numDocs());
  final IndexSearcher indexSearcher = newSearcher(indexReader);
  try {
    final DisjunctionMaxQuery query = new DisjunctionMaxQuery(1);
    query.add(new SpanTermQuery(new Term(FIELD, "{fox}")));
    query.add(new SpanTermQuery(new Term(FIELD, "fox")));
      // final Query phraseQuery = new SpanNearQuery(new SpanQuery[] {
      // new SpanTermQuery(new Term(FIELD, "{fox}")),
      // new SpanTermQuery(new Term(FIELD, "fox")) }, 0, true);

    TopDocs hits = indexSearcher.search(query, 1);
    assertEquals(1, hits.totalHits);
    final Highlighter highlighter = new Highlighter(
        new SimpleHTMLFormatter(), new SimpleHTMLEncoder(),
        new QueryScorer(query));
    final TokenStream tokenStream = TokenSources
        .getTokenStream(
            indexReader.getTermVector(0, FIELD),
            false);
    assertEquals("<B>the fox</B> did not jump",
        highlighter.getBestFragment(tokenStream, TEXT));
  } finally {
    indexReader.close();
    directory.close();
  }
}
 
Developer: pkarmstr, Project: NYBC, Lines: 43, Source: TokenSourcesTest.java

Example 10: testOverlapWithPositionsAndOffset

import org.apache.lucene.search.DisjunctionMaxQuery; //import the package/class the method depends on
public void testOverlapWithPositionsAndOffset()
    throws IOException, InvalidTokenOffsetsException {
  final String TEXT = "the fox did not jump";
  final Directory directory = newDirectory();
  final IndexWriter indexWriter = new IndexWriter(directory,
      newIndexWriterConfig(TEST_VERSION_CURRENT, null));
  try {
    final Document document = new Document();
    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
    customType.setStoreTermVectors(true);
    customType.setStoreTermVectorOffsets(true);
    customType.setStoreTermVectorPositions(true);
    document.add(new Field(FIELD, new OverlappingTokenStream(), customType));
    indexWriter.addDocument(document);
  } finally {
    indexWriter.close();
  }
  final IndexReader indexReader = DirectoryReader.open(directory);
  try {
    assertEquals(1, indexReader.numDocs());
    final IndexSearcher indexSearcher = newSearcher(indexReader);
    final DisjunctionMaxQuery query = new DisjunctionMaxQuery(1);
    query.add(new SpanTermQuery(new Term(FIELD, "{fox}")));
    query.add(new SpanTermQuery(new Term(FIELD, "fox")));
    // final Query phraseQuery = new SpanNearQuery(new SpanQuery[] {
    // new SpanTermQuery(new Term(FIELD, "{fox}")),
    // new SpanTermQuery(new Term(FIELD, "fox")) }, 0, true);

    TopDocs hits = indexSearcher.search(query, 1);
    assertEquals(1, hits.totalHits);
    final Highlighter highlighter = new Highlighter(
        new SimpleHTMLFormatter(), new SimpleHTMLEncoder(),
        new QueryScorer(query));
    final TokenStream tokenStream = TokenSources
        .getTokenStream(
            indexReader.getTermVector(0, FIELD),
            false);
    assertEquals("<B>the fox</B> did not jump",
        highlighter.getBestFragment(tokenStream, TEXT));
  } finally {
    indexReader.close();
    directory.close();
  }
}
 
Developer: pkarmstr, Project: NYBC, Lines: 45, Source: TokenSourcesTest.java


Note: The org.apache.lucene.search.DisjunctionMaxQuery.add examples in this article were compiled by 纯净天空 from open source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets were selected from open source projects contributed by many developers; copyright in the source code remains with the original authors, and any redistribution or use should follow the license of the corresponding project. Do not reproduce without permission.