当前位置: 首页>>代码示例>>Java>>正文


Java Automata.makeString方法代码示例

本文整理汇总了Java中org.apache.lucene.util.automaton.Automata.makeString方法的典型用法代码示例。如果您正苦于以下问题:Java Automata.makeString方法的具体用法?Java Automata.makeString怎么用?Java Automata.makeString使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.lucene.util.automaton.Automata的用法示例。


在下文中一共展示了Automata.makeString方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: testBoost

import org.apache.lucene.util.automaton.Automata; //导入方法依赖的package包/类
public void testBoost() throws Exception {
  // Analyzer whose only stop word is "on".
  CharacterRunAutomaton onlyOnStopSet = new CharacterRunAutomaton(Automata.makeString("on"));
  Analyzer analyzerStoppingOn = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, onlyOnStopSet);

  PrecedenceQueryParser parser = new PrecedenceQueryParser();
  parser.setAnalyzer(analyzerStoppingOn);

  // A boosted stop word must still parse to a non-null query.
  Query query = parser.parse("on^1.0", "field");
  assertNotNull(query);

  // Explicit boosts on phrases and bare terms are carried through.
  query = parser.parse("\"hello\"^2.0", "field");
  assertNotNull(query);
  assertEquals(query.getBoost(), (float) 2.0, (float) 0.5);

  query = parser.parse("hello^2.0", "field");
  assertNotNull(query);
  assertEquals(query.getBoost(), (float) 2.0, (float) 0.5);

  query = parser.parse("\"on\"^1.0", "field");
  assertNotNull(query);

  // Boosting a word from the English stop set must also parse without failing.
  query = getParser(new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET))
      .parse("the^3", "field");
  assertNotNull(query);
}
 
开发者ID:europeana,项目名称:search,代码行数:22,代码来源:TestPrecedenceQueryParser.java

示例2: testBoost

import org.apache.lucene.util.automaton.Automata; //导入方法依赖的package包/类
public void testBoost() throws Exception {
  // Analyzer whose only stop word is "on".
  CharacterRunAutomaton onAutomaton = new CharacterRunAutomaton(Automata.makeString("on"));
  Analyzer analyzerStoppingOn = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, onAutomaton);
  StandardQueryParser parser = new StandardQueryParser();
  parser.setAnalyzer(analyzerStoppingOn);

  // A boosted stop word still parses to a non-null query.
  Query query = parser.parse("on^1.0", "field");
  assertNotNull(query);

  // Explicit boosts on phrases and bare terms are preserved.
  query = parser.parse("\"hello\"^2.0", "field");
  assertNotNull(query);
  assertEquals(query.getBoost(), (float) 2.0, (float) 0.5);
  query = parser.parse("hello^2.0", "field");
  assertNotNull(query);
  assertEquals(query.getBoost(), (float) 2.0, (float) 0.5);
  query = parser.parse("\"on\"^1.0", "field");
  assertNotNull(query);

  StandardQueryParser englishStopParser = new StandardQueryParser();
  englishStopParser.setAnalyzer(
      new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET));

  // "the" is a stop word so the result is an empty query:
  query = englishStopParser.parse("the^3", "field");
  assertNotNull(query);
  assertEquals("", query.toString());
  assertEquals(1.0f, query.getBoost(), 0.01f);
}
 
开发者ID:europeana,项目名称:search,代码行数:27,代码来源:TestQPHelper.java

示例3: testBoost

import org.apache.lucene.util.automaton.Automata; //导入方法依赖的package包/类
public void testBoost() throws Exception {
  // Parser configuration whose analyzer treats only "on" as a stop word.
  CharacterRunAutomaton onOnly = new CharacterRunAutomaton(Automata.makeString("on"));
  Analyzer singleStopAnalyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, onOnly);
  CommonQueryParserConfiguration config = getParserConfig(singleStopAnalyzer);

  // A boosted stop word still produces a non-null query.
  Query query = getQuery("on^1.0", config);
  assertNotNull(query);

  // Boosts on phrases and bare terms survive parsing.
  query = getQuery("\"hello\"^2.0", config);
  assertNotNull(query);
  assertEquals(query.getBoost(), (float) 2.0, (float) 0.5);
  query = getQuery("hello^2.0", config);
  assertNotNull(query);
  assertEquals(query.getBoost(), (float) 2.0, (float) 0.5);
  query = getQuery("\"on\"^1.0", config);
  assertNotNull(query);

  // With the full English stop set, "the^3" collapses to an empty query
  // whose boost falls back to the default of 1.0.
  Analyzer englishStops = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET);
  CommonQueryParserConfiguration config2 = getParserConfig(englishStops);
  query = getQuery("the^3", config2);
  assertNotNull(query);
  assertEquals("", query.toString());
  assertEquals(1.0f, query.getBoost(), 0.01f);
}
 
开发者ID:europeana,项目名称:search,代码行数:25,代码来源:QueryParserTestBase.java

示例4: testBoost

import org.apache.lucene.util.automaton.Automata; //导入方法依赖的package包/类
@Test
public void testBoost() throws Exception {
	// Analyzer stopping only the word "on".
	CharacterRunAutomaton onOnly = new CharacterRunAutomaton(Automata.makeString("on"));
	Analyzer singleStopAnalyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, onOnly);
	QueryParser parser = getParserConfig(singleStopAnalyzer);

	// A boosted stop word still yields a non-null query.
	Query query = getQuery("on^1.0", parser);
	Assert.assertNotNull(query);

	// Explicit boosts are represented as BoostQuery wrappers.
	query = getQuery("\"hello\"^2.0", parser);
	Assert.assertNotNull(query);
	Assert.assertEquals(((BoostQuery) query).getBoost(), (float) 2.0, (float) 0.5);
	query = getQuery("hello^2.0", parser);
	Assert.assertNotNull(query);
	Assert.assertEquals(((BoostQuery) query).getBoost(), (float) 2.0, (float) 0.5);
	query = getQuery("\"on\"^1.0", parser);
	Assert.assertNotNull(query);

	// With the English stop set, "the" is removed entirely: the result
	// matches no documents and carries no BoostQuery wrapper.
	Analyzer englishStops = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET);
	QueryParser parser2 = getParserConfig(englishStops);
	query = getQuery("the^3", parser2);
	Assert.assertNotNull(query);
	assertMatchNoDocsQuery(query);
	Assert.assertFalse(query instanceof BoostQuery);
}
 
开发者ID:easynet-cn,项目名称:resource-query-parser,代码行数:25,代码来源:QueryParserTestBase.java

示例5: testBoost

import org.apache.lucene.util.automaton.Automata; //导入方法依赖的package包/类
/**
 * Boosted terms keep their boost through parsing; a boosted stop word
 * collapses to an empty query with the default boost of 1.0.
 */
public void testBoost()
    throws Exception {
  CharacterRunAutomaton stopWords = new CharacterRunAutomaton(Automata.makeString("on"));
  Analyzer oneStopAnalyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, stopWords);
  CommonQueryParserConfiguration qp = getParserConfig(oneStopAnalyzer);
  Query q = getQuery("on^1.0",qp);
  assertNotNull(q);
  q = getQuery("\"hello\"^2.0",qp);
  assertNotNull(q);

  assertEquals(getBoost(q), (float) 2.0, (float) 0.5);
  q = getQuery("hello^2.0",qp);
  assertNotNull(q);
  // Use the getBoost(Query) helper consistently rather than casting to
  // BoostQuery inline, matching the other boost assertions in this test.
  assertEquals(getBoost(q), (float) 2.0, (float) 0.5);
  q = getQuery("\"on\"^1.0",qp);
  assertNotNull(q);

  Analyzer a2 = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET);
  CommonQueryParserConfiguration qp2 = getParserConfig(a2);
  q = getQuery("the^3", qp2);
  // "the" is a stop word so the result is an empty query:
  assertNotNull(q);
  assertEmpty(q);
  assertEquals(1.0f, getBoost(q), 0.01f);
}
 
开发者ID:tballison,项目名称:lucene-addons,代码行数:26,代码来源:QueryParserTestBase.java

示例6: toAutomaton

import org.apache.lucene.util.automaton.Automata; //导入方法依赖的package包/类
/**
 * Builds an automaton accepting the configured location string, truncated to
 * each configured precision. With no precisions, the full string is matched;
 * otherwise the result is the union over all precisions.
 */
@Override
public Automaton toAutomaton() {
    // No precisions configured: accept the full location string only.
    if (precisions == null || precisions.length == 0) {
        return Automata.makeString(location);
    }
    // Union of the location truncated to each configured precision.
    Automaton automaton = Automata.makeString(truncatedCell(precisions[0]));
    for (int i = 1; i < precisions.length; i++) {
        automaton = Operations.union(automaton, Automata.makeString(truncatedCell(precisions[i])));
    }
    return automaton;
}

/** Prefix of {@code location} of the given length, clamped to [1, location.length()]. */
private String truncatedCell(int precision) {
    return location.substring(0, Math.max(1, Math.min(location.length(), precision)));
}
 
开发者ID:baidu,项目名称:Elasticsearch,代码行数:15,代码来源:GeolocationContextMapping.java

示例7: beforeClass

import org.apache.lucene.util.automaton.Automata; //导入方法依赖的package包/类
@BeforeClass
public static void beforeClass() throws Exception {
  Random rng = random();
  directory = newDirectory();

  // Use a single random character as the stop word for this suite.
  stopword = "" + randomChar();
  CharacterRunAutomaton stopAutomaton = new CharacterRunAutomaton(Automata.makeString(stopword));
  analyzer = new MockAnalyzer(rng, MockTokenizer.WHITESPACE, false, stopAutomaton);

  RandomIndexWriter writer = new RandomIndexWriter(rng, directory, analyzer);
  Document template = new Document();
  Field idField = new StringField("id", "", Field.Store.NO);
  Field contentField = new TextField("field", "", Field.Store.NO);
  template.add(idField);
  template.add(contentField);

  // Index a batch of random documents, reusing the same Document instance.
  int docCount = atLeast(1000);
  for (int i = 0; i < docCount; i++) {
    idField.setStringValue(Integer.toString(i));
    contentField.setStringValue(randomFieldContents());
    writer.addDocument(template);
  }

  // Delete roughly 5% of the documents, half by Term and half by TermQuery.
  int deleteCount = docCount / 20;
  for (int i = 0; i < deleteCount; i++) {
    Term victim = new Term("id", Integer.toString(rng.nextInt(docCount)));
    if (rng.nextBoolean()) {
      writer.deleteDocuments(victim);
    } else {
      writer.deleteDocuments(new TermQuery(victim));
    }
  }

  reader = writer.getReader();
  s1 = newSearcher(reader);
  s2 = newSearcher(reader);
  writer.close();
}
 
开发者ID:europeana,项目名称:search,代码行数:39,代码来源:SearchEquivalenceTestBase.java

示例8: testEquals

import org.apache.lucene.util.automaton.Automata; //导入方法依赖的package包/类
public void testEquals() {
  AutomatonQuery base = new AutomatonQuery(newTerm("foobar"), Automata.makeString("foobar"));
  // Same object reference as base.
  AutomatonQuery sameRef = base;
  // Accepts the same language over the same term, built a different way.
  AutomatonQuery sameLanguage = new AutomatonQuery(newTerm("foobar"),
      Operations.concatenate(Automata.makeString("foo"), Automata.makeString("bar")));
  // Same term, different language.
  AutomatonQuery otherLanguage = new AutomatonQuery(newTerm("foobar"),
      Automata.makeString("different"));
  // Different term, same language.
  AutomatonQuery otherTerm = new AutomatonQuery(newTerm("blah"), Automata.makeString("foobar"));

  assertEquals(base.hashCode(), sameRef.hashCode());
  assertEquals(base, sameRef);

  assertEquals(base.hashCode(), sameLanguage.hashCode());
  assertEquals(base, sameLanguage);

  // Different concrete classes are never equal, even over the same term.
  AutomatonQuery wildcard = new WildcardQuery(newTerm("foobar"));
  AutomatonQuery regexp = new RegexpQuery(newTerm("foobar"));

  assertFalse(base.equals(wildcard));
  assertFalse(base.equals(regexp));
  assertFalse(wildcard.equals(regexp));
  assertFalse(base.equals(otherLanguage));
  assertFalse(base.equals(otherTerm));
  assertFalse(base.equals(null));
}
 
开发者ID:europeana,项目名称:search,代码行数:36,代码来源:TestAutomatonQuery.java

示例9: testRewriteSingleTerm

import org.apache.lucene.util.automaton.Automata; //导入方法依赖的package包/类
/**
 * Rewriting an automaton that accepts exactly one term must go through
 * SingleTermsEnum while preserving MultiTermQuery semantics.
 */
public void testRewriteSingleTerm() throws IOException {
  AutomatonQuery query = new AutomatonQuery(newTerm("bogus"), Automata.makeString("piece"));
  Terms fieldTerms = MultiFields.getTerms(searcher.getIndexReader(), FN);
  assertTrue(query.getTermsEnum(fieldTerms) instanceof SingleTermsEnum);
  assertEquals(1, automatonQueryNrHits(query));
}
 
开发者ID:europeana,项目名称:search,代码行数:11,代码来源:TestAutomatonQuery.java

示例10: testRewritePrefix

import org.apache.lucene.util.automaton.Automata; //导入方法依赖的package包/类
/**
 * Rewriting a prefix-shaped automaton must go through PrefixTermsEnum while
 * preserving MultiTermQuery semantics.
 */
public void testRewritePrefix() throws IOException {
  // "do" followed by any string, i.e. the language of the prefix "do".
  Automaton prefix = Automata.makeString("do");
  Automaton prefixLanguage = Operations.concatenate(prefix, Automata.makeAnyString());
  AutomatonQuery query = new AutomatonQuery(newTerm("bogus"), prefixLanguage);
  Terms fieldTerms = MultiFields.getTerms(searcher.getIndexReader(), FN);
  assertTrue(query.getTermsEnum(fieldTerms) instanceof PrefixTermsEnum);
  assertEquals(3, automatonQueryNrHits(query));
}
 
开发者ID:europeana,项目名称:search,代码行数:13,代码来源:TestAutomatonQuery.java

示例11: testMaxSizeHighlightTruncates

import org.apache.lucene.util.automaton.Automata; //导入方法依赖的package包/类
// Verifies that Highlighter.setMaxDocCharsToAnalyze() truncates analysis:
// the best fragment never exceeds the configured limit, even when a matching
// token exists past that limit in the input text.
public void testMaxSizeHighlightTruncates() throws Exception {
  TestHighlightRunner helper = new TestHighlightRunner() {

    @Override
    public void run() throws Exception {
      String goodWord = "goodtoken";
      // Stop set containing only the single token "stoppedtoken".
      CharacterRunAutomaton stopWords = new CharacterRunAutomaton(Automata.makeString("stoppedtoken"));
      // we disable MockTokenizer checks because we will forcefully limit the
      // tokenstream and call end() before incrementToken() returns false.
      final MockAnalyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, stopWords);
      analyzer.setEnableChecks(false);
      TermQuery query = new TermQuery(new Term("data", goodWord));

      String match;
      // Build input: one matching token followed by thousands of stop words.
      StringBuilder sb = new StringBuilder();
      sb.append(goodWord);
      for (int i = 0; i < 10000; i++) {
        sb.append(" ");
        // only one stopword
        sb.append("stoppedtoken");
      }
      SimpleHTMLFormatter fm = new SimpleHTMLFormatter();
      Highlighter hg = getHighlighter(query, "data", fm);// new Highlighter(fm,
      // new
      // QueryTermScorer(query));
      // NullFragmenter would return the whole text, so the 100-char cap below
      // is the only thing limiting the fragment length.
      hg.setTextFragmenter(new NullFragmenter());
      hg.setMaxDocCharsToAnalyze(100);
      match = hg.getBestFragment(analyzer, "data", sb.toString());
      assertTrue("Matched text should be no more than 100 chars in length ", match.length() < hg
          .getMaxDocCharsToAnalyze());

      // add another tokenized word to the overrall length - but set way
      // beyond
      // the length of text under consideration (after a large slug of stop
      // words
      // + whitespace)
      sb.append(" ");
      sb.append(goodWord);
      match = hg.getBestFragment(analyzer, "data", sb.toString());
      assertTrue("Matched text should be no more than 100 chars in length ", match.length() < hg
          .getMaxDocCharsToAnalyze());
    }
  };

  helper.start();

}
 
开发者ID:europeana,项目名称:search,代码行数:48,代码来源:HighlighterTest.java

示例12: s2a

import org.apache.lucene.util.automaton.Automata; //导入方法依赖的package包/类
/** Shorthand: builds an automaton accepting exactly the string {@code s}. */
private Automaton s2a(String s) {
  return Automata.makeString(s);
}
 
开发者ID:europeana,项目名称:search,代码行数:4,代码来源:TestGraphTokenizers.java


注:本文中的org.apache.lucene.util.automaton.Automata.makeString方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。