

Java BaseTokenStreamTestCase.assertTokenStreamContents Method Code Examples

This article collects typical usage examples of the Java method org.apache.lucene.analysis.BaseTokenStreamTestCase.assertTokenStreamContents. If you are wondering how to use BaseTokenStreamTestCase.assertTokenStreamContents in practice, the curated examples below should help. You can also explore further usage examples of the containing class, org.apache.lucene.analysis.BaseTokenStreamTestCase.


The following shows 7 code examples of BaseTokenStreamTestCase.assertTokenStreamContents, sorted by popularity by default.
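Before the individual examples, here is a minimal sketch of the assertion's basic shape. It assumes a Lucene 4.x test-framework classpath, matching the examples below; the test name is illustrative and not taken from any of the projects cited:

import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;

public void testBasicUsage() throws Exception {
  // Whitespace-tokenize a two-word input using the test framework's MockTokenizer.
  TokenStream ts = new MockTokenizer(new StringReader("hello world"),
      MockTokenizer.WHITESPACE, false);
  // Asserts the exact terms, in order, and that the stream is exhausted
  // and closed correctly afterwards.
  BaseTokenStreamTestCase.assertTokenStreamContents(ts, new String[] { "hello", "world" });
}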

Example 1: testHugeDoc

import java.io.IOException;
import java.io.StringReader;
import java.util.Arrays;
import org.apache.lucene.analysis.BaseTokenStreamTestCase; // provides the assertion under discussion
import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;

public void testHugeDoc() throws IOException {
  StringBuilder sb = new StringBuilder();
  // Prefix the input with 4094 spaces so the real tokens sit deep in the
  // stream, exercising the tokenizer's internal buffering.
  char[] whitespace = new char[4094];
  Arrays.fill(whitespace, ' ');
  sb.append(whitespace);
  sb.append("testing 1234");
  String input = sb.toString();
  // newAttributeFactory() is inherited from BaseTokenStreamTestCase.
  UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer(newAttributeFactory(), new StringReader(input));
  BaseTokenStreamTestCase.assertTokenStreamContents(tokenizer, new String[] { "testing", "1234" });
}
 
Developer: europeana, Project: search, Lines of code: 11, Source: TestUAX29URLEmailTokenizer.java

Example 2: testHugeDoc

import java.io.IOException;
import java.io.StringReader;
import java.util.Arrays;
import org.apache.lucene.analysis.BaseTokenStreamTestCase; // provides the assertion under discussion
import org.apache.lucene.analysis.standard.StandardTokenizer;

public void testHugeDoc() throws IOException {
  StringBuilder sb = new StringBuilder();
  // Prefix the input with 4094 spaces so the real tokens sit deep in the
  // stream, exercising the tokenizer's internal buffering.
  char[] whitespace = new char[4094];
  Arrays.fill(whitespace, ' ');
  sb.append(whitespace);
  sb.append("testing 1234");
  String input = sb.toString();
  StandardTokenizer tokenizer = new StandardTokenizer(new StringReader(input));
  BaseTokenStreamTestCase.assertTokenStreamContents(tokenizer, new String[] { "testing", "1234" });
}
 
Developer: europeana, Project: search, Lines of code: 11, Source: TestStandardAnalyzer.java

Example 3: testHugeDoc

import java.io.IOException;
import java.io.StringReader;
import java.util.Arrays;
import org.apache.lucene.analysis.BaseTokenStreamTestCase; // provides the assertion under discussion
import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;

public void testHugeDoc() throws IOException {
  StringBuilder sb = new StringBuilder();
  // Prefix the input with 4094 spaces so the real tokens sit deep in the
  // stream, exercising the tokenizer's internal buffering.
  char[] whitespace = new char[4094];
  Arrays.fill(whitespace, ' ');
  sb.append(whitespace);
  sb.append("testing 1234");
  String input = sb.toString();
  // TEST_VERSION_CURRENT is inherited from LuceneTestCase.
  UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer(TEST_VERSION_CURRENT, new StringReader(input));
  BaseTokenStreamTestCase.assertTokenStreamContents(tokenizer, new String[] { "testing", "1234" });
}
 
Developer: pkarmstr, Project: NYBC, Lines of code: 11, Source: TestUAX29URLEmailTokenizer.java

Example 4: testHugeDoc

import java.io.IOException;
import java.io.StringReader;
import java.util.Arrays;
import org.apache.lucene.analysis.BaseTokenStreamTestCase; // provides the assertion under discussion
import org.apache.lucene.analysis.standard.StandardTokenizer;

public void testHugeDoc() throws IOException {
  StringBuilder sb = new StringBuilder();
  // Prefix the input with 4094 spaces so the real tokens sit deep in the
  // stream, exercising the tokenizer's internal buffering.
  char[] whitespace = new char[4094];
  Arrays.fill(whitespace, ' ');
  sb.append(whitespace);
  sb.append("testing 1234");
  String input = sb.toString();
  // TEST_VERSION_CURRENT is inherited from LuceneTestCase.
  StandardTokenizer tokenizer = new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(input));
  BaseTokenStreamTestCase.assertTokenStreamContents(tokenizer, new String[] { "testing", "1234" });
}
 
Developer: pkarmstr, Project: NYBC, Lines of code: 11, Source: TestStandardAnalyzer.java

Example 5: testConsume2

import java.io.IOException;
import org.apache.lucene.analysis.BaseTokenStreamTestCase; // provides the assertion under discussion
import org.apache.lucene.analysis.miscellaneous.EmptyTokenStream;

public void testConsume2() throws IOException {
  // An EmptyTokenStream produces no tokens, so the expected term array is empty.
  BaseTokenStreamTestCase.assertTokenStreamContents(new EmptyTokenStream(), new String[0]);
}
 
Developer: europeana, Project: search, Lines of code: 4, Source: TestEmptyTokenStream.java
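The single-array overload used above checks only the terms. assertTokenStreamContents also has overloads that additionally verify token attributes such as character offsets; here is a minimal sketch, again assuming the Lucene 4.x signatures and an illustrative test name:

import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;

public void testTermsAndOffsets() throws Exception {
  TokenStream ts = new MockTokenizer(new StringReader("foo bar"),
      MockTokenizer.WHITESPACE, false);
  // Also verifies each token's start and end character offsets.
  BaseTokenStreamTestCase.assertTokenStreamContents(ts,
      new String[] { "foo", "bar" },
      new int[] { 0, 4 },   // start offsets
      new int[] { 3, 7 });  // end offsets
}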

Example 6: testCustomTypes

import java.io.StringReader;
import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.analysis.BaseTokenStreamTestCase; // provides the assertion under discussion
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilterFactory;
import org.apache.lucene.analysis.util.ResourceLoader;
import org.apache.solr.core.SolrResourceLoader;
import org.junit.Test;
@Test
public void testCustomTypes() throws Exception {
  String testText = "I borrowed $5,400.00 at 25% interest-rate";
  ResourceLoader loader = new SolrResourceLoader("solr/collection1");
  Map<String,String> args = new HashMap<>();
  args.put("luceneMatchVersion", TEST_VERSION_CURRENT.toString());
  args.put("generateWordParts", "1");
  args.put("generateNumberParts", "1");
  args.put("catenateWords", "1");
  args.put("catenateNumbers", "1");
  args.put("catenateAll", "0");
  args.put("splitOnCaseChange", "1");
  
  /* default behavior */
  WordDelimiterFilterFactory factoryDefault = new WordDelimiterFilterFactory(args);
  factoryDefault.inform(loader);
  
  TokenStream ts = factoryDefault.create(
      new MockTokenizer(new StringReader(testText), MockTokenizer.WHITESPACE, false));
  BaseTokenStreamTestCase.assertTokenStreamContents(ts, 
      new String[] { "I", "borrowed", "5", "540000", "400", "00", "at", "25", "interest", "interestrate", "rate" });

  ts = factoryDefault.create(
      new MockTokenizer(new StringReader("foo\u200Dbar"), MockTokenizer.WHITESPACE, false));
  BaseTokenStreamTestCase.assertTokenStreamContents(ts, 
      new String[] { "foo", "foobar", "bar" });

  
  /* custom behavior */
  args = new HashMap<>();
  // use a custom type mapping
  args.put("luceneMatchVersion", TEST_VERSION_CURRENT.toString());
  args.put("generateWordParts", "1");
  args.put("generateNumberParts", "1");
  args.put("catenateWords", "1");
  args.put("catenateNumbers", "1");
  args.put("catenateAll", "0");
  args.put("splitOnCaseChange", "1");
  args.put("types", "wdftypes.txt");
  WordDelimiterFilterFactory factoryCustom = new WordDelimiterFilterFactory(args);
  factoryCustom.inform(loader);
  
  ts = factoryCustom.create(
      new MockTokenizer(new StringReader(testText), MockTokenizer.WHITESPACE, false));
  BaseTokenStreamTestCase.assertTokenStreamContents(ts, 
      new String[] { "I", "borrowed", "$5,400.00", "at", "25%", "interest", "interestrate", "rate" });
  
  /* test custom behavior with a char > 0x7F, because we had to make a larger byte[] */
  ts = factoryCustom.create(
      new MockTokenizer(new StringReader("foo\u200Dbar"), MockTokenizer.WHITESPACE, false));
  BaseTokenStreamTestCase.assertTokenStreamContents(ts, 
      new String[] { "foo\u200Dbar" });
}
 
Developer: europeana, Project: search, Lines of code: 54, Source: TestWordDelimiterFilterFactory.java
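The custom run above asserts that "$5,400.00", "25%", and "foo\u200Dbar" each survive as a single token, which implies that wdftypes.txt reclassifies those delimiter characters. The actual file ships with the project's test resources and is not reproduced in this article; the following is only a hypothetical reconstruction inferred from the asserted output, using the factory's "character => type" mapping format:

# Hypothetical wdftypes.txt reconstructed from the asserted output above;
# the real file lives in the project's test resources.
# Format: one "character => type" mapping per line.
$ => DIGIT
% => DIGIT
. => DIGIT
# \u002C is the comma in "5,400.00"
\u002C => DIGIT
# \u200D is the zero-width joiner in "foo\u200Dbar"
\u200D => ALPHANUM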

Example 7: testCustomTypes

import java.io.StringReader;
import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.analysis.BaseTokenStreamTestCase; // provides the assertion under discussion
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilterFactory;
import org.apache.lucene.analysis.util.ResourceLoader;
import org.apache.solr.core.SolrResourceLoader;
import org.junit.Test;

@Test
public void testCustomTypes() throws Exception {
  String testText = "I borrowed $5,400.00 at 25% interest-rate";
  // Unlike Example 6, this older variant configures the factory via the
  // no-arg constructor followed by init(Map) rather than constructor arguments.
  WordDelimiterFilterFactory factoryDefault = new WordDelimiterFilterFactory();
  ResourceLoader loader = new SolrResourceLoader("solr/collection1");
  Map<String,String> args = new HashMap<String,String>();
  args.put("generateWordParts", "1");
  args.put("generateNumberParts", "1");
  args.put("catenateWords", "1");
  args.put("catenateNumbers", "1");
  args.put("catenateAll", "0");
  args.put("splitOnCaseChange", "1");
  
  /* default behavior */
  factoryDefault.init(args);
  factoryDefault.inform(loader);
  
  TokenStream ts = factoryDefault.create(
      new MockTokenizer(new StringReader(testText), MockTokenizer.WHITESPACE, false));
  BaseTokenStreamTestCase.assertTokenStreamContents(ts, 
      new String[] { "I", "borrowed", "5", "400", "00", "540000", "at", "25", "interest", "rate", "interestrate" });

  ts = factoryDefault.create(
      new MockTokenizer(new StringReader("foo\u200Dbar"), MockTokenizer.WHITESPACE, false));
  BaseTokenStreamTestCase.assertTokenStreamContents(ts, 
      new String[] { "foo", "bar", "foobar" });

  
  /* custom behavior */
  WordDelimiterFilterFactory factoryCustom = new WordDelimiterFilterFactory();
  // use a custom type mapping
  args.put("types", "wdftypes.txt");
  factoryCustom.init(args);
  factoryCustom.inform(loader);
  
  ts = factoryCustom.create(
      new MockTokenizer(new StringReader(testText), MockTokenizer.WHITESPACE, false));
  BaseTokenStreamTestCase.assertTokenStreamContents(ts, 
      new String[] { "I", "borrowed", "$5,400.00", "at", "25%", "interest", "rate", "interestrate" });
  
  /* test custom behavior with a char > 0x7F, because we had to make a larger byte[] */
  ts = factoryCustom.create(
      new MockTokenizer(new StringReader("foo\u200Dbar"), MockTokenizer.WHITESPACE, false));
  BaseTokenStreamTestCase.assertTokenStreamContents(ts, 
      new String[] { "foo\u200Dbar" });
}
 
Developer: pkarmstr, Project: NYBC, Lines of code: 47, Source: TestWordDelimiterFilterFactory.java


Note: The org.apache.lucene.analysis.BaseTokenStreamTestCase.assertTokenStreamContents examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. For redistribution and use, refer to each project's License; do not repost without permission.