本文整理汇总了Java中org.apache.lucene.analysis.core.WhitespaceTokenizer.addAttribute方法的典型用法代码示例。如果您正苦于以下问题:Java WhitespaceTokenizer.addAttribute方法的具体用法?Java WhitespaceTokenizer.addAttribute怎么用?Java WhitespaceTokenizer.addAttribute使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.lucene.analysis.core.WhitespaceTokenizer的用法示例。
在下文中一共展示了WhitespaceTokenizer.addAttribute方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testQueryReset
import org.apache.lucene.analysis.core.WhitespaceTokenizer; //导入方法依赖的package包/类
/**
 * Verifies that a CommonGramsQueryFilter chain built on a WhitespaceTokenizer
 * can be closed, given a fresh reader, and reset, and then produce the same
 * leading bigram tokens again.
 */
public void testQueryReset() throws Exception {
  final String input = "How the s a brown s cow d like A B thing?";
  // Analysis chain: whitespace tokenizer -> common grams -> query filter.
  WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(new StringReader(input));
  CommonGramsFilter commonGrams = new CommonGramsFilter(tokenizer, commonWords);
  CommonGramsQueryFilter queryFilter = new CommonGramsQueryFilter(commonGrams);
  CharTermAttribute termAtt = tokenizer.addAttribute(CharTermAttribute.class);

  // First pass: consume the first two bigrams.
  queryFilter.reset();
  assertTrue(queryFilter.incrementToken());
  assertEquals("How_the", termAtt.toString());
  assertTrue(queryFilter.incrementToken());
  assertEquals("the_s", termAtt.toString());

  // Close, swap in a fresh reader, and confirm the stream restarts cleanly.
  queryFilter.close();
  tokenizer.setReader(new StringReader(input));
  queryFilter.reset();
  assertTrue(queryFilter.incrementToken());
  assertEquals("How_the", termAtt.toString());
}
示例2: testQueryReset
import org.apache.lucene.analysis.core.WhitespaceTokenizer; //导入方法依赖的package包/类
/**
 * Verifies that a CommonGramsQueryFilter chain can be reused after
 * close()/setReader()/reset(), producing the same leading bigrams again.
 */
public void testQueryReset() throws Exception {
  final String input = "How the s a brown s cow d like A B thing?";
  WhitespaceTokenizer wt = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(input));
  CommonGramsFilter cgf = new CommonGramsFilter(TEST_VERSION_CURRENT, wt, commonWords);
  CommonGramsQueryFilter nsf = new CommonGramsQueryFilter(cgf);
  CharTermAttribute term = wt.addAttribute(CharTermAttribute.class);
  nsf.reset();
  assertTrue(nsf.incrementToken());
  assertEquals("How_the", term.toString());
  assertTrue(nsf.incrementToken());
  assertEquals("the_s", term.toString());
  // Fix: close the stream before reusing the tokenizer. The TokenStream
  // contract requires close() before setReader(); the sibling examples do
  // this, and omitting it can raise IllegalStateException on newer Lucene.
  nsf.close();
  wt.setReader(new StringReader(input));
  nsf.reset();
  assertTrue(nsf.incrementToken());
  assertEquals("How_the", term.toString());
}
示例3: testQueryReset
import org.apache.lucene.analysis.core.WhitespaceTokenizer; //导入方法依赖的package包/类
/**
 * Verifies that a CommonGramsQueryFilter chain (version-aware constructors)
 * survives a close()/setReader()/reset() cycle and emits the same initial
 * bigram tokens on the second pass.
 */
public void testQueryReset() throws Exception {
  final String input = "How the s a brown s cow d like A B thing?";
  // Chain: whitespace tokenizer -> common grams -> query filter.
  WhitespaceTokenizer tokenizer =
      new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(input));
  CommonGramsFilter commonGrams =
      new CommonGramsFilter(TEST_VERSION_CURRENT, tokenizer, commonWords);
  CommonGramsQueryFilter queryFilter = new CommonGramsQueryFilter(commonGrams);
  CharTermAttribute termAtt = tokenizer.addAttribute(CharTermAttribute.class);

  // First pass over the input.
  queryFilter.reset();
  assertTrue(queryFilter.incrementToken());
  assertEquals("How_the", termAtt.toString());
  assertTrue(queryFilter.incrementToken());
  assertEquals("the_s", termAtt.toString());

  // Close, rebind a fresh reader, and make sure the stream restarts.
  queryFilter.close();
  tokenizer.setReader(new StringReader(input));
  queryFilter.reset();
  assertTrue(queryFilter.incrementToken());
  assertEquals("How_the", termAtt.toString());
}
示例4: analyzeReturnTokens
import org.apache.lucene.analysis.core.WhitespaceTokenizer; //导入方法依赖的package包/类
/**
 * Tokenizes {@code docText} on whitespace after stripping HTML markup
 * (elements named "unescaped" are left intact) and returns the tokens.
 *
 * @param docText raw document text, possibly containing HTML
 * @return the whitespace-delimited tokens of the stripped text, in order
 */
private String[] analyzeReturnTokens(String docText) {
  List<String> tokens = new ArrayList<>();
  // Strip HTML first; "unescaped" is the single element to pass through.
  Reader stripped = new HTMLStripCharFilter(new StringReader(docText),
      Collections.singleton("unescaped"));
  WhitespaceTokenizer tokenizer = new WhitespaceTokenizer();
  final CharTermAttribute termAtt = tokenizer.addAttribute(CharTermAttribute.class);
  try {
    tokenizer.setReader(stripped);
    tokenizer.reset();
    // Collect every token the stream produces.
    while (tokenizer.incrementToken()) {
      tokens.add(termAtt.toString());
    }
    tokenizer.end();
  } catch (IOException e) {
    throw new RuntimeException(e);
  } finally {
    // Best-effort close; suppresses any close-time IOException.
    IOUtils.closeQuietly(tokenizer);
  }
  return tokens.toArray(new String[0]);
}
示例5: analyzeTagOne
import org.apache.lucene.analysis.core.WhitespaceTokenizer; //导入方法依赖的package包/类
/**
 * Scans the HTML-stripped form of {@code docText} for the first occurrence of
 * the {@code start} and {@code end} tokens and reports their character offsets.
 *
 * @param docText raw document text, possibly containing HTML
 * @param start   token whose start offset is wanted
 * @param end     token whose end offset is wanted; scanning stops once found
 * @return {@code {startOffset, endOffset}}; an element is -1 if its token was
 *         not encountered before scanning stopped
 */
private int[] analyzeTagOne(String docText, String start, String end) {
  int[] result = {-1, -1};
  Reader filter = new HTMLStripCharFilter(new StringReader(docText));
  WhitespaceTokenizer ts = new WhitespaceTokenizer();
  final CharTermAttribute termAttribute = ts.addAttribute(CharTermAttribute.class);
  final OffsetAttribute offsetAttribute = ts.addAttribute(OffsetAttribute.class);
  try {
    ts.setReader(filter);
    ts.reset();
    while (ts.incrementToken()) {
      final String termString = termAttribute.toString();
      if (termString.equals(start)) {
        result[0] = offsetAttribute.startOffset();
      }
      if (termString.equals(end)) {
        result[1] = offsetAttribute.endOffset();
        // Stop scanning but fall through so end() below still runs.
        break;
      }
    }
    // Fix: the original returned from inside the loop, skipping end() and
    // violating the TokenStream contract (end() must be called before close()).
    ts.end();
  } catch (IOException e) {
    throw new RuntimeException(e);
  } finally {
    // Best-effort close; suppresses any close-time IOException.
    IOUtils.closeQuietly(ts);
  }
  return result;
}