本文整理匯總了Java中org.apache.lucene.analysis.core.WhitespaceTokenizer.addAttribute方法的典型用法代碼示例。如果您正苦於以下問題:Java WhitespaceTokenizer.addAttribute方法的具體用法?Java WhitespaceTokenizer.addAttribute怎麽用?Java WhitespaceTokenizer.addAttribute使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類org.apache.lucene.analysis.core.WhitespaceTokenizer
的用法示例。
在下文中一共展示了WhitespaceTokenizer.addAttribute方法的5個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: testQueryReset
import org.apache.lucene.analysis.core.WhitespaceTokenizer; //導入方法依賴的package包/類
public void testQueryReset() throws Exception {
  final String input = "How the s a brown s cow d like A B thing?";
  WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(new StringReader(input));
  CommonGramsQueryFilter queryFilter =
      new CommonGramsQueryFilter(new CommonGramsFilter(tokenizer, commonWords));
  CharTermAttribute termAtt = tokenizer.addAttribute(CharTermAttribute.class);

  // First pass: the query filter should emit the common-gram bigrams.
  queryFilter.reset();
  assertTrue(queryFilter.incrementToken());
  assertEquals("How_the", termAtt.toString());
  assertTrue(queryFilter.incrementToken());
  assertEquals("the_s", termAtt.toString());
  queryFilter.close();

  // Second pass: after close(), installing a fresh reader and calling reset()
  // must restart the stream from the beginning.
  tokenizer.setReader(new StringReader(input));
  queryFilter.reset();
  assertTrue(queryFilter.incrementToken());
  assertEquals("How_the", termAtt.toString());
}
示例2: testQueryReset
import org.apache.lucene.analysis.core.WhitespaceTokenizer; //導入方法依賴的package包/類
public void testQueryReset() throws Exception {
  final String input = "How the s a brown s cow d like A B thing?";
  WhitespaceTokenizer tokenizer =
      new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(input));
  CommonGramsFilter gramsFilter =
      new CommonGramsFilter(TEST_VERSION_CURRENT, tokenizer, commonWords);
  CommonGramsQueryFilter queryFilter = new CommonGramsQueryFilter(gramsFilter);
  CharTermAttribute termAtt = tokenizer.addAttribute(CharTermAttribute.class);

  // Consume the first two common-gram bigrams from the initial pass.
  queryFilter.reset();
  assertTrue(queryFilter.incrementToken());
  assertEquals("How_the", termAtt.toString());
  assertTrue(queryFilter.incrementToken());
  assertEquals("the_s", termAtt.toString());

  // Swap in a fresh reader and reset(): the stream must start over.
  tokenizer.setReader(new StringReader(input));
  queryFilter.reset();
  assertTrue(queryFilter.incrementToken());
  assertEquals("How_the", termAtt.toString());
}
示例3: testQueryReset
import org.apache.lucene.analysis.core.WhitespaceTokenizer; //導入方法依賴的package包/類
public void testQueryReset() throws Exception {
  final String input = "How the s a brown s cow d like A B thing?";
  WhitespaceTokenizer tokenizer =
      new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(input));
  CommonGramsQueryFilter queryFilter = new CommonGramsQueryFilter(
      new CommonGramsFilter(TEST_VERSION_CURRENT, tokenizer, commonWords));
  CharTermAttribute termAtt = tokenizer.addAttribute(CharTermAttribute.class);

  // First pass over the stream: expect the leading common-gram bigrams.
  queryFilter.reset();
  assertTrue(queryFilter.incrementToken());
  assertEquals("How_the", termAtt.toString());
  assertTrue(queryFilter.incrementToken());
  assertEquals("the_s", termAtt.toString());
  queryFilter.close();

  // After close(), setting a new reader and calling reset() must allow the
  // stream to be consumed again from the start.
  tokenizer.setReader(new StringReader(input));
  queryFilter.reset();
  assertTrue(queryFilter.incrementToken());
  assertEquals("How_the", termAtt.toString());
}
示例4: analyzeReturnTokens
import org.apache.lucene.analysis.core.WhitespaceTokenizer; //導入方法依賴的package包/類
/**
 * Tokenizes {@code docText} on whitespace after HTML-stripping it (the
 * "unescaped" tag is left intact by the char filter) and returns the
 * resulting terms in order.
 *
 * @param docText raw document text, possibly containing HTML markup
 * @return the whitespace-delimited terms of the stripped text
 * @throws RuntimeException wrapping any IOException from the analysis chain
 */
private String[] analyzeReturnTokens(String docText) {
  List<String> result = new ArrayList<>();
  Reader filter = new HTMLStripCharFilter(new StringReader(docText),
      Collections.singleton("unescaped"));
  // try-with-resources guarantees the tokenizer is closed on every path,
  // replacing the manual finally + IOUtils.closeQuietly idiom.
  try (WhitespaceTokenizer ts = new WhitespaceTokenizer()) {
    final CharTermAttribute termAttribute = ts.addAttribute(CharTermAttribute.class);
    ts.setReader(filter);
    ts.reset();
    while (ts.incrementToken()) {
      result.add(termAttribute.toString());
    }
    ts.end();
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  // Zero-length array argument is the idiomatic (and on modern JVMs faster) form.
  return result.toArray(new String[0]);
}
示例5: analyzeTagOne
import org.apache.lucene.analysis.core.WhitespaceTokenizer; //導入方法依賴的package包/類
/**
 * Scans the HTML-stripped, whitespace-tokenized form of {@code docText} for
 * the first token equal to {@code start} and the first token equal to
 * {@code end}, recording their character offsets.
 *
 * @param docText raw document text, possibly containing HTML markup
 * @param start   token whose start offset is wanted (result[0])
 * @param end     token whose end offset is wanted (result[1]); scanning stops
 *                as soon as it is found
 * @return {@code {startOffset, endOffset}}, with -1 for any token not found
 * @throws RuntimeException wrapping any IOException from the analysis chain
 */
private int[] analyzeTagOne(String docText, String start, String end) {
  int[] result = {-1, -1};
  Reader filter = new HTMLStripCharFilter(new StringReader(docText));
  // try-with-resources closes the tokenizer on every path — including the
  // early return below — replacing the manual finally + IOUtils.closeQuietly.
  try (WhitespaceTokenizer ts = new WhitespaceTokenizer()) {
    final CharTermAttribute termAttribute = ts.addAttribute(CharTermAttribute.class);
    final OffsetAttribute offsetAttribute = ts.addAttribute(OffsetAttribute.class);
    ts.setReader(filter);
    ts.reset();
    while (ts.incrementToken()) {
      final String termString = termAttribute.toString();
      if (termString.equals(start))
        result[0] = offsetAttribute.startOffset();
      if (termString.equals(end)) {
        result[1] = offsetAttribute.endOffset();
        return result;
      }
    }
    ts.end();
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  return result;
}