本文整理汇总了Java中org.apache.lucene.analysis.Token.setOffset方法的典型用法代码示例。如果您正苦于以下问题:Java Token.setOffset方法的具体用法?Java Token.setOffset怎么用?Java Token.setOffset使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.lucene.analysis.Token的用法示例。
在下文中一共展示了Token.setOffset方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: analyze
import org.apache.lucene.analysis.Token; //导入方法依赖的package包/类
/**
 * Tokenizes {@code text} with the configured {@code analyzer} and appends a
 * {@link Token} per emitted term to {@code result}.
 *
 * <p>Each produced token's start/end offsets are shifted by {@code offset}
 * (the position of {@code text} within the larger original input), and its
 * flags are overwritten with {@code flagsAttValue}.
 *
 * @param result        destination collection for the produced tokens
 * @param text          the text fragment to analyze
 * @param offset        character offset of {@code text} in the original input
 * @param flagsAttValue flags value stamped onto every produced token
 * @throws IOException if the analyzer's token stream fails
 */
protected void analyze(Collection<Token> result, String text, int offset, int flagsAttValue) throws IOException {
  TokenStream stream = analyzer.tokenStream("", text);
  // TODO: support custom attributes
  CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
  TypeAttribute typeAtt = stream.addAttribute(TypeAttribute.class);
  PayloadAttribute payloadAtt = stream.addAttribute(PayloadAttribute.class);
  PositionIncrementAttribute posIncAtt = stream.addAttribute(PositionIncrementAttribute.class);
  OffsetAttribute offsetAtt = stream.addAttribute(OffsetAttribute.class);
  // reset/incrementToken/end must run inside try so the stream is always
  // closed, even if consuming it throws (original leaked the stream on error).
  try {
    stream.reset();
    while (stream.incrementToken()) {
      Token token = new Token();
      token.copyBuffer(termAtt.buffer(), 0, termAtt.length());
      // shift offsets into the coordinate space of the full original input
      token.setOffset(offset + offsetAtt.startOffset(),
                      offset + offsetAtt.endOffset());
      token.setFlags(flagsAttValue); //overwriting any flags already set...
      token.setType(typeAtt.type());
      token.setPayload(payloadAtt.getPayload());
      token.setPositionIncrement(posIncAtt.getPositionIncrement());
      result.add(token);
    }
    stream.end();
  } finally {
    stream.close();
  }
}
示例2: getNextPrefixInputToken
import org.apache.lucene.analysis.Token; //导入方法依赖的package包/类
/**
 * Advances the prefix stream one token and copies all of its attribute
 * values (term, offsets, flags, type, payload, position increment) into
 * the reusable {@code token}.
 *
 * @param token the token instance to populate
 * @return the populated {@code token}, or {@code null} when the prefix
 *         stream is exhausted
 * @throws IOException if the underlying stream fails
 */
private Token getNextPrefixInputToken(Token token) throws IOException {
  if (prefix.incrementToken()) {
    token.copyBuffer(p_termAtt.buffer(), 0, p_termAtt.length());
    token.setOffset(p_offsetAtt.startOffset(), p_offsetAtt.endOffset());
    token.setType(p_typeAtt.type());
    token.setFlags(p_flagsAtt.getFlags());
    token.setPositionIncrement(p_posIncrAtt.getPositionIncrement());
    token.setPayload(p_payloadAtt.getPayload());
    return token;
  }
  return null;
}
示例3: getNextSuffixInputToken
import org.apache.lucene.analysis.Token; //导入方法依赖的package包/类
/**
 * Advances the suffix stream one token and copies all of its attribute
 * values (term, offsets, flags, type, payload, position increment) into
 * the reusable {@code token}.
 *
 * @param token the token instance to populate
 * @return the populated {@code token}, or {@code null} when the suffix
 *         stream is exhausted
 * @throws IOException if the underlying stream fails
 */
private Token getNextSuffixInputToken(Token token) throws IOException {
  if (suffix.incrementToken()) {
    token.copyBuffer(termAtt.buffer(), 0, termAtt.length());
    token.setOffset(offsetAtt.startOffset(), offsetAtt.endOffset());
    token.setType(typeAtt.type());
    token.setFlags(flagsAtt.getFlags());
    token.setPositionIncrement(posIncrAtt.getPositionIncrement());
    token.setPayload(payloadAtt.getPayload());
    return token;
  }
  return null;
}
示例4: addToken
import org.apache.lucene.analysis.Token; //导入方法依赖的package包/类
/**
 * Records the current token (read from {@code offsetAtt}/{@code termAtt})
 * into this group, growing the group's overall offset span and — when the
 * token scored — its matched offset span, and accumulating {@code score}
 * into the group total. Silently drops tokens once the group holds
 * {@code MAX_NUM_TOKENS_PER_GROUP} entries.
 *
 * @param score the token's score; only positive scores extend the match span
 */
void addToken(float score) {
  if (numTokens < MAX_NUM_TOKENS_PER_GROUP) {
    int termStartOffset = offsetAtt.startOffset();
    int termEndOffset = offsetAtt.endOffset();
    if (numTokens == 0) {
      // first token initializes both the group span and the match span
      startOffset = matchStartOffset = termStartOffset;
      endOffset = matchEndOffset = termEndOffset;
      tot += score;
    } else {
      startOffset = Math.min(startOffset, termStartOffset);
      endOffset = Math.max(endOffset, termEndOffset);
      if (score > 0) {
        if (tot == 0) {
          // first scoring token: start the match span fresh.
          // (use the locals rather than re-reading offsetAtt — same values,
          // consistent with the min/max branch below)
          matchStartOffset = termStartOffset;
          matchEndOffset = termEndOffset;
        } else {
          matchStartOffset = Math.min(matchStartOffset, termStartOffset);
          matchEndOffset = Math.max(matchEndOffset, termEndOffset);
        }
        tot += score;
      }
    }
    Token token = new Token();
    token.setOffset(termStartOffset, termEndOffset);
    token.setEmpty().append(termAtt);
    tokens[numTokens] = token;
    scores[numTokens] = score;
    numTokens++;
  }
}
示例5: createToken
import org.apache.lucene.analysis.Token; //导入方法依赖的package包/类
/**
 * Builds a {@link Token} for {@code term} spanning the given character
 * offsets with the given position increment.
 *
 * @param term              the token text
 * @param start             start character offset
 * @param end               end character offset (the original parameter was
 *                          misleadingly named {@code offset}; it is passed as
 *                          the end offset to {@link Token#setOffset})
 * @param positionIncrement position increment relative to the previous token
 * @return the populated token
 */
private static Token createToken
    (String term, int start, int end, int positionIncrement) {
  Token token = new Token();
  token.setOffset(start, end);
  token.copyBuffer(term.toCharArray(), 0, term.length());
  token.setPositionIncrement(positionIncrement);
  return token;
}
示例6: makeToken
import org.apache.lucene.analysis.Token; //导入方法依赖的package包/类
/**
 * Creates a {@link Token} carrying {@code text} with the supplied position
 * increment and character offsets.
 *
 * @param posIncr     position increment relative to the previous token
 * @param startOffset start character offset
 * @param endOffset   end character offset
 * @return the populated token
 */
private Token makeToken(String text, int posIncr, int startOffset, int endOffset) {
  final Token token = new Token();
  token.setOffset(startOffset, endOffset);
  token.setPositionIncrement(posIncr);
  token.append(text);
  return token;
}
示例7: convert
import org.apache.lucene.analysis.Token; //导入方法依赖的package包/类
/**
 * Whitespace-tokenizes {@code origQuery} and returns the resulting tokens
 * as a set (duplicate tokens collapse; iteration order is unspecified).
 *
 * @param origQuery the raw query string to tokenize
 * @return the set of tokens produced by a {@link WhitespaceAnalyzer}
 * @throws RuntimeException wrapping any {@link IOException} from the stream
 */
@Override
public Collection<Token> convert(String origQuery) {
  Collection<Token> result = new HashSet<>();
  // Analyzer is Closeable — try-with-resources fixes the leak in the
  // original, which never closed the analyzer.
  try (WhitespaceAnalyzer analyzer = new WhitespaceAnalyzer()) {
    TokenStream ts = null;
    try {
      ts = analyzer.tokenStream("", origQuery);
      // TODO: support custom attributes
      CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
      OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
      TypeAttribute typeAtt = ts.addAttribute(TypeAttribute.class);
      FlagsAttribute flagsAtt = ts.addAttribute(FlagsAttribute.class);
      PayloadAttribute payloadAtt = ts.addAttribute(PayloadAttribute.class);
      PositionIncrementAttribute posIncAtt = ts.addAttribute(PositionIncrementAttribute.class);
      ts.reset();
      while (ts.incrementToken()) {
        Token tok = new Token();
        tok.copyBuffer(termAtt.buffer(), 0, termAtt.length());
        tok.setOffset(offsetAtt.startOffset(), offsetAtt.endOffset());
        tok.setFlags(flagsAtt.getFlags());
        tok.setPayload(payloadAtt.getPayload());
        tok.setPositionIncrement(posIncAtt.getPositionIncrement());
        tok.setType(typeAtt.type());
        result.add(tok);
      }
      ts.end();
      return result;
    } catch (IOException e) {
      throw new RuntimeException(e);
    } finally {
      IOUtils.closeWhileHandlingException(ts);
    }
  }
}
示例8: updateInputToken
import org.apache.lucene.analysis.Token; //导入方法依赖的package包/类
/**
 * Shifts {@code inputToken}'s start and end offsets forward by the end
 * offset of the last prefix token, so the input token's offsets sit after
 * the prefix in the combined stream.
 *
 * @param inputToken      a token from the input stream (mutated in place)
 * @param lastPrefixToken the final token emitted by the prefix stream
 * @return the same {@code inputToken}, with shifted offsets
 */
public Token updateInputToken(Token inputToken, Token lastPrefixToken) {
  final int shift = lastPrefixToken.endOffset();
  inputToken.setOffset(shift + inputToken.startOffset(), shift + inputToken.endOffset());
  return inputToken;
}
示例9: updateSuffixToken
import org.apache.lucene.analysis.Token; //导入方法依赖的package包/类
/**
 * Shifts {@code suffixToken}'s start and end offsets forward by the end
 * offset of the last input token, so the suffix token's offsets sit after
 * the input in the combined stream.
 *
 * @param suffixToken    a token from the suffix stream (mutated in place)
 * @param lastInputToken the final token emitted by the input stream
 * @return the same {@code suffixToken}, with shifted offsets
 */
public Token updateSuffixToken(Token suffixToken, Token lastInputToken) {
  final int shift = lastInputToken.endOffset();
  suffixToken.setOffset(shift + suffixToken.startOffset(), shift + suffixToken.endOffset());
  return suffixToken;
}
示例10: updateSuffixToken
import org.apache.lucene.analysis.Token; //导入方法依赖的package包/类
/**
 * The default implementation adds the last prefix token's end offset to the
 * suffix token's start and end offsets.
 *
 * @param suffixToken a token from the suffix stream (mutated in place)
 * @param lastPrefixToken the last token from the prefix stream
 * @return the same {@code suffixToken}, with shifted offsets
 */
public Token updateSuffixToken(Token suffixToken, Token lastPrefixToken) {
  final int shift = lastPrefixToken.endOffset();
  suffixToken.setOffset(shift + suffixToken.startOffset(), shift + suffixToken.endOffset());
  return suffixToken;
}