This article collects typical usage examples of the Java method org.apache.lucene.analysis.Token.setPayload. If you are unsure what Token.setPayload does, how to call it, or what real uses look like, the curated code samples below should help; you can also look further into the enclosing class, org.apache.lucene.analysis.Token.
Ten code examples of Token.setPayload are shown below, ordered by popularity by default.
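Before the collected examples, here is a minimal sketch of the call itself, assuming a Lucene 4/5-era API in which Token still exposes setPayload directly (in newer Lucene versions, payloads are set through PayloadAttribute on a token stream instead):

import org.apache.lucene.analysis.Token;
import org.apache.lucene.util.BytesRef;

Token t = new Token("example", 0, 7);          // term text plus start/end offsets
t.setPayload(new BytesRef(new byte[] { 42 })); // attach arbitrary per-position bytes
BytesRef payload = t.getPayload();             // the same bytes can be read back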
Example 1: next
import org.apache.lucene.analysis.Token; // import required by the example
public Token next(Token token) {
  if (currentPos == 0) return null;
  if (tokenPos <= currentPos) {
    // Point the token's term buffer at the next [start, end) slice of the sentence.
    token.setTermBuffer(sentence, textPositions[2 * tokenPos],
        textPositions[2 * tokenPos + 1] - textPositions[2 * tokenPos]);
    // Pack the int payload value into four bytes. Note the unusual byte order
    // (shifts of 16, 24, 8, 0); a matching decoder must unpack in the same order.
    Payload p = new Payload();
    byte[] b = new byte[4];
    b[0] = (byte) ((payloads[tokenPos] >>> 16) & 255);
    b[1] = (byte) ((payloads[tokenPos] >>> 24) & 255);
    b[2] = (byte) ((payloads[tokenPos] >>> 8) & 255);
    b[3] = (byte) (payloads[tokenPos] & 255);
    p.setData(b);
    token.setPayload(p);
    tokenPos++;
    return token;
  }
  return null;
}
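The example above never shows the read side, so here is a hypothetical decoder (not part of the original source) that reassembles the int; it mirrors the encoder's unusual (16, 24, 8, 0) shift order:

static int decodePayload(byte[] b) {
  return ((b[0] & 255) << 16)
       | ((b[1] & 255) << 24)
       | ((b[2] & 255) << 8)
       |  (b[3] & 255);
}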
Example 2: testLegalbutVeryLargePositions
import org.apache.lucene.analysis.Token; // import required by the example
public void testLegalbutVeryLargePositions() throws Exception {
  Directory dir = newDirectory();
  IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
  Document doc = new Document();
  Token t1 = new Token("foo", 0, 3);
  // A position increment just below Integer.MAX_VALUE is still legal for one token.
  t1.setPositionIncrement(Integer.MAX_VALUE - 500);
  if (random().nextBoolean()) {
    t1.setPayload(new BytesRef(new byte[] { 0x1 }));
  }
  TokenStream overflowingTokenStream = new CannedTokenStream(new Token[] { t1 });
  Field field = new TextField("foo", overflowingTokenStream);
  doc.add(field);
  iw.addDocument(doc);
  iw.close();
  dir.close();
}
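The randomized setPayload call makes the test exercise both the payload and the no-payload indexing paths. The position is "legal but very large" because Lucene positions start at -1 and each increment adds to that, so a single token with an increment just under Integer.MAX_VALUE still lands on a representable position.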
Example 3: testLegalbutVeryLargeOffsets
import org.apache.lucene.analysis.Token; // import required by the example
public void testLegalbutVeryLargeOffsets() throws Exception {
  Directory dir = newDirectory();
  IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
  Document doc = new Document();
  Token t1 = new Token("foo", 0, Integer.MAX_VALUE - 500);
  if (random().nextBoolean()) {
    t1.setPayload(new BytesRef("test"));
  }
  Token t2 = new Token("foo", Integer.MAX_VALUE - 500, Integer.MAX_VALUE);
  TokenStream tokenStream = new CannedTokenStream(new Token[] { t1, t2 });
  FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
  ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
  // store some term vectors for the checkindex cross-check
  ft.setStoreTermVectors(true);
  ft.setStoreTermVectorPositions(true);
  ft.setStoreTermVectorOffsets(true);
  Field field = new Field("foo", tokenStream, ft);
  doc.add(field);
  iw.addDocument(doc);
  iw.close();
  dir.close();
}
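Both tests only write. As a hedged sketch, a payload indexed this way could be read back before dir.close() roughly as follows (API names are from Lucene 8+; older versions spell MultiTerms.getTerms as MultiFields.getTerms):

try (IndexReader reader = DirectoryReader.open(dir)) {
  TermsEnum te = MultiTerms.getTerms(reader, "foo").iterator();
  if (te.seekExact(new BytesRef("foo"))) {
    PostingsEnum pe = te.postings(null, PostingsEnum.PAYLOADS);
    while (pe.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
      for (int i = 0; i < pe.freq(); i++) {
        pe.nextPosition();                  // must advance before reading the payload
        BytesRef payload = pe.getPayload(); // null when no payload was indexed here
      }
    }
  }
}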
Example 4: analyze
import org.apache.lucene.analysis.Token; // import required by the example
protected void analyze(Collection<Token> result, String text, int offset, int flagsAttValue) throws IOException {
  TokenStream stream = analyzer.tokenStream("", text);
  // TODO: support custom attributes
  CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
  TypeAttribute typeAtt = stream.addAttribute(TypeAttribute.class);
  PayloadAttribute payloadAtt = stream.addAttribute(PayloadAttribute.class);
  PositionIncrementAttribute posIncAtt = stream.addAttribute(PositionIncrementAttribute.class);
  OffsetAttribute offsetAtt = stream.addAttribute(OffsetAttribute.class);
  stream.reset();
  while (stream.incrementToken()) {
    Token token = new Token();
    token.copyBuffer(termAtt.buffer(), 0, termAtt.length());
    token.setOffset(offset + offsetAtt.startOffset(), offset + offsetAtt.endOffset());
    token.setFlags(flagsAttValue); // overwriting any flags already set...
    token.setType(typeAtt.type());
    token.setPayload(payloadAtt.getPayload());
    token.setPositionIncrement(posIncAtt.getPositionIncrement());
    result.add(token);
  }
  stream.end();
  stream.close();
}
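A hypothetical call site for this helper (the analyzer field, the input text, and the offset values here are assumptions for illustration):

Collection<Token> tokens = new ArrayList<>();
analyze(tokens, "hello world", 0, 0);
for (Token t : tokens) {
  BytesRef payload = t.getPayload(); // null unless a filter in the chain set one
}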
Example 5: getNextPrefixInputToken
import org.apache.lucene.analysis.Token; // import required by the example
private Token getNextPrefixInputToken(Token token) throws IOException {
  if (!prefix.incrementToken()) return null;
  token.copyBuffer(p_termAtt.buffer(), 0, p_termAtt.length());
  token.setPositionIncrement(p_posIncrAtt.getPositionIncrement());
  token.setFlags(p_flagsAtt.getFlags());
  token.setOffset(p_offsetAtt.startOffset(), p_offsetAtt.endOffset());
  token.setType(p_typeAtt.type());
  token.setPayload(p_payloadAtt.getPayload());
  return token;
}
Example 6: getNextSuffixInputToken
import org.apache.lucene.analysis.Token; // import required by the example
private Token getNextSuffixInputToken(Token token) throws IOException {
  if (!suffix.incrementToken()) return null;
  token.copyBuffer(termAtt.buffer(), 0, termAtt.length());
  token.setPositionIncrement(posIncrAtt.getPositionIncrement());
  token.setFlags(flagsAtt.getFlags());
  token.setOffset(offsetAtt.startOffset(), offsetAtt.endOffset());
  token.setType(typeAtt.type());
  token.setPayload(payloadAtt.getPayload());
  return token;
}
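Examples 5 and 6 are mirror images: each drains one input stream and copies every attribute, payload included, into a reusable Token. This matches the pattern of Lucene's PrefixAwareTokenFilter, which joins a prefix stream and a suffix stream while letting the caller adjust offsets at the seam.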
Example 7: getToken
import org.apache.lucene.analysis.Token; // import required by the example
/** Just make a token with the text, and set the payload
 *  to the text as well. Offsets increment "naturally". */
private Token getToken(String text) {
  Token t = new Token(text, curOffset, curOffset + text.length());
  t.setPayload(new BytesRef(text));
  curOffset++; // each token starts one character after the previous one
  return t;
}
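A hypothetical use of this helper, reusing the CannedTokenStream pattern already seen in Examples 2 and 3:

TokenStream ts = new CannedTokenStream(new Token[] { getToken("one"), getToken("two") });
// Every emitted token now carries its own text as its payload bytes.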
Example 8: convert
import org.apache.lucene.analysis.Token; // import required by the example
@Override
public Collection<Token> convert(String origQuery) {
  Collection<Token> result = new HashSet<>();
  WhitespaceAnalyzer analyzer = new WhitespaceAnalyzer();
  TokenStream ts = null;
  try {
    ts = analyzer.tokenStream("", origQuery);
    // TODO: support custom attributes
    CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
    OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
    TypeAttribute typeAtt = ts.addAttribute(TypeAttribute.class);
    FlagsAttribute flagsAtt = ts.addAttribute(FlagsAttribute.class);
    PayloadAttribute payloadAtt = ts.addAttribute(PayloadAttribute.class);
    PositionIncrementAttribute posIncAtt = ts.addAttribute(PositionIncrementAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
      Token tok = new Token();
      tok.copyBuffer(termAtt.buffer(), 0, termAtt.length());
      tok.setOffset(offsetAtt.startOffset(), offsetAtt.endOffset());
      tok.setFlags(flagsAtt.getFlags());
      tok.setPayload(payloadAtt.getPayload());
      tok.setPositionIncrement(posIncAtt.getPositionIncrement());
      tok.setType(typeAtt.type());
      result.add(tok);
    }
    ts.end();
    return result;
  } catch (IOException e) {
    throw new RuntimeException(e);
  } finally {
    IOUtils.closeWhileHandlingException(ts);
  }
}
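Because the result is a HashSet, token order is not preserved; the conversion keeps each token's flags, type, position increment, and payload, but a caller that needs the original order would have to use a List instead.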
Example 9: next
import org.apache.lucene.analysis.Token; // import required by the example
public Token next(Token token) {
  boolean nameFound = nameMatcher.find();
  boolean indexFound = indexMatcher.find();
  if (nameFound && indexFound) {
    final int nstart = nameMatcher.start();
    final int nend = nameMatcher.end();
    // If the matched name contains escaped quotes, unescape it before buffering.
    final int indexOfEscapedQuotes = jsonSentence.indexOf("\\\"", nstart + BEFORE_CONST);
    if (indexOfEscapedQuotes != -1 && indexOfEscapedQuotes < nend - AFTER_CONST) {
      String str = jsonSentence.substring(nstart + BEFORE_CONST, nend - AFTER_CONST);
      str = str.replace("\\\"", "\"");
      token.setTermBuffer(str);
    } else {
      token.setTermBuffer(jsonSentence, nstart + BEFORE_CONST,
          nend - AFTER_CONST - nstart - BEFORE_CONST);
    }
    // Parse the "a_b_c_d" index string; each component must fit in one byte.
    String index = jsonSentence.substring(indexMatcher.start() + BEFORE_CONST,
        indexMatcher.end() - AFTER_CONST);
    String[] split = index.split("_");
    for (int i = 0; i < 4; i++) {
      intbuffer[i] = Integer.parseInt(split[i]);
      if (intbuffer[i] > 255) {
        throw new OverflowException("Exceeded payload size for element " + i
            + " = " + intbuffer[i]);
      }
      buffer[i] = (byte) (intbuffer[i] & 255);
    }
    if (compressPayload) {
      byte[] bytes = new byte[8];
      try {
        token.setPayload(getVarDiffPayload(bytes));
      } catch (ArrayIndexOutOfBoundsException e) {
        throw new OverflowException("Exceeded payload size for element ");
      }
    } else {
      token.setPayload(new Payload(buffer.clone()));
    }
    return token;
  }
  return null;
}
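The four underscore-separated components must each fit in one byte, hence the > 255 check. getVarDiffPayload is not shown in this excerpt; judging from the 8-byte buffer it presumably writes a variable-length delta encoding. The original source also carried a commented-out variant that retried getVarDiffPayload with a 16-byte buffer before throwing OverflowException; the live branch above throws immediately.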
Example 10: setReusableTokenFromLocal
import org.apache.lucene.analysis.Token; // import required by the example
private Token setReusableTokenFromLocal(final Token reusableToken, TreeToken local) {
  reusableToken.setTermBuffer(local.label);     // overwrite the reused term text
  reusableToken.setPayload(local.getPayload()); // carry over the node's payload
  return reusableToken;
}
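This last example shows the classic pre-attribute Lucene idiom: one Token instance is reused across calls, and setTermBuffer plus setPayload overwrite the previous state rather than allocating a new token per term.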