本文整理汇总了Java中org.apache.lucene.analysis.Token.buffer方法的典型用法代码示例。如果您正苦于以下问题:Java Token.buffer方法的具体用法?Java Token.buffer怎么用?Java Token.buffer使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.lucene.analysis.Token
的用法示例。
在下文中一共展示了Token.buffer方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getLookupResults
import org.apache.lucene.analysis.Token; //导入方法依赖的package包/类
/**
 * Runs a single suggester lookup for one input token.
 *
 * @param options      spelling options carrying the suggest mode and requested count
 * @param currentToken token whose term buffer is used as the lookup key
 * @return the lookup results, or {@code null} when the lookup produced nothing
 * @throws IOException if the underlying lookup fails
 */
private List<LookupResult> getLookupResults(SpellingOptions options, Token currentToken) throws IOException {
  // Point a CharsRef at the token's term buffer directly — no copy is made.
  CharsRef key = new CharsRef();
  key.chars = currentToken.buffer();
  key.offset = 0;
  key.length = currentToken.length();
  // WFST/Analyzing suggesters do not support the "more popular" mode, so the
  // flag is only honored for other lookup implementations.
  boolean morePopularOnly;
  if (options.suggestMode != SuggestMode.SUGGEST_MORE_POPULAR) {
    morePopularOnly = false;
  } else {
    morePopularOnly = !(lookup instanceof WFSTCompletionLookup)
        && !(lookup instanceof AnalyzingSuggester);
  }
  List<LookupResult> results = lookup.lookup(key, morePopularOnly, options.count);
  if (results != null && !results.isEmpty()) {
    return results;
  }
  // Callers treat null as "no suggestions"; preserve that contract.
  return null;
}
示例2: addToken
import org.apache.lucene.analysis.Token; //导入方法依赖的package包/类
/**
 * Slices a sub-token out of the original token's term buffer and enqueues it.
 *
 * @param oriToken         the source token whose buffer and start offset are reused
 * @param termBufferOffset offset of the slice within the source term buffer
 * @param termBufferLength length of the slice
 * @param type             character class of the slice; decimal digits become
 *                         {@code Word.TYPE_DIGIT}, everything else {@code Word.TYPE_LETTER}
 */
private void addToken(Token oriToken, int termBufferOffset, int termBufferLength, byte type) {
  // The new token's character offsets are relative to the original token's start.
  int sliceStart = oriToken.startOffset() + termBufferOffset;
  Token sliced = new Token(oriToken.buffer(), termBufferOffset, termBufferLength,
      sliceStart, sliceStart + termBufferLength);
  sliced.setType(type == Character.DECIMAL_DIGIT_NUMBER ? Word.TYPE_DIGIT : Word.TYPE_LETTER);
  tokenQueue.offer(sliced);
}
示例3: getSuggestions
import org.apache.lucene.analysis.Token; //导入方法依赖的package包/类
/**
 * Produces spelling suggestions for every token in the request by querying the
 * suggester lookup.
 *
 * @param options spelling options carrying the tokens, suggest mode, and count
 * @return a SpellingResult mapping each token to its suggestions; tokens for
 *         which the lookup returned null contribute nothing
 * @throws IOException if the underlying lookup fails
 */
@Override
public SpellingResult getSuggestions(SpellingOptions options) throws IOException {
  LOG.debug("getSuggestions: " + options.tokens);
  if (lookup == null) {
    LOG.info("Lookup is null - invoke spellchecker.build first");
    return EMPTY_RESULT;
  }
  SpellingResult res = new SpellingResult();
  CharsRef scratch = new CharsRef();
  // Hoisted out of the loop: this depends only on the request mode and the
  // lookup implementation, not on the current token, so compute it once.
  boolean onlyMorePopular = (options.suggestMode == SuggestMode.SUGGEST_MORE_POPULAR) &&
      !(lookup instanceof WFSTCompletionLookup) &&
      !(lookup instanceof AnalyzingSuggester);
  for (Token t : options.tokens) {
    // Re-point the shared scratch ref at this token's term buffer (no copy).
    scratch.chars = t.buffer();
    scratch.offset = 0;
    scratch.length = t.length();
    List<LookupResult> suggestions = lookup.lookup(scratch, onlyMorePopular, options.count);
    if (suggestions == null) {
      continue;
    }
    if (options.suggestMode != SuggestMode.SUGGEST_MORE_POPULAR) {
      // Sort for deterministic output order when not ranking by popularity.
      Collections.sort(suggestions);
    }
    for (LookupResult lr : suggestions) {
      res.add(t, lr.key.toString(), (int) lr.value);
    }
  }
  return res;
}
示例4: toNamedList
import org.apache.lucene.analysis.Token; //导入方法依赖的package包/类
/**
 * Converts a {@link SpellingResult} into the NamedList structure used in the
 * spellcheck response section.
 *
 * <p>Suggestions identical to the input token are dropped. A token's entry is
 * emitted when it has surviving suggestions, or unconditionally for shard
 * requests (so the coordinator can merge per-shard results).
 *
 * @param shardRequest     true when building a per-shard (distributed) response
 * @param spellingResult   the suggestions to serialize
 * @param origQuery        original query string (unused here; kept for signature
 *                         compatibility with overriders/callers)
 * @param extendedResults  when true and frequency info is available, each
 *                         suggestion is emitted as {word, freq} and origFreq is added
 * @param collate          unused here; kept for signature compatibility
 * @param correctlySpelled echoed into the result when extendedResults is set
 * @return the populated NamedList
 */
protected NamedList toNamedList(boolean shardRequest,
    SpellingResult spellingResult, String origQuery, boolean extendedResults,
    boolean collate, boolean correctlySpelled) {
  NamedList result = new NamedList();
  Map<Token,LinkedHashMap<String,Integer>> suggestions = spellingResult
      .getSuggestions();
  boolean hasFreqInfo = spellingResult.hasTokenFrequencyInfo();
  for (Map.Entry<Token,LinkedHashMap<String,Integer>> entry : suggestions
      .entrySet()) {
    Token inputToken = entry.getKey();
    String tokenString = new String(inputToken.buffer(), 0, inputToken
        .length());
    // Copy so we can remove self-suggestions without mutating the source map.
    Map<String,Integer> theSuggestions = new LinkedHashMap<>(
        entry.getValue());
    Iterator<String> sugIter = theSuggestions.keySet().iterator();
    while (sugIter.hasNext()) {
      // Drop suggestions that merely echo the input token.
      if (sugIter.next().equals(tokenString)) {
        sugIter.remove();
      }
    }
    // BUG FIX: the original guarded this branch with "theSuggestions != null"
    // AFTER already dereferencing the map above; the copy is never null, so the
    // check was dead. Also removed the never-read locals hasSuggestions and
    // hasZeroFrequencyToken (and the frequency probe that only fed them).
    if (theSuggestions.size() > 0 || shardRequest) {
      SimpleOrderedMap suggestionList = new SimpleOrderedMap();
      suggestionList.add("numFound", theSuggestions.size());
      suggestionList.add("startOffset", inputToken.startOffset());
      suggestionList.add("endOffset", inputToken.endOffset());
      // Logical structure of normal (non-extended) results:
      // "suggestion":["alt1","alt2"]
      //
      // Logical structure of the extended results:
      // "suggestion":[
      //     {"word":"alt1","freq":7},
      //     {"word":"alt2","freq":4}
      // ]
      if (extendedResults && hasFreqInfo) {
        suggestionList.add("origFreq", spellingResult
            .getTokenFrequency(inputToken));
        ArrayList<SimpleOrderedMap> sugs = new ArrayList<>();
        suggestionList.add("suggestion", sugs);
        for (Map.Entry<String,Integer> suggEntry : theSuggestions.entrySet()) {
          SimpleOrderedMap sugEntry = new SimpleOrderedMap();
          sugEntry.add("word", suggEntry.getKey());
          sugEntry.add("freq", suggEntry.getValue());
          sugs.add(sugEntry);
        }
      } else {
        suggestionList.add("suggestion", theSuggestions.keySet());
      }
      result.add(tokenString, suggestionList);
    }
  }
  if (extendedResults) {
    result.add("correctlySpelled", correctlySpelled);
  }
  return result;
}
示例5: toNamedList
import org.apache.lucene.analysis.Token; //导入方法依赖的package包/类
/**
 * Converts a {@link SpellingResult} into the NamedList structure used in the
 * spellcheck response section.
 *
 * <p>Suggestions identical to the input token are dropped. A token's entry is
 * emitted when it has surviving suggestions, or unconditionally for shard
 * requests (so the coordinator can merge per-shard results).
 *
 * @param shardRequest     true when building a per-shard (distributed) response
 * @param spellingResult   the suggestions to serialize
 * @param origQuery        original query string (unused here; kept for signature
 *                         compatibility with overriders/callers)
 * @param extendedResults  when true and frequency info is available, each
 *                         suggestion is emitted as {word, freq} and origFreq is added
 * @param collate          unused here; kept for signature compatibility
 * @param correctlySpelled echoed into the result when extendedResults is set
 * @return the populated NamedList
 */
protected NamedList toNamedList(boolean shardRequest,
    SpellingResult spellingResult, String origQuery, boolean extendedResults,
    boolean collate, boolean correctlySpelled) {
  NamedList result = new NamedList();
  Map<Token,LinkedHashMap<String,Integer>> suggestions = spellingResult
      .getSuggestions();
  boolean hasFreqInfo = spellingResult.hasTokenFrequencyInfo();
  for (Map.Entry<Token,LinkedHashMap<String,Integer>> entry : suggestions
      .entrySet()) {
    Token inputToken = entry.getKey();
    String tokenString = new String(inputToken.buffer(), 0, inputToken
        .length());
    // Copy so we can remove self-suggestions without mutating the source map.
    Map<String,Integer> theSuggestions = new LinkedHashMap<String,Integer>(
        entry.getValue());
    Iterator<String> sugIter = theSuggestions.keySet().iterator();
    while (sugIter.hasNext()) {
      // Drop suggestions that merely echo the input token.
      if (sugIter.next().equals(tokenString)) {
        sugIter.remove();
      }
    }
    // BUG FIX: the original guarded this branch with "theSuggestions != null"
    // AFTER already dereferencing the map above; the copy is never null, so the
    // check was dead. Also removed the never-read locals hasSuggestions and
    // hasZeroFrequencyToken (and the frequency probe that only fed them).
    if (theSuggestions.size() > 0 || shardRequest) {
      SimpleOrderedMap suggestionList = new SimpleOrderedMap();
      suggestionList.add("numFound", theSuggestions.size());
      suggestionList.add("startOffset", inputToken.startOffset());
      suggestionList.add("endOffset", inputToken.endOffset());
      // Logical structure of normal (non-extended) results:
      // "suggestion":["alt1","alt2"]
      //
      // Logical structure of the extended results:
      // "suggestion":[
      //     {"word":"alt1","freq":7},
      //     {"word":"alt2","freq":4}
      // ]
      if (extendedResults && hasFreqInfo) {
        suggestionList.add("origFreq", spellingResult
            .getTokenFrequency(inputToken));
        ArrayList<SimpleOrderedMap> sugs = new ArrayList<SimpleOrderedMap>();
        suggestionList.add("suggestion", sugs);
        for (Map.Entry<String,Integer> suggEntry : theSuggestions.entrySet()) {
          SimpleOrderedMap sugEntry = new SimpleOrderedMap();
          sugEntry.add("word", suggEntry.getKey());
          sugEntry.add("freq", suggEntry.getValue());
          sugs.add(sugEntry);
        }
      } else {
        suggestionList.add("suggestion", theSuggestions.keySet());
      }
      result.add(tokenString, suggestionList);
    }
  }
  if (extendedResults) {
    result.add("correctlySpelled", correctlySpelled);
  }
  return result;
}
示例6: getSuggestions
import org.apache.lucene.analysis.Token; //导入方法依赖的package包/类
/**
 * Produces spelling suggestions for every token in the request, optionally
 * normalizing each raw suggestion through {@code suggestionAnalyzer} and
 * merging the weights of raw suggestions that collapse to the same corrected
 * form.
 *
 * @param options spelling options carrying the tokens, suggest mode, and count
 * @return a SpellingResult mapping each token to up to {@code options.count}
 *         suggestions
 * @throws IOException if the underlying lookup fails
 */
@Override
public SpellingResult getSuggestions(SpellingOptions options) throws IOException {
  LOG.debug("getSuggestions: " + options.tokens);
  if (lookup == null) {
    LOG.info("Lookup is null - invoke spellchecker.build first");
    return EMPTY_RESULT;
  }
  SpellingResult res = new SpellingResult();
  CharsRef scratch = new CharsRef();
  // Hoisted out of the loop: this depends only on the request mode and the
  // lookup implementation, not on the current token, so compute it once.
  boolean onlyMorePopular = (options.suggestMode == SuggestMode.SUGGEST_MORE_POPULAR) &&
      !(lookup instanceof WFSTCompletionLookup) &&
      !(lookup instanceof AnalyzingSuggester);
  for (Token currentToken : options.tokens) {
    // Re-point the shared scratch ref at this token's term buffer (no copy).
    scratch.chars = currentToken.buffer();
    scratch.offset = 0;
    scratch.length = currentToken.length();
    // get more than the requested suggestions as a lot get collapsed by the corrections
    List<LookupResult> suggestions = lookup.lookup(scratch, onlyMorePopular, options.count * 10);
    if (suggestions == null || suggestions.size() == 0) {
      continue;
    }
    if (options.suggestMode != SuggestMode.SUGGEST_MORE_POPULAR) {
      Collections.sort(suggestions);
    }
    final LinkedHashMap<String, Integer> lhm = new LinkedHashMap<String, Integer>();
    for (LookupResult lr : suggestions) {
      String suggestion = lr.key.toString();
      if (this.suggestionAnalyzer != null) {
        String correction = getAnalyzerResult(suggestion);
        // multiple could map to the same, so don't repeat suggestions
        if (!isStringNullOrEmpty(correction)) {
          if (lhm.containsKey(correction)) {
            // Merge weights of raw suggestions collapsing to the same form.
            lhm.put(correction, lhm.get(correction) + (int) lr.value);
          } else {
            lhm.put(correction, (int) lr.value);
          }
        }
      } else {
        lhm.put(suggestion, (int) lr.value);
      }
      if (lhm.size() >= options.count) {
        break;
      }
    }
    // sort by new doc frequency
    Map<String, Integer> orderedMap;
    if (options.suggestMode != SuggestMode.SUGGEST_MORE_POPULAR) {
      // retain the insertion (lookup) order from above
      orderedMap = lhm;
    } else {
      // BUG FIX: the original comparator returned 0 whenever two suggestions had
      // equal frequency; TreeMap treats compare()==0 as duplicate keys, so all
      // but one of the equal-frequency suggestions were silently dropped by
      // putAll. Tie-break on the suggestion text to keep every entry.
      orderedMap = new TreeMap<String, Integer>(new Comparator<String>() {
        @Override
        public int compare(String s1, String s2) {
          int byFreq = lhm.get(s2).compareTo(lhm.get(s1)); // descending frequency
          return byFreq != 0 ? byFreq : s1.compareTo(s2);
        }
      });
      orderedMap.putAll(lhm);
    }
    for (Map.Entry<String, Integer> entry : orderedMap.entrySet()) {
      res.add(currentToken, entry.getKey(), entry.getValue());
    }
  }
  return res;
}