

Java PackedTokenAttributeImpl Class Code Examples

This article collects typical usage examples of the Java class org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl. If you are wondering what exactly PackedTokenAttributeImpl does, how to use it, or what it looks like in real code, the curated class examples below should help.


The PackedTokenAttributeImpl class belongs to the org.apache.lucene.analysis.tokenattributes package. A total of 15 code examples for the class are shown below, sorted by popularity by default.
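Before the examples, here is a minimal standalone sketch of what PackedTokenAttributeImpl bundles together (term text, offsets, type, and position increment). The class name PackedTokenAttributeDemo is only illustrative; the sketch assumes a recent Lucene analysis dependency (lucene-core / lucene-analysis-common) on the classpath.

import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl;

public class PackedTokenAttributeDemo {
    public static void main(String[] args) {
        // PackedTokenAttributeImpl packs several token attributes into one object:
        // CharTermAttribute, OffsetAttribute, TypeAttribute,
        // PositionIncrementAttribute and PositionLengthAttribute.
        PackedTokenAttributeImpl token = new PackedTokenAttributeImpl();
        token.append("lucene");        // term text (CharTermAttribute)
        token.setOffset(0, 6);         // start/end character offsets
        token.setType("word");         // token type
        token.setPositionIncrement(1); // position relative to the previous token

        System.out.println(new String(token.buffer(), 0, token.length())
                + " [" + token.startOffset() + "," + token.endOffset()
                + "] type=" + token.type());
    }
}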

Example 1: printlnToken

import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl; // import the required package/class
public static void printlnToken(String txt, Analyzer analyzer) throws IOException {
	System.out.println("---------"+txt.length()+"\n"+txt);
	TokenStream ts = analyzer.tokenStream("text", new StringReader(txt));
	/*// Lucene 2.9 and below
	for(Token t= new Token(); (t=ts.next(t)) !=null;) {
		System.out.println(t);
	}*/
	/*while(ts.incrementToken()) {
		TermAttribute termAtt = (TermAttribute)ts.getAttribute(TermAttribute.class);
		OffsetAttribute offsetAtt = (OffsetAttribute)ts.getAttribute(OffsetAttribute.class);
		TypeAttribute typeAtt = (TypeAttribute)ts.getAttribute(TypeAttribute.class);

		System.out.println("("+termAtt.term()+","+offsetAtt.startOffset()+","+offsetAtt.endOffset()+",type="+typeAtt.type()+")");
	}*/
	ts.reset();
	for(PackedTokenAttributeImpl t= new PackedTokenAttributeImpl(); (t=TokenUtils.nextToken(ts, t)) !=null;) {
		System.out.println(t);
	}
	ts.close();
}
 
Developer: chenlb, Project: mmseg4j-solr, Lines: 21, Source: AnalyzerTest.java
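For comparison with the commented-out legacy loops in Example 1, here is a minimal sketch of the standard attribute-based consumption pattern that replaced the deprecated TermAttribute API. It assumes the same analyzer and txt variables as printlnToken above, plus imports of CharTermAttribute, OffsetAttribute, and TypeAttribute from org.apache.lucene.analysis.tokenattributes; error handling is omitted.

TokenStream ts = analyzer.tokenStream("text", new StringReader(txt));
CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
TypeAttribute typeAtt = ts.addAttribute(TypeAttribute.class);
ts.reset();
while (ts.incrementToken()) {
    // termAtt.toString() returns the current term text
    System.out.println("(" + termAtt + "," + offsetAtt.startOffset() + ","
            + offsetAtt.endOffset() + ",type=" + typeAtt.type() + ")");
}
ts.end();
ts.close();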

Example 2: incrementToken

import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl; // import the required package/class
@Override
public final boolean incrementToken() throws IOException {
    if (!tokens.isEmpty()) {
        if (current == null) {
            throw new IllegalArgumentException("current is null");
        }
        PackedTokenAttributeImpl token = tokens.removeFirst();
        restoreState(current);
        termAtt.setEmpty().append(token);
        posIncAtt.setPositionIncrement(0);
        return true;
    }
    if (input.incrementToken()) {
        process();
        if (!tokens.isEmpty()) {
            current = captureState();
        }
        return true;
    } else {
        return false;
    }
}
 
Developer: jprante, Project: elasticsearch-plugin-bundle, Lines: 23, Source: SymbolnameTokenFilter.java

Example 3: incrementToken

import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl; // import the required package/class
@Override
public final boolean incrementToken() throws IOException {
    if (!tokens.isEmpty()) {
        if (current == null) {
            throw new IllegalArgumentException("current is null");
        }
        PackedTokenAttributeImpl token = tokens.removeFirst();
        restoreState(current);
        termAtt.setEmpty().append(token);
        posIncAtt.setPositionIncrement(0);
        return true;
    }
    if (input.incrementToken()) {
        detect();
        if (!tokens.isEmpty()) {
            current = captureState();
        }
        return true;
    } else {
        return false;
    }
}
 
Developer: jprante, Project: elasticsearch-plugin-bundle, Lines: 23, Source: StandardnumberTokenFilter.java

Example 4: incrementToken

import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl; // import the required package/class
@Override
public boolean incrementToken() throws IOException {
  while (true) {
    if (curTermBuffer == null) {
      if (!input.incrementToken()) {
        return false;
      } else {
        curTermBuffer = termAtt.buffer().clone();
        curLen =
            ((PackedTokenAttributeImpl) termAtt).endOffset()
                - ((PackedTokenAttributeImpl) termAtt).startOffset();
      }
    } else {
      if (curPos < curLen) {
        termAtt.copyBuffer(curTermBuffer, curPos, 1);
        curPos++;
        return true;
      } else {
        curTermBuffer = null;
        curPos = 0;
      }
    }
  }
}
 
Developer: MysterionRise, Project: information-retrieval-adventure, Lines: 25, Source: SplitTokenByChar.java

Example 5: incrementToken

import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl; // import the required package/class
@Override
public final boolean incrementToken() throws IOException {
    if (input.incrementToken()) {
        PackedTokenAttributeImpl token = eudex();
        restoreState(current);
        termAtt.setEmpty().append(token);
        offsetAtt.setOffset(token.startOffset(), token.endOffset());
        posIncAtt.setPositionIncrement(0);
        current = captureState();
        return true;
    } else {
        return false;
    }
}
 
Developer: jprante, Project: elasticsearch-analysis-phonetic-eudex, Lines: 15, Source: EudexTokenFilter.java

Example 6: eudex

import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl; // import the required package/class
protected PackedTokenAttributeImpl eudex() throws CharacterCodingException {
    String term = new String(termAtt.buffer(), 0, termAtt.length());
    CharSequence s = Long.toHexString(eudex.encode(term));
    PackedTokenAttributeImpl impl = new PackedTokenAttributeImpl();
    impl.append(s);
    return impl;
}
 
Developer: jprante, Project: elasticsearch-analysis-phonetic-eudex, Lines: 8, Source: EudexTokenFilter.java

Example 7: CutLetterDigitFilter

import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl; // import the required package/class
public CutLetterDigitFilter(TokenStream input) {
	super(input);

	reusableToken = new PackedTokenAttributeImpl();
	termAtt = addAttribute(CharTermAttribute.class);
	offsetAtt = addAttribute(OffsetAttribute.class);
	typeAtt = addAttribute(TypeAttribute.class);
}
 
Developer: chenlb, Project: mmseg4j-solr, Lines: 9, Source: CutLetterDigitFilter.java

Example 8: addToken

import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl; // import the required package/class
private void addToken(PackedTokenAttributeImpl oriToken, int termBufferOffset, int termBufferLength, byte type) {
	PackedTokenAttributeImpl token = TokenUtils.subToken(oriToken, termBufferOffset, termBufferLength);

	if(type == Character.DECIMAL_DIGIT_NUMBER) {
		token.setType(Word.TYPE_DIGIT);
	} else {
		token.setType(Word.TYPE_LETTER);
	}

	tokenQueue.offer(token);
}
 
Developer: chenlb, Project: mmseg4j-solr, Lines: 12, Source: CutLetterDigitFilter.java

Example 9: incrementToken

import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl; // import the required package/class
public final boolean incrementToken() throws IOException {
	clearAttributes();
	PackedTokenAttributeImpl token = nextToken(reusableToken);
	if(token != null) {
		termAtt.copyBuffer(token.buffer(), 0, token.length());
		offsetAtt.setOffset(token.startOffset(), token.endOffset());
		typeAtt.setType(token.type());
		return true;
	} else {
		return false;
	}
}
 
Developer: chenlb, Project: mmseg4j-solr, Lines: 13, Source: CutLetterDigitFilter.java

Example 10: process

import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl; // import the required package/class
protected void process() throws CharacterCodingException {
    String term = new String(termAtt.buffer(), 0, termAtt.length());
    for (CharSequence charSequence : process(term)) {
        if (charSequence != null) {
            PackedTokenAttributeImpl token = new PackedTokenAttributeImpl();
            token.append(charSequence);
            tokens.add(token);
        }
    }
}
 
Developer: jprante, Project: elasticsearch-plugin-bundle, Lines: 11, Source: SymbolnameTokenFilter.java

Example 11: detect

import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl; // import the required package/class
private void detect() throws CharacterCodingException {
    CharSequence term = new String(termAtt.buffer(), 0, termAtt.length());
    Collection<CharSequence> variants = service.lookup(settings, term);
    for (CharSequence ch : variants) {
        if (ch != null) {
            PackedTokenAttributeImpl token = new PackedTokenAttributeImpl();
            token.append(ch);
            tokens.add(token);
        }
    }
}
 
Developer: jprante, Project: elasticsearch-plugin-bundle, Lines: 12, Source: StandardnumberTokenFilter.java

Example 12: baseform

import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl; // import the required package/class
protected void baseform() throws CharacterCodingException {
    CharSequence term = new String(termAtt.buffer(), 0, termAtt.length());
    CharSequence s = dictionary.lookup(term);
    if (s != null && s.length() > 0) {
        PackedTokenAttributeImpl impl = new PackedTokenAttributeImpl();
        impl.append(s);
        tokens.add(impl);
    }
}
 
Developer: jprante, Project: elasticsearch-analysis-baseform, Lines: 10, Source: BaseformTokenFilter.java

Example 13: detect

import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl; // import the required package/class
protected void detect() throws CharacterCodingException {
    CharSequence term = new String(termAtt.buffer(), 0, termAtt.length());
    Collection<CharSequence> variants = standardNumberService.lookup(term);
    for (CharSequence ch : variants) {
        if (ch != null) {
            PackedTokenAttributeImpl token = new PackedTokenAttributeImpl();
            token.append(ch);
            tokens.add(token);
        }
    }
}
 
Developer: jprante, Project: elasticsearch-analysis-german, Lines: 12, Source: StandardNumberTokenFilter.java

Example 14: detect

import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl; // import the required package/class
protected void detect() throws CharacterCodingException {
    CharSequence term = new String(termAtt.buffer(), 0, termAtt.length());
    Collection<CharSequence> variants = service.lookup(settings, term);
    for (CharSequence ch : variants) {
        if (ch != null) {
            PackedTokenAttributeImpl token = new PackedTokenAttributeImpl();
            token.append(ch);
            tokens.add(token);
        }
    }
}
 
Developer: jprante, Project: elasticsearch-analysis-standardnumber, Lines: 12, Source: StandardNumberTokenFilter.java

Example 15: nextToken

import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl; // import the required package/class
private PackedTokenAttributeImpl nextToken(PackedTokenAttributeImpl reusableToken) throws IOException {
	assert reusableToken != null;

	// first consume any tokens left over from the previous call
	PackedTokenAttributeImpl nextToken = tokenQueue.poll();
	if(nextToken != null) {
		return nextToken;
	}

	nextToken = TokenUtils.nextToken(input, reusableToken);

	if(nextToken != null &&
			(Word.TYPE_LETTER_OR_DIGIT.equalsIgnoreCase(nextToken.type())
				|| Word.TYPE_DIGIT_OR_LETTER.equalsIgnoreCase(nextToken.type()))
			) {
		final char[] buffer = nextToken.buffer();
		final int length = nextToken.length();
		byte lastType = (byte) Character.getType(buffer[0]);	// type of the previous character, used to detect type changes
		int termBufferOffset = 0;
		int termBufferLength = 0;
		for(int i=0;i<length;i++) {
			byte type = (byte) Character.getType(buffer[i]);
			if(type <= Character.MODIFIER_LETTER) {
				type = Character.LOWERCASE_LETTER;
			}
			if(type != lastType) {	// type differs from the previous character
				addToken(nextToken, termBufferOffset, termBufferLength, lastType);

				termBufferOffset += termBufferLength;
				termBufferLength = 0;

				lastType = type;
			}

			termBufferLength++;
		}
		if(termBufferLength > 0) {	// flush the final segment
			addToken(nextToken, termBufferOffset, termBufferLength, lastType);
		}
		nextToken = tokenQueue.poll();
	}

	return nextToken;
}
 
Developer: chenlb, Project: mmseg4j-solr, Lines: 45, Source: CutLetterDigitFilter.java


Note: The org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult each project's license before redistributing or reusing the code, and do not republish without permission.