

Java OffsetAttribute Class Code Examples

This article collects typical usage examples of the Java class org.apache.lucene.analysis.tokenattributes.OffsetAttribute. If you are wondering what OffsetAttribute does, how to use it, or what real-world code that uses it looks like, the selected examples below should help.


OffsetAttribute belongs to the org.apache.lucene.analysis.tokenattributes package. The sections below show 15 code examples of the class, ordered by popularity.
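Before the project-specific examples, here is a minimal, self-contained sketch of the typical OffsetAttribute workflow (the field name, sample text and choice of StandardAnalyzer are illustrative assumptions, and it assumes a Lucene version with a no-argument StandardAnalyzer constructor): register the attribute on a TokenStream, then read each token's start/end character offsets inside the incrementToken() loop.

import java.io.IOException;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;

public class OffsetAttributeQuickStart {
    public static void main(String[] args) throws IOException {
        Analyzer analyzer = new StandardAnalyzer();
        try (TokenStream ts = analyzer.tokenStream("content", "OffsetAttribute maps tokens back to text")) {
            CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
            OffsetAttribute offset = ts.addAttribute(OffsetAttribute.class);
            ts.reset();                                   // mandatory before the first incrementToken()
            while (ts.incrementToken()) {
                // startOffset()/endOffset() are character positions in the original input string
                System.out.println(term + " [" + offset.startOffset() + "," + offset.endOffset() + ")");
            }
            ts.end();                                     // records the final offset state
        }
        analyzer.close();
    }
}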

Example 1: main

import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; // import the required package/class
public static void main(String[] args) throws IOException {
    // ansj word segmentation on two sample sentences
    List<Term> parse = ToAnalysis.parse("中华人民 共和国 成立了 ");
    System.out.println(parse);
    List<Term> parse1 = IndexAnalysis.parse("你吃过饭了没有!!!!!吃过无妨论文");
    //System.out.println(parse1);

    // tokenize an id-like string and print each term (offset/position output left commented out)
    String text11 = "ZW321282050000000325";
    Tokenizer tokenizer = new AnsjTokenizer(new StringReader(text11), 0, true);
    CharTermAttribute termAtt = tokenizer.addAttribute(CharTermAttribute.class);
    OffsetAttribute offsetAtt = tokenizer.addAttribute(OffsetAttribute.class);
    PositionIncrementAttribute positionIncrementAtt = tokenizer.addAttribute(PositionIncrementAttribute.class);

    tokenizer.reset();
    while (tokenizer.incrementToken()) {
        System.out.print(termAtt.toString() + " ");
        //System.out.print(offsetAtt.startOffset() + "-" + offsetAtt.endOffset() + "-");
        //System.out.print(positionIncrementAtt.getPositionIncrement() + "/");
    }
    tokenizer.close();
}
 
Developer ID: dimensoft, Project: improved-journey, Lines of code: 27, Source file: TestAnsj.java

Example 2: assertTokenStream

import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; // import the required package/class
public static void assertTokenStream(TokenStream tokenStream, String[] expectedCharTerms, String[] expectedTypes, int[] expectedStartOffsets, int[] expectedEndOffsets) throws IOException {
    tokenStream.reset();
    int index = 0;
    while (tokenStream.incrementToken()) {
        assertEquals(expectedCharTerms[index], tokenStream.getAttribute(CharTermAttribute.class).toString());

        if(expectedTypes != null) {
            assertEquals(expectedTypes[index], tokenStream.getAttribute(TypeAttribute.class).type());
        }

        OffsetAttribute offsets = tokenStream.getAttribute(OffsetAttribute.class);

        if(expectedStartOffsets != null) {
            assertEquals(expectedStartOffsets[index], offsets.startOffset());
        }

        if(expectedEndOffsets != null) {
            assertEquals(expectedEndOffsets[index], offsets.endOffset());
        }

        index++;
    }
    tokenStream.end();
}
 
Developer ID: open-korean-text, Project: elasticsearch-analysis-openkoreantext, Lines of code: 25, Source file: TokenStreamAssertions.java

Example 3: parse

import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; // import the required package/class
private List<TokenData> parse(String text) {
    NamedAnalyzer analyzer = getAnalysisService().indexAnalyzers.get("test");

    try {
        try (TokenStream ts = analyzer.tokenStream("test", new StringReader(text))) {
            List<TokenData> result = new ArrayList<>();
            CharTermAttribute charTerm = ts.addAttribute(CharTermAttribute.class);
            OffsetAttribute offset = ts.addAttribute(OffsetAttribute.class);
            PositionIncrementAttribute position = ts.addAttribute(PositionIncrementAttribute.class);
            ts.reset();
            while (ts.incrementToken()) {
                String original = text.substring(offset.startOffset(), offset.endOffset());
                result.add(token(original, charTerm.toString(), position.getPositionIncrement()));
            }
            ts.end();

            return result;
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
 
Developer ID: EvidentSolutions, Project: elasticsearch-analysis-voikko, Lines of code: 23, Source file: VoikkoTokenFilterTests.java

Example 4: findGoodEndForNoHighlightExcerpt

import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; // import the required package/class
private static int findGoodEndForNoHighlightExcerpt(int noMatchSize, Analyzer analyzer, String fieldName, String contents)
        throws IOException {
    try (TokenStream tokenStream = analyzer.tokenStream(fieldName, contents)) {
        if (!tokenStream.hasAttribute(OffsetAttribute.class)) {
            // Can't split on term boundaries without offsets
            return -1;
        }
        int end = -1;
        tokenStream.reset();
        while (tokenStream.incrementToken()) {
            OffsetAttribute attr = tokenStream.getAttribute(OffsetAttribute.class);
            if (attr.endOffset() >= noMatchSize) {
                // Jump to the end of this token if it wouldn't put us past the boundary
                if (attr.endOffset() == noMatchSize) {
                    end = noMatchSize;
                }
                return end;
            }
            end = attr.endOffset();
        }
        tokenStream.end();
        // We've exhausted the token stream so we should just highlight everything.
        return end;
    }
}
 
Developer ID: justor, Project: elasticsearch_my, Lines of code: 26, Source file: PlainHighlighter.java

Example 5: PrefixAwareTokenFilter

import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; // import the required package/class
public PrefixAwareTokenFilter(TokenStream prefix, TokenStream suffix) {
  super(suffix);
  this.suffix = suffix;
  this.prefix = prefix;
  prefixExhausted = false;
  
  termAtt = addAttribute(CharTermAttribute.class);
  posIncrAtt = addAttribute(PositionIncrementAttribute.class);
  payloadAtt = addAttribute(PayloadAttribute.class);
  offsetAtt = addAttribute(OffsetAttribute.class);
  typeAtt = addAttribute(TypeAttribute.class);
  flagsAtt = addAttribute(FlagsAttribute.class);

  p_termAtt = prefix.addAttribute(CharTermAttribute.class);
  p_posIncrAtt = prefix.addAttribute(PositionIncrementAttribute.class);
  p_payloadAtt = prefix.addAttribute(PayloadAttribute.class);
  p_offsetAtt = prefix.addAttribute(OffsetAttribute.class);
  p_typeAtt = prefix.addAttribute(TypeAttribute.class);
  p_flagsAtt = prefix.addAttribute(FlagsAttribute.class);
}
 
Developer ID: lamsfoundation, Project: lams, Lines of code: 21, Source file: PrefixAwareTokenFilter.java

Example 6: assertOffsets

import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; // import the required package/class
static private void assertOffsets(String inputStr, TokenStream tokenStream, List<String> expected) {
    try {
        List<String> termList = new ArrayList<String>();
        // CharTermAttribute charTermAttribute = tokenStream.addAttribute(CharTermAttribute.class);
        OffsetAttribute offsetAttr = tokenStream.addAttribute(OffsetAttribute.class);
        while (tokenStream.incrementToken()) {
            int start = offsetAttr.startOffset();
            int end = offsetAttr.endOffset();
            termList.add(inputStr.substring(start, end));
        }
        System.out.println(String.join(" ", termList));
        assertThat(termList, is(expected));
    } catch (IOException e) {
        assertTrue(false);
    }
}
 
Developer ID: BuddhistDigitalResourceCenter, Project: lucene-bo, Lines of code: 17, Source file: TibetanAnalyzerTest.java

Example 7: findGoodEndForNoHighlightExcerpt

import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; // import the required package/class
private static int findGoodEndForNoHighlightExcerpt(int noMatchSize, Analyzer analyzer, String fieldName, String contents) throws IOException {
    try (TokenStream tokenStream = analyzer.tokenStream(fieldName, contents)) {
        if (!tokenStream.hasAttribute(OffsetAttribute.class)) {
            // Can't split on term boundaries without offsets
            return -1;
        }
        int end = -1;
        tokenStream.reset();
        while (tokenStream.incrementToken()) {
            OffsetAttribute attr = tokenStream.getAttribute(OffsetAttribute.class);
            if (attr.endOffset() >= noMatchSize) {
                // Jump to the end of this token if it wouldn't put us past the boundary
                if (attr.endOffset() == noMatchSize) {
                    end = noMatchSize;
                }
                return end;
            }
            end = attr.endOffset();
        }
        tokenStream.end();
        // We've exhausted the token stream so we should just highlight everything.
        return end;
    }
}
 
Developer ID: baidu, Project: Elasticsearch, Lines of code: 25, Source file: PlainHighlighter.java

Example 8: LTPTokenizer

import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; // import the required package/class
/**
 * Lucene constructor
 *
 * @throws UnirestException
 * @throws JSONException
 * @throws IOException
 */
public LTPTokenizer(Set<String> filter)
        throws IOException, JSONException, UnirestException {
    super();
    logger.info("LTPTokenizer Initialize......");
    // Add token offset attribute
    offsetAttr = addAttribute(OffsetAttribute.class);
    // Add token content attribute
    charTermAttr = addAttribute(CharTermAttribute.class);
    // Add token type attribute
    typeAttr = addAttribute(TypeAttribute.class);
    // Add token position attribute
    piAttr = addAttribute(PositionIncrementAttribute.class);
    // Create a new word segmenter to get tokens
    LTPSeg = new LTPWordSegmenter(input);
    // Add filter words set
    this.filter = filter;
}
 
Developer ID: lonly197, Project: elasticsearch-analysis-ltp, Lines of code: 25, Source file: LTPTokenizer.java

Example 9: testSearch

import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; // import the required package/class
@Test
public void testSearch() throws IOException {
    LcPinyinAnalyzer analyzer = new LcPinyinAnalyzer(AnalysisSetting.search);
    TokenStream tokenStream = analyzer.tokenStream("lc", "重qing");

    CharTermAttribute charTermAttribute = tokenStream.getAttribute(CharTermAttribute.class);
    OffsetAttribute offsetAttribute = tokenStream.getAttribute(OffsetAttribute.class);
    PositionIncrementAttribute positionIncrementAttribute = tokenStream.getAttribute(PositionIncrementAttribute.class);

    tokenStream.reset();
    Assert.assertTrue(tokenStream.incrementToken());
    Assert.assertEquals(charTermAttribute.toString(), "重");
    Assert.assertEquals(offsetAttribute.startOffset(), 0);
    Assert.assertEquals(offsetAttribute.endOffset(), 1);
    Assert.assertEquals(positionIncrementAttribute.getPositionIncrement(), 1);

    Assert.assertTrue(tokenStream.incrementToken());
    Assert.assertEquals(charTermAttribute.toString(), "qing");
    Assert.assertEquals(offsetAttribute.startOffset(), 1);
    Assert.assertEquals(offsetAttribute.endOffset(), 5);
    Assert.assertEquals(positionIncrementAttribute.getPositionIncrement(), 1);

    tokenStream.close();
}
 
Developer ID: gitchennan, Project: elasticsearch-analysis-lc-pinyin, Lines of code: 25, Source file: PinyinAnalysisTest.java

Example 10: testFullPinyinFilter

import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; // import the required package/class
public void testFullPinyinFilter() throws IOException {
    LcPinyinAnalyzer analyzer = new LcPinyinAnalyzer(AnalysisSetting.search);
    TokenStream tokenStream = analyzer.tokenStream("lc", "作者 : 陈楠");

    LcPinyinTokenFilter lcPinyinTokenFilter = new LcPinyinTokenFilter(tokenStream, PinyinFilterSetting.full_pinyin);

    CharTermAttribute charTermAttribute = lcPinyinTokenFilter.getAttribute(CharTermAttribute.class);
    OffsetAttribute offsetAttribute = lcPinyinTokenFilter.getAttribute(OffsetAttribute.class);
    PositionIncrementAttribute positionIncrementAttribute = lcPinyinTokenFilter.getAttribute(PositionIncrementAttribute.class);

    lcPinyinTokenFilter.reset();
    while (lcPinyinTokenFilter.incrementToken()) {
        System.out.println(charTermAttribute.toString() + ":" + offsetAttribute.startOffset() + "," + offsetAttribute.endOffset() + ":" + positionIncrementAttribute.getPositionIncrement());
    }
    lcPinyinTokenFilter.close();
}
 
Developer ID: gitchennan, Project: elasticsearch-analysis-lc-pinyin, Lines of code: 18, Source file: PinyinFilterTest.java

Example 11: testFirstLetterFilter

import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; // import the required package/class
public void testFirstLetterFilter() throws IOException {
    LcPinyinAnalyzer analyzer = new LcPinyinAnalyzer(AnalysisSetting.search);
    TokenStream tokenStream = analyzer.tokenStream("lc", "作者 : 陈楠");

    LcPinyinTokenFilter lcPinyinTokenFilter = new LcPinyinTokenFilter(tokenStream, PinyinFilterSetting.first_letter);

    CharTermAttribute charTermAttribute = lcPinyinTokenFilter.getAttribute(CharTermAttribute.class);
    OffsetAttribute offsetAttribute = lcPinyinTokenFilter.getAttribute(OffsetAttribute.class);
    PositionIncrementAttribute positionIncrementAttribute = lcPinyinTokenFilter.getAttribute(PositionIncrementAttribute.class);

    lcPinyinTokenFilter.reset();
    while (lcPinyinTokenFilter.incrementToken()) {
        System.out.println(charTermAttribute.toString() + ":" + offsetAttribute.startOffset() + "," + offsetAttribute.endOffset() + ":" + positionIncrementAttribute.getPositionIncrement());
    }
    lcPinyinTokenFilter.close();
}
 
Developer ID: gitchennan, Project: elasticsearch-analysis-lc-pinyin, Lines of code: 18, Source file: PinyinFilterTest.java

Example 12: copyTo

import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; // import the required package/class
@Override
public void copyTo(AttributeImpl target) {
  if (target instanceof Token) {
    final Token to = (Token) target;
    to.reinit(this);
    // reinit shares the payload, so clone it:
    if (payload !=null) {
      to.payload = payload.clone();
    }
  } else {
    super.copyTo(target);
    ((OffsetAttribute) target).setOffset(startOffset, endOffset);
    ((PositionIncrementAttribute) target).setPositionIncrement(positionIncrement);
    ((PayloadAttribute) target).setPayload((payload == null) ? null : payload.clone());
    ((FlagsAttribute) target).setFlags(flags);
    ((TypeAttribute) target).setType(type);
  }
}
 
Developer ID: gncloud, Project: fastcatsearch3, Lines of code: 19, Source file: Token.java

Example 13: WrappedTokenStream

import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; // import the required package/class
public WrappedTokenStream(TokenStream tokenStream, String pText) {
	this.pText = pText;
	this.tokenStream = tokenStream;
	if(tokenStream.hasAttribute(CharTermAttribute.class)) {
		charTermAttribute = tokenStream.getAttribute(CharTermAttribute.class);
	}
	if(tokenStream.hasAttribute(OffsetAttribute.class)) {
		offsetAttribute = tokenStream.getAttribute(OffsetAttribute.class);
	}
	if(tokenStream.hasAttribute(CharsRefTermAttribute.class)) {
		charsRefTermAttribute = tokenStream.getAttribute(CharsRefTermAttribute.class);
	}
	
	if(tokenStream.hasAttribute(AdditionalTermAttribute.class)) {
		additionalTermAttribute = tokenStream.getAttribute(AdditionalTermAttribute.class);
	}
	
	additionalTermAttributeLocal.init(this);
}
 
Developer ID: gncloud, Project: fastcatsearch3, Lines of code: 20, Source file: BasicHighlightAndSummary.java

Example 14: testBulk

import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; // import the required package/class
@Test
public void testBulk() throws IOException {
	String str = "";
	str = "SK,  하이닉스";
	//str = "하이닉스";
	
	StringReader input = new StringReader(str);
	CSVAnalyzer analyzer = new CSVAnalyzer();
	TokenStream tokenStream = analyzer.tokenStream("", input);
	tokenStream.reset();
	logger.debug("tokenStream:{}", tokenStream);
	CharTermAttribute charTermAttribute = tokenStream.getAttribute(CharTermAttribute.class);
	OffsetAttribute offsetAttribute = tokenStream.getAttribute(OffsetAttribute.class);
	for(int inx=0;tokenStream.incrementToken();inx++) {
		String term = charTermAttribute.toString();
		logger.debug("[{}] \"{}\" {}~{}", inx, term, offsetAttribute.startOffset(), offsetAttribute.endOffset());
	}
	analyzer.close();
}
 
Developer ID: gncloud, Project: fastcatsearch3, Lines of code: 20, Source file: CSVAnalyzerTest.java

Example 15: PinyinTransformTokenFilter

import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; // import the required package/class
/**
 * @param input            the input token stream
 * @param type             whether to output pinyin abbreviations or full pinyin; one of {@link #TYPE_ABBREVIATION}, {@link #TYPE_PINYIN}, {@link #TYPE_BOTH}
 * @param minTermLength    minimum length of a Chinese term to be converted
 * @param maxPolyphoneFreq maximum number of occurrences allowed for polyphonic characters
 * @param isOutChinese     whether to also output the original Chinese tokens
 */
public PinyinTransformTokenFilter(TokenStream input, int type,
                                  int minTermLength, int maxPolyphoneFreq, boolean isOutChinese) {
    super(input);
    this._minTermLength = minTermLength;
    this.maxPolyphoneFreq = maxPolyphoneFreq;
    if (this._minTermLength < 1) {
        this._minTermLength = 1;
    }
    if (this.maxPolyphoneFreq < 1) {
        this.maxPolyphoneFreq = Integer.MAX_VALUE;
    }
    this.isOutChinese = isOutChinese;
    this.outputFormat.setCaseType(HanyuPinyinCaseType.LOWERCASE);
    this.outputFormat.setToneType(HanyuPinyinToneType.WITHOUT_TONE);
    this.type = type;
    addAttribute(OffsetAttribute.class); // offset attribute
}
 
Developer ID: dowenliu-xyz, Project: lucene-token-filter-pinyin, Lines of code: 25, Source file: PinyinTransformTokenFilter.java
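For context, a minimal, hypothetical usage sketch of the constructor documented above follows (the StandardAnalyzer, field name and sample text are illustrative assumptions; TYPE_PINYIN is the constant referenced in the Javadoc, here assumed to be a public static field — check the project's own tests for authoritative usage):

Analyzer analyzer = new StandardAnalyzer();
try (TokenStream ts = analyzer.tokenStream("name", "中华人民共和国")) {
    // wrap the base stream: emit full pinyin and keep the original Chinese tokens as well
    TokenStream pinyin = new PinyinTransformTokenFilter(ts, PinyinTransformTokenFilter.TYPE_PINYIN, 1, 2, true);
    CharTermAttribute term = pinyin.addAttribute(CharTermAttribute.class);
    OffsetAttribute offset = pinyin.addAttribute(OffsetAttribute.class);
    pinyin.reset();
    while (pinyin.incrementToken()) {
        System.out.println(term + " " + offset.startOffset() + "-" + offset.endOffset());
    }
    pinyin.end();
}
analyzer.close();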


Note: The org.apache.lucene.analysis.tokenattributes.OffsetAttribute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and the copyright of the source code remains with those authors. Please consult each project's license before using or redistributing the code; do not reproduce this compilation without permission.