

Java TypeAttribute Class Code Examples

This article collects typical usage examples of the Java class org.apache.lucene.analysis.tokenattributes.TypeAttribute. If you are wondering what TypeAttribute is for, or how to use it in practice, the curated examples below should help.


TypeAttribute belongs to the org.apache.lucene.analysis.tokenattributes package. Fifteen code examples of the class are shown below, ordered by popularity.
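
Every example below follows the same basic pattern: register a TypeAttribute (usually alongside a CharTermAttribute) on a TokenStream, consume the stream, and read type() for each token. As a minimal, self-contained sketch of that pattern (assuming a recent Lucene release in which WhitespaceAnalyzer has a no-argument constructor; the field name and input text are arbitrary):

import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;

public class TypeAttributeDemo {
    public static void main(String[] args) throws IOException {
        Analyzer analyzer = new WhitespaceAnalyzer();
        try (TokenStream stream = analyzer.tokenStream("field", "hello world")) {
            // Attributes must be registered before the stream is consumed.
            CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
            TypeAttribute typeAtt = stream.addAttribute(TypeAttribute.class);
            stream.reset();
            while (stream.incrementToken()) {
                // WhitespaceAnalyzer leaves the default token type, "word".
                System.out.println(termAtt.toString() + " / " + typeAtt.type());
            }
            stream.end();
        }
    }
}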

Example 1: assertTokenStream

import org.apache.lucene.analysis.tokenattributes.TypeAttribute; // import the required class
public static void assertTokenStream(TokenStream tokenStream, String[] expectedCharTerms, String[] expectedTypes, int[] expectedStartOffsets, int[] expectedEndOffsets) throws IOException {
    tokenStream.reset();
    int index = 0;
    while (tokenStream.incrementToken()) {
        assertEquals(expectedCharTerms[index], tokenStream.getAttribute(CharTermAttribute.class).toString());

        if (expectedTypes != null) {
            assertEquals(expectedTypes[index], tokenStream.getAttribute(TypeAttribute.class).type());
        }

        OffsetAttribute offsets = tokenStream.getAttribute(OffsetAttribute.class);

        if (expectedStartOffsets != null) {
            assertEquals(expectedStartOffsets[index], offsets.startOffset());
        }

        if (expectedEndOffsets != null) {
            assertEquals(expectedEndOffsets[index], offsets.endOffset());
        }

        index++;
    }
    tokenStream.end();
}
 
Author: open-korean-text | Project: elasticsearch-analysis-openkoreantext | Lines: 25 | Source: TokenStreamAssertions.java
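
A hypothetical call site for this helper might look as follows (the analyzer, input, and expected values are illustrative, not taken from the project). Note that the helper calls reset() and end() but leaves closing the stream to the caller.

// Illustrative usage; WhitespaceAnalyzer splits "hello world" into two tokens.
Analyzer analyzer = new WhitespaceAnalyzer();
TokenStream stream = analyzer.tokenStream("field", "hello world");
TokenStreamAssertions.assertTokenStream(
        stream,
        new String[] {"hello", "world"}, // expected terms
        null,                            // null skips the type assertions
        new int[] {0, 6},                // expected start offsets
        new int[] {5, 11});              // expected end offsets
stream.close();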

Example 2: PrefixAwareTokenFilter

import org.apache.lucene.analysis.tokenattributes.TypeAttribute; // import the required class
public PrefixAwareTokenFilter(TokenStream prefix, TokenStream suffix) {
  super(suffix);
  this.suffix = suffix;
  this.prefix = prefix;
  prefixExhausted = false;
  
  termAtt = addAttribute(CharTermAttribute.class);
  posIncrAtt = addAttribute(PositionIncrementAttribute.class);
  payloadAtt = addAttribute(PayloadAttribute.class);
  offsetAtt = addAttribute(OffsetAttribute.class);
  typeAtt = addAttribute(TypeAttribute.class);
  flagsAtt = addAttribute(FlagsAttribute.class);

  p_termAtt = prefix.addAttribute(CharTermAttribute.class);
  p_posIncrAtt = prefix.addAttribute(PositionIncrementAttribute.class);
  p_payloadAtt = prefix.addAttribute(PayloadAttribute.class);
  p_offsetAtt = prefix.addAttribute(OffsetAttribute.class);
  p_typeAtt = prefix.addAttribute(TypeAttribute.class);
  p_flagsAtt = prefix.addAttribute(FlagsAttribute.class);
}
 
Author: lamsfoundation | Project: lams | Lines: 21 | Source: PrefixAwareTokenFilter.java
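
PrefixAwareTokenFilter emits every token of the prefix stream first, then every token of the suffix stream with its offsets shifted past the prefix, which is why the constructor registers the same six attributes on both sources. A usage sketch follows, with the caveat that in the Lucene line this snippet comes from, the filter's reset() only resets the suffix stream, so the prefix is reset explicitly; the tokenizers and inputs are illustrative:

Tokenizer prefix = new WhitespaceTokenizer();
prefix.setReader(new StringReader("alpha beta"));
Tokenizer suffix = new WhitespaceTokenizer();
suffix.setReader(new StringReader("gamma"));
TokenStream joined = new PrefixAwareTokenFilter(prefix, suffix);
prefix.reset();  // reset the prefix ourselves; joined.reset() only reaches the suffix
joined.reset();
while (joined.incrementToken()) {
    // emits: alpha, beta, gamma
    System.out.println(joined.getAttribute(CharTermAttribute.class).toString());
}
joined.end();
joined.close();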

Example 3: LTPTokenizer

import org.apache.lucene.analysis.tokenattributes.TypeAttribute; // import the required class
/**
 * Lucene constructor
 *
 * @throws UnirestException
 * @throws JSONException
 * @throws IOException
 */
public LTPTokenizer(Set<String> filter)
        throws IOException, JSONException, UnirestException {
    super();
    logger.info("LTPTokenizer Initialize......");
    // Add token offset attribute
    offsetAttr = addAttribute(OffsetAttribute.class);
    // Add token content attribute
    charTermAttr = addAttribute(CharTermAttribute.class);
    // Add token type attribute
    typeAttr = addAttribute(TypeAttribute.class);
    // Add token position attribute
    piAttr = addAttribute(PositionIncrementAttribute.class);
    // Create a new word segmenter to get tokens
    LTPSeg = new LTPWordSegmenter(input);
    // Add filter words set
    this.filter = filter;
}
 
Author: lonly197 | Project: elasticsearch-analysis-ltp | Lines: 25 | Source: LTPTokenizer.java

Example 4: copyTo

import org.apache.lucene.analysis.tokenattributes.TypeAttribute; // import the required class
@Override
public void copyTo(AttributeImpl target) {
  if (target instanceof Token) {
    final Token to = (Token) target;
    to.reinit(this);
    // reinit shares the payload, so clone it:
    if (payload != null) {
      to.payload = payload.clone();
    }
  } else {
    super.copyTo(target);
    ((OffsetAttribute) target).setOffset(startOffset, endOffset);
    ((PositionIncrementAttribute) target).setPositionIncrement(positionIncrement);
    ((PayloadAttribute) target).setPayload((payload == null) ? null : payload.clone());
    ((FlagsAttribute) target).setFlags(flags);
    ((TypeAttribute) target).setType(type);
  }
}
 
Author: gncloud | Project: fastcatsearch3 | Lines: 19 | Source: Token.java
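
The pattern here is a standard AttributeImpl optimization: when the copy target is another Token, a single reinit(this) call transfers every field at once, and only the payload needs an explicit clone afterwards because reinit shares the payload reference; for any other target, each attribute interface is populated one call at a time. The payload is the only field that gets deep-copied, since it is a mutable buffer, while the remaining fields are primitives or immutable strings.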

Example 5: normalizeQueryTarget

import org.apache.lucene.analysis.tokenattributes.TypeAttribute; // import the required class
@Override
public BytesRef normalizeQueryTarget(String val, boolean strict, String fieldName, boolean appendExtraDelim) throws IOException {
  TokenStream ts = getQueryAnalyzer().tokenStream(fieldName, val);
  try {
    ts.reset();
    CharTermAttribute termAtt = ts.getAttribute(CharTermAttribute.class);
    TypeAttribute typeAtt = ts.getAttribute(TypeAttribute.class);
    String matchType = strict ? INDEXED_TOKEN_TYPE : NORMALIZED_TOKEN_TYPE;
    while (ts.incrementToken()) {
      if (matchType.equals(typeAtt.type())) {
        BytesRefBuilder ret = new BytesRefBuilder();
        ret.copyChars(termAtt.toString());
        if (!strict || appendExtraDelim) {
          ret.append(delimBytes, 0, delimBytes.length);
        }
        return ret.get();
      }
    }
    return new BytesRef(BytesRef.EMPTY_BYTES);
  } finally {
    ts.close();
  }
}
 
Author: upenn-libraries | Project: solrplugins | Lines: 24 | Source: CaseInsensitiveSortingTextField.java
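
Reading the code: the query analyzer is expected to emit the same input under more than one token type, and this method returns the first token whose type matches the requested normalization level (INDEXED_TOKEN_TYPE when strict, NORMALIZED_TOKEN_TYPE otherwise), appending the field's delimiter bytes so the result lines up with the joined terms stored in the index. If no token of the requested type appears, an empty BytesRef is returned, and the try/finally ensures the stream is closed on every path.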

Example 6: testTypeForPayload1

import org.apache.lucene.analysis.tokenattributes.TypeAttribute; // import the required class
/** verify that payload gets picked up for 1st group of tokens */
public void testTypeForPayload1() throws IOException {
  TokenTypeJoinFilter ttjf = new TokenTypeJoinFilter(new TokenArrayTokenizer(tokensWithPayloads), new String[] {"normalized", "filing", "prefix"},
          "joined", "normalized", "!", false, false);
  CharTermAttribute termAtt = ttjf.getAttribute(CharTermAttribute.class);
  TypeAttribute typeAtt = ttjf.getAttribute(TypeAttribute.class);
  PayloadAttribute payloadAtt = ttjf.getAttribute(PayloadAttribute.class);
  ttjf.reset();

  assertTrue(ttjf.incrementToken());

  assertEquals("unconsoled!Unconsoled!The ", termAtt.toString());
  assertEquals("joined", typeAtt.type());
  assertEquals("payload1", payloadAtt.getPayload().utf8ToString());

  assertTrue(ttjf.incrementToken());

  assertEquals("room with a view!Room With A View!A ", termAtt.toString());
  assertEquals("joined", typeAtt.type());
  assertNull(payloadAtt.getPayload());

  assertFalse(ttjf.incrementToken());
}
 
Author: upenn-libraries | Project: solrplugins | Lines: 24 | Source: TokenTypeJoinFilterTest.java

Example 7: testTypeForPayload2

import org.apache.lucene.analysis.tokenattributes.TypeAttribute; // import the required class
/** verify that payload gets picked up for 2nd group of tokens */
public void testTypeForPayload2() throws IOException {
  TokenTypeJoinFilter ttjf = new TokenTypeJoinFilter(new TokenArrayTokenizer(tokensWithPayloads), new String[] {"normalized", "filing", "prefix"},
          "joined", "filing", "!", false, false);
  CharTermAttribute termAtt = ttjf.getAttribute(CharTermAttribute.class);
  TypeAttribute typeAtt = ttjf.getAttribute(TypeAttribute.class);
  PayloadAttribute payloadAtt = ttjf.getAttribute(PayloadAttribute.class);
  ttjf.reset();

  assertTrue(ttjf.incrementToken());

  assertEquals("unconsoled!Unconsoled!The ", termAtt.toString());
  assertEquals("joined", typeAtt.type());
  assertNull(payloadAtt.getPayload());

  assertTrue(ttjf.incrementToken());

  assertEquals("room with a view!Room With A View!A ", termAtt.toString());
  assertEquals("joined", typeAtt.type());
  assertEquals("payload2", payloadAtt.getPayload().utf8ToString());

  assertFalse(ttjf.incrementToken());
}
 
Author: upenn-libraries | Project: solrplugins | Lines: 24 | Source: TokenTypeJoinFilterTest.java
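
Taken together, examples 6 and 7 pin down the TokenTypeJoinFilter constructor contract: the String[] lists the input token types to join (in output order), the next argument is the output token type, the argument after that names the input type whose payload is carried over onto the joined token, and "!" is the delimiter placed between the joined parts. Switching that payload-carrier argument from "normalized" to "filing" is the only difference between the two tests, and it changes which joined token retains a payload.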

Example 8: testShorthand2

import org.apache.lucene.analysis.tokenattributes.TypeAttribute; // import the required class
@Test
public void testShorthand2() throws IOException {
  JsonReferencePayloadTokenizer tokenizer = new JsonReferencePayloadTokenizer();
  tokenizer.setReader(new StringReader("{\"filing\": \"something\", \"prefix\": \"The \"}"));
  tokenizer.reset();

  assertTrue(tokenizer.incrementToken());
  assertEquals("something", tokenizer.getAttribute(CharTermAttribute.class).toString());
  assertEquals(JsonReferencePayloadTokenizer.TYPE_FILING, tokenizer.getAttribute(TypeAttribute.class).type());
  assertEquals(1, tokenizer.getAttribute(PositionIncrementAttribute.class).getPositionIncrement());
  assertNull(tokenizer.getAttribute(PayloadAttribute.class).getPayload());

  assertTrue(tokenizer.incrementToken());
  assertEquals("The ", tokenizer.getAttribute(CharTermAttribute.class).toString());
  assertEquals(JsonReferencePayloadTokenizer.TYPE_PREFIX, tokenizer.getAttribute(TypeAttribute.class).type());
  assertEquals(0, tokenizer.getAttribute(PositionIncrementAttribute.class).getPositionIncrement());
  assertNull(tokenizer.getAttribute(PayloadAttribute.class).getPayload());

  assertFalse(tokenizer.incrementToken());
}
 
Author: upenn-libraries | Project: solrplugins | Lines: 21 | Source: JsonReferencePayloadTokenizerTest.java

Example 9: testShorthand3

import org.apache.lucene.analysis.tokenattributes.TypeAttribute; // import the required class
@Test
public void testShorthand3() throws IOException {
  JsonReferencePayloadTokenizer tokenizer = new JsonReferencePayloadTokenizer();
  tokenizer.setReader(new StringReader("{\"prefix\": \"The \", \"filing\": \"something\"}"));
  tokenizer.reset();

  assertTrue(tokenizer.incrementToken());
  assertEquals("something", tokenizer.getAttribute(CharTermAttribute.class).toString());
  assertEquals(JsonReferencePayloadTokenizer.TYPE_FILING, tokenizer.getAttribute(TypeAttribute.class).type());
  assertEquals(1, tokenizer.getAttribute(PositionIncrementAttribute.class).getPositionIncrement());
  assertNull(tokenizer.getAttribute(PayloadAttribute.class).getPayload());

  assertTrue(tokenizer.incrementToken());
  assertEquals("The ", tokenizer.getAttribute(CharTermAttribute.class).toString());
  assertEquals(JsonReferencePayloadTokenizer.TYPE_PREFIX, tokenizer.getAttribute(TypeAttribute.class).type());
  assertEquals(0, tokenizer.getAttribute(PositionIncrementAttribute.class).getPositionIncrement());
  assertNull(tokenizer.getAttribute(PayloadAttribute.class).getPayload());

  assertFalse(tokenizer.incrementToken());
}
 
Author: upenn-libraries | Project: solrplugins | Lines: 21 | Source: JsonReferencePayloadTokenizerTest.java
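
Examples 8 and 9 differ only in the order of the keys in the JSON input, yet they assert identical output: JsonReferencePayloadTokenizer always emits the filing token first with a position increment of 1, then the prefix token stacked at the same position with a position increment of 0, regardless of the order in which the fields appear in the object.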

Example 10: tokensFromAnalysis

import org.apache.lucene.analysis.tokenattributes.TypeAttribute; // import the required class
public static MyToken[] tokensFromAnalysis(Analyzer analyzer, String text, String field) throws IOException
{
  TokenStream stream = analyzer.tokenStream(field, new StringReader(text));
  CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
  PositionIncrementAttribute positionIncrementAttr = stream.addAttribute(PositionIncrementAttribute.class);
  TypeAttribute typeAttr = stream.addAttribute(TypeAttribute.class);
  OffsetAttribute offsetAttr = stream.addAttribute(OffsetAttribute.class);

  stream.reset(); // required before the first incrementToken() call
  ArrayList<MyToken> tokenList = new ArrayList<MyToken>();
  while (stream.incrementToken()) {
    tokenList.add(new MyToken(term.toString(), positionIncrementAttr.getPositionIncrement(), typeAttr.type(),
        offsetAttr.startOffset(), offsetAttr.endOffset()));
  }
  stream.end();
  stream.close();

  return tokenList.toArray(new MyToken[0]);
}
 
Author: oeuvres | Project: Alix | Lines: 18 | Source: Demo.java
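
The helper above assumes a MyToken value class that this page does not show. A minimal hypothetical reconstruction, inferred purely from the constructor call in the loop:

// Hypothetical sketch of the MyToken value class; field names are guesses.
public class MyToken {
    private final String term;
    private final int positionIncrement;
    private final String type;
    private final int startOffset;
    private final int endOffset;

    public MyToken(String term, int positionIncrement, String type,
                   int startOffset, int endOffset) {
        this.term = term;
        this.positionIncrement = positionIncrement;
        this.type = type;
        this.startOffset = startOffset;
        this.endOffset = endOffset;
    }
    // accessors omitted for brevity
}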

Example 11: testCreateComponents

import org.apache.lucene.analysis.tokenattributes.TypeAttribute; // import the required class
public void testCreateComponents() throws Exception
{
    String text = "中华人民共和国很辽阔";
    for (int i = 0; i < text.length(); ++i)
    {
        System.out.print(text.charAt(i) + "" + i + " ");
    }
    System.out.println();
    Analyzer analyzer = new HanLPAnalyzer();
    TokenStream tokenStream = analyzer.tokenStream("field", text);
    tokenStream.reset();
    while (tokenStream.incrementToken())
    {
        CharTermAttribute attribute = tokenStream.getAttribute(CharTermAttribute.class);
        // offset
        OffsetAttribute offsetAtt = tokenStream.getAttribute(OffsetAttribute.class);
        // position increment
        PositionIncrementAttribute positionAttr = tokenStream.getAttribute(PositionIncrementAttribute.class);
        // part of speech (token type)
        TypeAttribute typeAttr = tokenStream.getAttribute(TypeAttribute.class);
        System.out.printf("[%d:%d %d] %s/%s\n", offsetAtt.startOffset(), offsetAtt.endOffset(), positionAttr.getPositionIncrement(), attribute, typeAttr.type());
    }
}
 
Author: hankcs | Project: hanlp-lucene-plugin | Lines: 24 | Source: HanLPAnalyzerTest.java

Example 12: testIssue

import org.apache.lucene.analysis.tokenattributes.TypeAttribute; // import the required class
public void testIssue() throws Exception
{
    Map<String, String> args = new TreeMap<>();
    args.put("enableTraditionalChineseMode", "true");
    args.put("enableNormalization", "true");
    HanLPTokenizerFactory factory = new HanLPTokenizerFactory(args);
    Tokenizer tokenizer = factory.create();
    String text = "會辦台星保證最低價的原因?";

    tokenizer.setReader(new StringReader(text));
    tokenizer.reset();
    while (tokenizer.incrementToken())
    {
        CharTermAttribute attribute = tokenizer.getAttribute(CharTermAttribute.class);
        // offset
        OffsetAttribute offsetAtt = tokenizer.getAttribute(OffsetAttribute.class);
        // position increment
        PositionIncrementAttribute positionAttr = tokenizer.getAttribute(PositionIncrementAttribute.class);
        // part of speech (token type)
        TypeAttribute typeAttr = tokenizer.getAttribute(TypeAttribute.class);
        System.out.printf("[%d:%d %d] %s/%s\n", offsetAtt.startOffset(), offsetAtt.endOffset(), positionAttr.getPositionIncrement(), attribute, typeAttr.type());
    }
}
 
Author: hankcs | Project: hanlp-lucene-plugin | Lines: 24 | Source: HanLPAnalyzerTest.java

Example 13: testCreateComponents

import org.apache.lucene.analysis.tokenattributes.TypeAttribute; // import the required class
public void testCreateComponents() throws Exception
{
    String text = "中华人民共和国很辽阔";
    for (int i = 0; i < text.length(); ++i)
    {
        System.out.print(text.charAt(i) + "" + i + " ");
    }
    System.out.println();
    Analyzer analyzer = new HanLPIndexAnalyzer();
    TokenStream tokenStream = analyzer.tokenStream("field", text);
    tokenStream.reset();
    while (tokenStream.incrementToken())
    {
        CharTermAttribute attribute = tokenStream.getAttribute(CharTermAttribute.class);
        // offset
        OffsetAttribute offsetAtt = tokenStream.getAttribute(OffsetAttribute.class);
        // position increment
        PositionIncrementAttribute positionAttr = tokenStream.getAttribute(PositionIncrementAttribute.class);
        // part of speech (token type)
        TypeAttribute typeAttr = tokenStream.getAttribute(TypeAttribute.class);
        System.out.printf("[%d:%d %d] %s/%s\n", offsetAtt.startOffset(), offsetAtt.endOffset(), positionAttr.getPositionIncrement(), attribute, typeAttr.type());
    }
}
 
Author: hankcs | Project: hanlp-lucene-plugin | Lines: 24 | Source: HanLPIndexAnalyzerTest.java

Example 14: tokenize

import org.apache.lucene.analysis.tokenattributes.TypeAttribute; // import the required class
/**
 * Tokenize the given input using a {@link URLTokenizer}. Settings which have been set on this {@link URLTokenFilter}
 * will be passed along to the tokenizer.
 * @param input a string to be tokenized
 * @return a list of tokens extracted from the input string
 * @throws IOException
 */
private List<Token> tokenize(String input) throws IOException {
    List<Token> tokens = new ArrayList<>();
    URLTokenizer tokenizer = new URLTokenizer();
    // create a copy of the parts list to avoid ConcurrentModificationException when sorting
    tokenizer.setParts(new ArrayList<>(parts));
    tokenizer.setUrlDecode(urlDeocde);
    tokenizer.setTokenizeHost(tokenizeHost);
    tokenizer.setTokenizePath(tokenizePath);
    tokenizer.setTokenizeQuery(tokenizeQuery);
    tokenizer.setAllowMalformed(allowMalformed || passthrough);
    tokenizer.setTokenizeMalformed(tokenizeMalformed);
    tokenizer.setReader(new StringReader(input));
    tokenizer.reset();

    String term;
    URLPart part;
    OffsetAttribute offset;
    while (tokenizer.incrementToken()) {
        term = tokenizer.getAttribute(CharTermAttribute.class).toString();
        part = URLPart.fromString(tokenizer.getAttribute(TypeAttribute.class).type());
        offset = tokenizer.getAttribute(OffsetAttribute.class);
        tokens.add(new Token(term, part, offset.startOffset(), offset.endOffset()));
    }
    return tokens;
}
 
Author: jlinn | Project: elasticsearch-analysis-url | Lines: 33 | Source: URLTokenFilter.java
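
Two details are worth noting in this method: the parts list is defensively copied before being handed to the URLTokenizer, because the tokenizer sorts the list it receives and sorting the live field would risk a ConcurrentModificationException (as the inline comment says); and allowMalformed is forced on whenever passthrough is enabled, since a filter in passthrough mode must never reject its input.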

Example 15: collectExtractedNouns

import org.apache.lucene.analysis.tokenattributes.TypeAttribute; // import the required class
protected List<TestToken> collectExtractedNouns(TokenStream stream) throws IOException {
    CharTermAttribute charTermAtt = stream.addAttribute(CharTermAttribute.class);
    OffsetAttribute offSetAtt = stream.addAttribute(OffsetAttribute.class);
    TypeAttribute typeAttr = stream.addAttribute(TypeAttribute.class);

    List<TestToken> extractedTokens = Lists.newArrayList();

    while(stream.incrementToken()) {
        TestToken t = getToken(charTermAtt.toString(), offSetAtt.startOffset(), offSetAtt.endOffset());

        System.out.println("termAtt.term() : " + charTermAtt.toString());
        System.out.println("startoffSetAtt : " + offSetAtt.startOffset());
        System.out.println("endoffSetAtt : " + offSetAtt.endOffset());
        System.out.println("typeAttr : " + typeAttr.toString());

        extractedTokens.add(t);
    }

    return extractedTokens;
}
 
Author: jobplanet | Project: elasticsearch-twitter-korean | Lines: 21 | Source: AnalyzerTestUtil.java


Note: the org.apache.lucene.analysis.tokenattributes.TypeAttribute examples on this page were compiled by 纯净天空 from open-source code hosted on GitHub, MSDocs, and similar platforms. The snippets are taken from open-source projects contributed by their respective authors; copyright remains with the original authors, and any use or redistribution is subject to each project's license. Please do not republish without permission.