

Java Token.setPositionIncrement Method Code Examples

This article collects typical usage examples of the Java method org.apache.lucene.analysis.Token.setPositionIncrement, drawn from open-source projects. If you are wondering what Token.setPositionIncrement does and how to use it in practice, the curated examples below should help. You can also explore further usage examples of the containing class, org.apache.lucene.analysis.Token.


Below are 15 code examples of the Token.setPositionIncrement method, sorted by popularity by default.
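
Before the examples, here is a minimal, self-contained sketch (hypothetical, not taken from any of the projects below) of what the position increment expresses: 1 advances to the next position (the default), 0 stacks a token on the previous position (typical for synonyms), and values greater than 1 leave gaps, e.g. for removed stopwords.

import org.apache.lucene.analysis.Token;

public class PositionIncrementDemo {
  public static void main(String[] args) {
    Token quick = new Token("quick", 0, 5);
    quick.setPositionIncrement(1);  // normal successor token (the default)

    Token fast = new Token("fast", 0, 5);
    fast.setPositionIncrement(0);   // synonym: occupies the same position as "quick"

    Token fox = new Token("fox", 10, 13);
    fox.setPositionIncrement(2);    // gap: a stopword before "fox" was removed

    System.out.println(quick.getPositionIncrement()); // 1
    System.out.println(fast.getPositionIncrement());  // 0
    System.out.println(fox.getPositionIncrement());   // 2
  }
}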

Example 1: testCountPositions

import org.apache.lucene.analysis.Token; // import the required package/class
public void testCountPositions() throws IOException {
    // We're looking to make sure that we:
    Token t1 = new Token();      // Don't count tokens without an increment
    t1.setPositionIncrement(0);
    Token t2 = new Token();
    t2.setPositionIncrement(1);  // Count normal tokens with one increment
    Token t3 = new Token();
    t3.setPositionIncrement(2);  // Count funny tokens with more than one increment
    int finalTokenIncrement = 4; // Count the final token increment on the rare token streams that have them
    Token[] tokens = new Token[] {t1, t2, t3};
    Collections.shuffle(Arrays.asList(tokens), random());
    final TokenStream tokenStream = new CannedTokenStream(finalTokenIncrement, 0, tokens);
    // TODO: we have no CannedAnalyzer?
    Analyzer analyzer = new Analyzer() {
            @Override
            public TokenStreamComponents createComponents(String fieldName) {
                return new TokenStreamComponents(new MockTokenizer(), tokenStream);
            }
        };
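    // Expected positions: 0 (t1) + 1 (t2) + 2 (t3) + 4 (final token increment) = 7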
    assertThat(TokenCountFieldMapper.countPositions(analyzer, "", ""), equalTo(7));
}
 
Developer: justor, Project: elasticsearch_my, Lines: 22, Source: TokenCountFieldMapperTests.java

Example 2: testIllegalPositions

import org.apache.lucene.analysis.Token; // import the required package/class
public void testIllegalPositions() throws Exception {
  Directory dir = newDirectory();
  IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
  Document doc = new Document();
  Token t1 = new Token("foo", 0, 3);
  t1.setPositionIncrement(Integer.MAX_VALUE);
  Token t2 = new Token("bar", 4, 7);
  t2.setPositionIncrement(200);
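  // Together these increments push the cumulative position past Integer.MAX_VALUE, which IndexWriter rejects.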
  TokenStream overflowingTokenStream = new CannedTokenStream(
      new Token[] { t1, t2 }
  );
  Field field = new TextField("foo", overflowingTokenStream);
  doc.add(field);
  try {
    iw.addDocument(doc);
    fail();
  } catch (IllegalArgumentException expected) {
    // expected exception
  }
  iw.close();
  dir.close();
}
 
Developer: europeana, Project: search, Lines: 23, Source: TestIndexWriterExceptions.java

Example 3: testLegalbutVeryLargePositions

import org.apache.lucene.analysis.Token; // import the required package/class
public void testLegalbutVeryLargePositions() throws Exception {
  Directory dir = newDirectory();
  IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
  Document doc = new Document();
  Token t1 = new Token("foo", 0, 3);
  t1.setPositionIncrement(Integer.MAX_VALUE-500);
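  // Very large but still legal: the cumulative position fits in an int.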
  if (random().nextBoolean()) {
    t1.setPayload(new BytesRef(new byte[] { 0x1 } ));
  }
  TokenStream overflowingTokenStream = new CannedTokenStream(
      new Token[] { t1 }
  );
  Field field = new TextField("foo", overflowingTokenStream);
  doc.add(field);
  iw.addDocument(doc);
  iw.close();
  dir.close();
}
 
Developer: europeana, Project: search, Lines: 19, Source: TestIndexWriterExceptions.java

Example 4: analyze

import org.apache.lucene.analysis.Token; // import the required package/class
protected void analyze(Collection<Token> result, String text, int offset, int flagsAttValue) throws IOException {
  TokenStream stream = analyzer.tokenStream("", text);
  // TODO: support custom attributes
  CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
  TypeAttribute typeAtt = stream.addAttribute(TypeAttribute.class);
  PayloadAttribute payloadAtt = stream.addAttribute(PayloadAttribute.class);
  PositionIncrementAttribute posIncAtt = stream.addAttribute(PositionIncrementAttribute.class);
  OffsetAttribute offsetAtt = stream.addAttribute(OffsetAttribute.class);
  stream.reset();
  while (stream.incrementToken()) {      
    Token token = new Token();
    token.copyBuffer(termAtt.buffer(), 0, termAtt.length());
    token.setOffset(offset + offsetAtt.startOffset(), 
                    offset + offsetAtt.endOffset());
    token.setFlags(flagsAttValue); //overwriting any flags already set...
    token.setType(typeAtt.type());
    token.setPayload(payloadAtt.getPayload());
    token.setPositionIncrement(posIncAtt.getPositionIncrement());
    result.add(token);
  }
  stream.end();
  stream.close();
}
 
Developer: europeana, Project: search, Lines: 24, Source: SpellingQueryConverter.java

Example 5: mergeTokenStream

import org.apache.lucene.analysis.Token; // import the required package/class
private List<Token> mergeTokenStream(Map<Integer, List<Token>> tokenPosMap) {
    List<Token> rsList = Lists.newLinkedList();

    int prevPos = 0;
    for (int pos : tokenPosMap.keySet()) {
        int tokenIncIndex = rsList.size();
        List<Token> tokens = tokenPosMap.get(pos);
        for (Token token : tokens) {
            token.setPositionIncrement(0);
            rsList.add(token);
        }

        if (rsList.size() > tokenIncIndex && null != rsList.get(tokenIncIndex)) {
            rsList.get(tokenIncIndex).setPositionIncrement(pos - prevPos);
        }
        prevPos = pos;
    }
    return rsList;
}
 
Developer: smalldirector, Project: solr-multilingual-analyzer, Lines: 20, Source: MultiLangTokenizer.java
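
A hypothetical usage sketch for mergeTokenStream follows (not part of the original source). Note that the loop consumes tokenPosMap.keySet() in iteration order, so a position-sorted map such as TreeMap is assumed, along with the usual java.util imports:

// Hypothetical usage, assuming a position-sorted map such as TreeMap:
Map<Integer, List<Token>> tokenPosMap = new TreeMap<>();
tokenPosMap.computeIfAbsent(1, k -> new LinkedList<>()).add(new Token("hello", 0, 5));
tokenPosMap.computeIfAbsent(1, k -> new LinkedList<>()).add(new Token("hallo", 0, 5));
tokenPosMap.computeIfAbsent(3, k -> new LinkedList<>()).add(new Token("world", 6, 11));

List<Token> merged = mergeTokenStream(tokenPosMap);
// Resulting increments: "hello" = 1 (advance to position 1), "hallo" = 0 (same position),
// "world" = 2 (position 2 is empty, so jump from position 1 to 3).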

Example 6: getNextPrefixInputToken

import org.apache.lucene.analysis.Token; // import the required package/class
private Token getNextPrefixInputToken(Token token) throws IOException {
  if (!prefix.incrementToken()) return null;
  token.copyBuffer(p_termAtt.buffer(), 0, p_termAtt.length());
  token.setPositionIncrement(p_posIncrAtt.getPositionIncrement());
  token.setFlags(p_flagsAtt.getFlags());
  token.setOffset(p_offsetAtt.startOffset(), p_offsetAtt.endOffset());
  token.setType(p_typeAtt.type());
  token.setPayload(p_payloadAtt.getPayload());
  return token;
}
 
Developer: lamsfoundation, Project: lams, Lines: 11, Source: PrefixAwareTokenFilter.java

Example 7: getNextSuffixInputToken

import org.apache.lucene.analysis.Token; // import the required package/class
private Token getNextSuffixInputToken(Token token) throws IOException {
  if (!suffix.incrementToken()) return null;
  token.copyBuffer(termAtt.buffer(), 0, termAtt.length());
  token.setPositionIncrement(posIncrAtt.getPositionIncrement());
  token.setFlags(flagsAtt.getFlags());
  token.setOffset(offsetAtt.startOffset(), offsetAtt.endOffset());
  token.setType(typeAtt.type());
  token.setPayload(payloadAtt.getPayload());
  return token;
}
 
Developer: lamsfoundation, Project: lams, Lines: 11, Source: PrefixAwareTokenFilter.java

Example 8: createToken

import org.apache.lucene.analysis.Token; // import the required package/class
private static Token createToken(String term, int start, int offset, int positionIncrement) {
  Token token = new Token();
  token.setOffset(start, offset);
  token.copyBuffer(term.toCharArray(), 0, term.length());
  token.setPositionIncrement(positionIncrement);
  return token;
}
 
Developer: europeana, Project: search, Lines: 10, Source: ShingleFilterTest.java

Example 9: makeToken

import org.apache.lucene.analysis.Token; // import the required package/class
private Token makeToken(String text, int posIncr, int startOffset, int endOffset) {
  final Token t = new Token();
  t.append(text);
  t.setPositionIncrement(posIncr);
  t.setOffset(startOffset, endOffset);
  return t;
}
 
Developer: europeana, Project: search, Lines: 8, Source: TestPostingsOffsets.java

Example 10: convert

import org.apache.lucene.analysis.Token; // import the required package/class
@Override
public Collection<Token> convert(String origQuery) {
  Collection<Token> result = new HashSet<>();
  WhitespaceAnalyzer analyzer = new WhitespaceAnalyzer();
  
  TokenStream ts = null;
  try {
    ts = analyzer.tokenStream("", origQuery);
    // TODO: support custom attributes
    CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
    OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
    TypeAttribute typeAtt = ts.addAttribute(TypeAttribute.class);
    FlagsAttribute flagsAtt = ts.addAttribute(FlagsAttribute.class);
    PayloadAttribute payloadAtt = ts.addAttribute(PayloadAttribute.class);
    PositionIncrementAttribute posIncAtt = ts.addAttribute(PositionIncrementAttribute.class);

    ts.reset();

    while (ts.incrementToken()) {
      Token tok = new Token();
      tok.copyBuffer(termAtt.buffer(), 0, termAtt.length());
      tok.setOffset(offsetAtt.startOffset(), offsetAtt.endOffset());
      tok.setFlags(flagsAtt.getFlags());
      tok.setPayload(payloadAtt.getPayload());
      tok.setPositionIncrement(posIncAtt.getPositionIncrement());
      tok.setType(typeAtt.type());
      result.add(tok);
    }
    ts.end();      
    return result;
  } catch (IOException e) {
    throw new RuntimeException(e);
  } finally {
    IOUtils.closeWhileHandlingException(ts);
  }
}
 
Developer: europeana, Project: search, Lines: 37, Source: SimpleQueryConverter.java
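
A hypothetical usage sketch (not part of the original test class, and assuming a default no-arg constructor for SimpleQueryConverter):

Collection<Token> tokens = new SimpleQueryConverter().convert("foo bar");
for (Token t : tokens) {
  System.out.println(t + " posInc=" + t.getPositionIncrement());
}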

Example 11: token

import org.apache.lucene.analysis.Token; // import the required package/class
private static Token token(String term, int posInc, int posLength, int startOffset, int endOffset) {
    final Token t = new Token(term, startOffset, endOffset);
    t.setPositionIncrement(posInc);
    t.setPositionLength(posLength);
    return t;
}
 
Developer: justor, Project: elasticsearch_my, Lines: 7, Source: FlattenGraphTokenFilterFactoryTests.java

Example 12: testBooleanPhraseWithSynonym

import org.apache.lucene.analysis.Token; // import the required package/class
public void testBooleanPhraseWithSynonym() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
  Document doc = new Document();
  FieldType type = new FieldType(TextField.TYPE_NOT_STORED);
  type.setStoreTermVectorOffsets(true);
  type.setStoreTermVectorPositions(true);
  type.setStoreTermVectors(true);
  type.freeze();
  Token syn = new Token("httpwwwfacebookcom", 6, 29);
  syn.setPositionIncrement(0);
  CannedTokenStream ts = new CannedTokenStream(
      new Token("test", 0, 4),
      new Token("http", 6, 10),
      syn,
      new Token("www", 13, 16),
      new Token("facebook", 17, 25),
      new Token("com", 26, 29)
  );
  Field field = new Field("field", ts, type);
  doc.add(field);
  doc.add(new StoredField("field", "Test: http://www.facebook.com"));
  writer.addDocument(doc);
  FastVectorHighlighter highlighter = new FastVectorHighlighter();
  
  IndexReader reader = DirectoryReader.open(writer, true);
  int docId = 0;
  
  // query1: match
  PhraseQuery pq = new PhraseQuery();
  pq.add(new Term("field", "test"));
  pq.add(new Term("field", "http"));
  pq.add(new Term("field", "www"));
  pq.add(new Term("field", "facebook"));
  pq.add(new Term("field", "com"));
  FieldQuery fieldQuery  = highlighter.getFieldQuery(pq, reader);
  String[] bestFragments = highlighter.getBestFragments(fieldQuery, reader, docId, "field", 54, 1);
  assertEquals("<b>Test: http://www.facebook.com</b>", bestFragments[0]);
  
  // query2: match
  PhraseQuery pq2 = new PhraseQuery();
  pq2.add(new Term("field", "test"));
  pq2.add(new Term("field", "httpwwwfacebookcom"));
  pq2.add(new Term("field", "www"));
  pq2.add(new Term("field", "facebook"));
  pq2.add(new Term("field", "com"));
  fieldQuery  = highlighter.getFieldQuery(pq2, reader);
  bestFragments = highlighter.getBestFragments(fieldQuery, reader, docId, "field", 54, 1);
  assertEquals("<b>Test: http://www.facebook.com</b>", bestFragments[0]);
  
  // query3: OR query1 and query2 together
  BooleanQuery bq = new BooleanQuery();
  bq.add(pq, BooleanClause.Occur.SHOULD);
  bq.add(pq2, BooleanClause.Occur.SHOULD);
  fieldQuery  = highlighter.getFieldQuery(bq, reader);
  bestFragments = highlighter.getBestFragments(fieldQuery, reader, docId, "field", 54, 1);
  assertEquals("<b>Test: http://www.facebook.com</b>", bestFragments[0]);
  
  reader.close();
  writer.close();
  dir.close();
}
 
Developer: europeana, Project: search, Lines: 63, Source: FastVectorHighlighterTest.java

Example 13: token

import org.apache.lucene.analysis.Token; // import the required package/class
private static Token token( String term, int posInc, int startOffset, int endOffset ) {
  Token t = new Token( term, startOffset, endOffset );
  t.setPositionIncrement( posInc );
  return t;
}
 
Developer: europeana, Project: search, Lines: 6, Source: FastVectorHighlighterTest.java

Example 14: token

import org.apache.lucene.analysis.Token; // import the required package/class
private static Token token(String term, int posInc, int posLength) {
  final Token t = new Token(term, 0, 0);
  t.setPositionIncrement(posInc);
  t.setPositionLength(posLength);
  return t;
}
 
Developer: europeana, Project: search, Lines: 7, Source: FuzzySuggesterTest.java

Example 15: token

import org.apache.lucene.analysis.Token; // import the required package/class
private static Token token(String term, int posInc, int posLength) {
  final Token t = new Token(term, 0, term.length());
  t.setPositionIncrement(posInc);
  t.setPositionLength(posLength);
  return t;
}
 
Developer: europeana, Project: search, Lines: 7, Source: TestTermAutomatonQuery.java


Note: The org.apache.lucene.analysis.Token.setPositionIncrement examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are taken from open-source projects contributed by their authors; copyright remains with the original authors, and distribution and use are subject to each project's license. Please do not republish without permission.