当前位置: 首页>>代码示例>>Java>>正文


Java AttributeFactory类代码示例

本文整理汇总了Java中org.apache.lucene.util.AttributeFactory的典型用法代码示例。如果您正苦于以下问题:Java AttributeFactory类的具体用法?Java AttributeFactory怎么用?Java AttributeFactory使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


AttributeFactory类属于org.apache.lucene.util包,在下文中一共展示了AttributeFactory类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: super

import org.apache.lucene.util.AttributeFactory; //导入依赖的package包/类
/**
 * Creates a path-hierarchy tokenizer that splits the input on {@code delimiter},
 * writing {@code replacement} in place of the delimiter in emitted tokens and
 * skipping {@code skip} path components.
 *
 * @param factory     attribute factory used to build the token attributes
 * @param input       reader supplying the path text to tokenize
 * @param bufferSize  initial capacity for the term and result buffers; must be >= 0
 * @param delimiter   character that separates path components
 * @param replacement character emitted in place of the delimiter
 * @param skip        number of path components to omit; must be >= 0
 * @throws IllegalArgumentException if {@code bufferSize} or {@code skip} is negative
 */
public PathHierarchyTokenizer(AttributeFactory factory, Reader input, int bufferSize,
                              char delimiter, char replacement, int skip) {
  super(factory, input);
  // Validate arguments up front, before touching any instance state.
  if (bufferSize < 0) {
    throw new IllegalArgumentException("bufferSize cannot be negative");
  }
  if (skip < 0) {
    throw new IllegalArgumentException("skip cannot be negative");
  }
  this.skip = skip;
  this.replacement = replacement;
  this.delimiter = delimiter;
  termAtt.resizeBuffer(bufferSize);
  resultToken = new StringBuilder(bufferSize);
}
 
开发者ID:lamsfoundation,项目名称:lams,代码行数:17,代码来源:PathHierarchyTokenizer.java

示例2: super

import org.apache.lucene.util.AttributeFactory; //导入依赖的package包/类
/**
 * Creates a reverse path-hierarchy tokenizer that splits the input on
 * {@code delimiter}, writing {@code replacement} in place of the delimiter in
 * emitted tokens and skipping {@code skip} path components.
 *
 * @param factory     attribute factory used to build the token attributes
 * @param input       reader supplying the path text to tokenize
 * @param bufferSize  initial capacity for the term/result buffers; must be >= 0
 * @param delimiter   character that separates path components
 * @param replacement character emitted in place of the delimiter
 * @param skip        number of path components to omit; must be >= 0
 * @throws IllegalArgumentException if {@code bufferSize} or {@code skip} is negative
 */
public ReversePathHierarchyTokenizer(AttributeFactory factory, Reader input, int bufferSize,
                                     char delimiter, char replacement, int skip) {
  super(factory, input);
  // Validate arguments up front, before touching any instance state.
  if (bufferSize < 0) {
    throw new IllegalArgumentException("bufferSize cannot be negative");
  }
  if (skip < 0) {
    throw new IllegalArgumentException("skip cannot be negative");
  }
  this.skip = skip;
  this.replacement = replacement;
  this.delimiter = delimiter;
  termAtt.resizeBuffer(bufferSize);
  resultToken = new StringBuilder(bufferSize);
  resultTokenBuffer = new char[bufferSize];
  // Capacity hint only: roughly one delimiter per ten input characters.
  delimiterPositions = new ArrayList<>(bufferSize / 10);
}
 
开发者ID:lamsfoundation,项目名称:lams,代码行数:18,代码来源:ReversePathHierarchyTokenizer.java

示例3: IKTokenizer

import org.apache.lucene.util.AttributeFactory; //导入依赖的package包/类
/**
 * Creates a Tokenizer backed by the IK segmenter, using the supplied
 * AttributeFactory to build token attributes.
 *
 * @param factory  attribute factory for the token attributes
 * @param useSmart segmentation mode flag passed through to IKSegmenter
 *                 (presumably toggles IK's "smart" coarse-grained mode — confirm
 *                 against IKSegmenter)
 */
public IKTokenizer(AttributeFactory factory, boolean useSmart){
	super(factory);
	// Register the attributes this tokenizer populates for each token.
	// NOTE(review): registration order may affect attribute ordering — kept as-is.
	offsetAtt = addAttribute(OffsetAttribute.class);
	termAtt = addAttribute(CharTermAttribute.class);
	typeAtt = addAttribute(TypeAttribute.class);
	// Wrap the Tokenizer's input reader with the IK segmentation engine.
	_IKImplement = new IKSegmenter(input , useSmart);
}
 
开发者ID:navis87,项目名称:IKAnalyzer,代码行数:8,代码来源:IKTokenizer.java

示例4: PatternTokenizer

import org.apache.lucene.util.AttributeFactory; //导入依赖的package包/类
/** creates a new PatternTokenizer returning tokens from group (-1 for split functionality) */
public PatternTokenizer(AttributeFactory factory, Reader input, Pattern pattern, int group) {
  super(factory, input);
  this.group = group;

  // Use "" instead of str so don't consume chars
  // (fillBuffer) from the input on throwing IAE below:
  matcher = pattern.matcher("");

  // confusingly group count depends ENTIRELY on the pattern but is only accessible via matcher
  if (group >= 0 && group > matcher.groupCount()) {
    throw new IllegalArgumentException("invalid group specified: pattern only has: " + matcher.groupCount() + " capturing groups");
  }
}
 
开发者ID:lamsfoundation,项目名称:lams,代码行数:15,代码来源:PatternTokenizer.java

示例5: create

import org.apache.lucene.util.AttributeFactory; //导入依赖的package包/类
/**
 * Builds the edge n-gram tokenizer appropriate for the configured match version.
 *
 * @throws IllegalArgumentException if a non-FRONT side is requested on Lucene 4.4+
 */
@Override
public Tokenizer create(AttributeFactory factory, Reader input) {
  // Pre-4.4 versions keep the legacy tokenizer, which still supports both sides.
  if (!luceneMatchVersion.onOrAfter(Version.LUCENE_4_4_0)) {
    return new Lucene43EdgeNGramTokenizer(luceneMatchVersion, input, side, minGramSize, maxGramSize);
  }
  // From 4.4 on, only front n-grams are supported.
  if (!EdgeNGramTokenFilter.Side.FRONT.getLabel().equals(side)) {
    throw new IllegalArgumentException(EdgeNGramTokenizer.class.getSimpleName() + " does not support backward n-grams as of Lucene 4.4");
  }
  return new EdgeNGramTokenizer(input, minGramSize, maxGramSize);
}
 
开发者ID:lamsfoundation,项目名称:lams,代码行数:12,代码来源:EdgeNGramTokenizerFactory.java

示例6: create

import org.apache.lucene.util.AttributeFactory; //导入依赖的package包/类
/** Creates the {@link TokenStream} of n-grams from the given {@link Reader} and {@link AttributeFactory}. */
@Override
public Tokenizer create(AttributeFactory factory, Reader input) {
  // 4.4+ uses the rewritten NGramTokenizer; earlier versions get the legacy implementation.
  return luceneMatchVersion.onOrAfter(Version.LUCENE_4_4_0)
      ? new NGramTokenizer(luceneMatchVersion, factory, input, minGramSize, maxGramSize)
      : new Lucene43NGramTokenizer(factory, input, minGramSize, maxGramSize);
}
 
开发者ID:lamsfoundation,项目名称:lams,代码行数:10,代码来源:NGramTokenizerFactory.java

示例7: NumericTokenStream

import org.apache.lucene.util.AttributeFactory; //导入依赖的package包/类
/**
 * Expert: creates a token stream for numeric values with the specified
 * {@code precisionStep}, building attributes through the given
 * {@link org.apache.lucene.util.AttributeFactory}. The stream is not yet
 * initialized; a value must still be assigned via one of the
 * set<em>???</em>Value() methods before use.
 *
 * @param factory       factory wrapped by the numeric-aware attribute factory
 * @param precisionStep precision step for trie encoding; must be at least 1
 * @throws IllegalArgumentException if {@code precisionStep < 1}
 */
public NumericTokenStream(AttributeFactory factory, final int precisionStep) {
  super(new NumericAttributeFactory(factory));
  if (precisionStep < 1) {
    throw new IllegalArgumentException("precisionStep must be >=1");
  }
  // NOTE(review): shift starts out negative — presumably advanced to 0 by the
  // first increment; confirm against incrementToken().
  numericAtt.setShift(-precisionStep);
  this.precisionStep = precisionStep;
}
 
开发者ID:lamsfoundation,项目名称:lams,代码行数:15,代码来源:NumericTokenStream.java

示例8: create

import org.apache.lucene.util.AttributeFactory; //导入依赖的package包/类
/** Builds a StandardTokenizer, honoring the configured match version and max token length. */
@Override
public StandardTokenizer create(AttributeFactory factory, Reader input) {
  // With no configured match version, fall back to the version-free constructor.
  final StandardTokenizer tokenizer = (luceneMatchVersion == null)
      ? new StandardTokenizer(factory, input)
      : new StandardTokenizer(luceneMatchVersion, factory, input);
  tokenizer.setMaxTokenLength(maxTokenLength);
  return tokenizer;
}
 
开发者ID:lamsfoundation,项目名称:lams,代码行数:12,代码来源:StandardTokenizerFactory.java

示例9: create

import org.apache.lucene.util.AttributeFactory; //导入依赖的package包/类
/** Builds a ClassicTokenizer, honoring the configured match version and max token length. */
@Override
public ClassicTokenizer create(AttributeFactory factory, Reader input) {
  // With no configured match version, fall back to the version-free constructor.
  final ClassicTokenizer tokenizer = (luceneMatchVersion == null)
      ? new ClassicTokenizer(factory, input)
      : new ClassicTokenizer(luceneMatchVersion, factory, input);
  tokenizer.setMaxTokenLength(maxTokenLength);
  return tokenizer;
}
 
开发者ID:lamsfoundation,项目名称:lams,代码行数:12,代码来源:ClassicTokenizerFactory.java

示例10: create

import org.apache.lucene.util.AttributeFactory; //导入依赖的package包/类
/** Builds a UAX29URLEmailTokenizer, honoring the configured match version and max token length. */
@Override
public UAX29URLEmailTokenizer create(AttributeFactory factory, Reader input) {
  // With no configured match version, fall back to the version-free constructor.
  final UAX29URLEmailTokenizer tokenizer = (luceneMatchVersion == null)
      ? new UAX29URLEmailTokenizer(factory, input)
      : new UAX29URLEmailTokenizer(luceneMatchVersion, factory, input);
  tokenizer.setMaxTokenLength(maxTokenLength);
  return tokenizer;
}
 
开发者ID:lamsfoundation,项目名称:lams,代码行数:12,代码来源:UAX29URLEmailTokenizerFactory.java

示例11: Tokenizer

import org.apache.lucene.util.AttributeFactory; //导入依赖的package包/类
/**
 * Construct a token stream processing the given input using the given AttributeFactory.
 *
 * @param factory attribute factory used to build the token attributes
 * @param input   reader to tokenize; must not be null
 * @throws NullPointerException if {@code input} is null
 */
protected Tokenizer(AttributeFactory factory, Reader input) {
  super(factory);
  // Standard-library null check; throws the same NPE type and message as the
  // previous hand-rolled check. Fully qualified to avoid touching imports.
  this.inputPending = java.util.Objects.requireNonNull(input, "input must not be null");
}
 
开发者ID:lamsfoundation,项目名称:lams,代码行数:9,代码来源:Tokenizer.java

示例12: ThaiTokenizer

import org.apache.lucene.util.AttributeFactory; //导入依赖的package包/类
/**
 * Creates a new ThaiTokenizer, supplying the AttributeFactory.
 *
 * @param factory attribute factory used to build the token attributes
 * @param reader  reader supplying the Thai text to tokenize
 * @throws UnsupportedOperationException if this JRE lacks the dictionary-based
 *         BreakIterator needed for Thai segmentation (DBBI_AVAILABLE is false)
 */
public ThaiTokenizer(AttributeFactory factory, Reader reader) {
  // Clone the shared sentence prototype: BreakIterator instances carry
  // iteration state, so each tokenizer needs a private copy.
  super(factory, reader, (BreakIterator)sentenceProto.clone());
  if (!DBBI_AVAILABLE) {
    throw new UnsupportedOperationException("This JRE does not have support for Thai segmentation");
  }
  // Private word-level break iterator, cloned from the shared prototype.
  wordBreaker = (BreakIterator)proto.clone();
}
 
开发者ID:lamsfoundation,项目名称:lams,代码行数:9,代码来源:ThaiTokenizer.java

示例13: CharTokenizer

import org.apache.lucene.util.AttributeFactory; //导入依赖的package包/类
/**
 * Creates a CharTokenizer bound to a specific Lucene match version.
 *
 * @deprecated Use {@link #CharTokenizer(AttributeFactory, Reader)}
 */
@Deprecated
public CharTokenizer(Version matchVersion, AttributeFactory factory,
    Reader input) {
  super(factory, input);
  // Version-specific character utilities; retained only for back-compat
  // with callers still passing a match version.
  charUtils = CharacterUtils.getInstance(matchVersion);
}
 
开发者ID:lamsfoundation,项目名称:lams,代码行数:10,代码来源:CharTokenizer.java

示例14: create

import org.apache.lucene.util.AttributeFactory; //导入依赖的package包/类
/** Builds a WhitespaceTokenizer, honoring the configured match version when present. */
@Override
public WhitespaceTokenizer create(AttributeFactory factory, Reader input) {
  // With no configured match version, fall back to the version-free constructor.
  return luceneMatchVersion == null
      ? new WhitespaceTokenizer(factory, input)
      : new WhitespaceTokenizer(luceneMatchVersion, factory, input);
}
 
开发者ID:lamsfoundation,项目名称:lams,代码行数:8,代码来源:WhitespaceTokenizerFactory.java

示例15: KeywordTokenizer

import org.apache.lucene.util.AttributeFactory; //导入依赖的package包/类
/**
 * Creates a KeywordTokenizer whose term buffer starts at {@code bufferSize} chars.
 *
 * @param factory    attribute factory used to build the token attributes
 * @param input      reader supplying the text to tokenize
 * @param bufferSize initial term-buffer capacity; must be positive
 * @throws IllegalArgumentException if {@code bufferSize} is not positive
 */
public KeywordTokenizer(AttributeFactory factory, Reader input, int bufferSize) {
  super(factory, input);
  if (bufferSize < 1) {
    throw new IllegalArgumentException("bufferSize must be > 0");
  }
  termAtt.resizeBuffer(bufferSize);
}
 
开发者ID:lamsfoundation,项目名称:lams,代码行数:8,代码来源:KeywordTokenizer.java


注:本文中的org.apache.lucene.util.AttributeFactory类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。