本文整理汇总了C#中IOffsetAttribute类的典型用法代码示例。如果您正苦于以下问题:C# IOffsetAttribute类的具体用法?C# IOffsetAttribute怎么用?C# IOffsetAttribute使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了IOffsetAttribute类的15个代码示例，这些示例默认按受欢迎程度排序。您可以为喜欢或者觉得有用的代码点赞，您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: Init
/// <summary>
/// Resets the end-of-stream flag and wires up the term/offset attributes.
/// </summary>
/// <param name="bufferSize">Initial capacity requested for the term buffer.</param>
private void Init(int bufferSize)
{
    done = false;
    this.termAtt = AddAttribute<ITermAttribute>();
    this.offsetAtt = AddAttribute<IOffsetAttribute>();

    // Pre-size the term buffer so early tokens avoid incremental growth.
    this.termAtt.ResizeTermBuffer(bufferSize);
}
示例2: CamelCaseFilter
/// <summary>
/// Wraps <paramref name="stream"/> and registers the term, offset and
/// position-increment attributes this filter manipulates.
/// </summary>
/// <param name="stream">Upstream token stream to filter.</param>
public CamelCaseFilter(TokenStream stream)
    : base(stream)
{
    this._termAttribute = AddAttribute<ITermAttribute>();
    this._offsetAttribute = AddAttribute<IOffsetAttribute>();
    this._positionIncrementAttribute = AddAttribute<IPositionIncrementAttribute>();
}
示例3: IKTokenizer
/// <summary>
/// Constructor for the Lucene Tokenizer adapter over the IK segmenter.
/// </summary>
/// <param name="inreader">Source text reader.</param>
/// <param name="isMaxWordLength">When true the segmenter performs maximum
/// word-length splitting; when false it uses the finest-grained splitting.</param>
public IKTokenizer(TextReader inreader, bool isMaxWordLength)
    : base(inreader)
{
    // Register attributes before constructing the segmentation engine.
    this.offsetAtt = AddAttribute<IOffsetAttribute>();
    this.termAtt = AddAttribute<ITermAttribute>();
    this._IKImplement = new IKSegmentation(inreader, isMaxWordLength);
}
示例4: Init
/// <summary>
/// Initializes the Pan Gu segmenter and registers every attribute this
/// token stream exposes (term, offset, position increment, type).
/// </summary>
void Init()
{
    InitPanGuSegment();

    this.termAtt = AddAttribute<ITermAttribute>();
    this.offsetAtt = AddAttribute<IOffsetAttribute>();
    this.posIncrAtt = AddAttribute<IPositionIncrementAttribute>();
    this.typeAtt = AddAttribute<ITypeAttribute>();
}
示例5: CutLeterDigitFilter
/// <summary>
/// Wraps <paramref name="input"/>, allocates a reusable token instance and
/// registers the term, offset and type attributes this filter uses.
/// </summary>
/// <param name="input">Upstream token stream to filter.</param>
public CutLeterDigitFilter(TokenStream input)
    : base(input)
{
    this.reusableToken = new Token();
    this.termAtt = AddAttribute<ITermAttribute>();
    this.offsetAtt = AddAttribute<IOffsetAttribute>();
    this.typeAtt = AddAttribute<ITypeAttribute>();
}
示例6: Init
/// <summary>
/// Pre-computes enumeration spans for <paramref name="content"/>, registers
/// the offset and flags attributes, and marks the instance as initialized.
/// </summary>
/// <param name="content">Full text to scan for enumeration spans.</param>
void Init(string content)
{
    // Materialize the detected spans once up front via ToArray().
    this._enumerationPositions = this._aufzaehlungDetector.FindAufzaehlungsspans(content).ToArray();
    this._offsetAttribute = AddAttribute<IOffsetAttribute>();
    this._flagsAttribute = AddAttribute<IFlagsAttribute>();
    this._isInitialized = true;
}
示例7: GraphTokenizer
/// <summary>
/// Creates the tokenizer over <paramref name="input"/> and registers the
/// term, offset, position-increment and position-length attributes.
/// </summary>
/// <param name="input">Reader supplying the text to tokenize.</param>
public GraphTokenizer(TextReader input)
    : base(input)
{
    this.TermAtt = AddAttribute<ICharTermAttribute>();
    this.OffsetAtt = AddAttribute<IOffsetAttribute>();
    this.PosIncrAtt = AddAttribute<IPositionIncrementAttribute>();
    this.PosLengthAtt = AddAttribute<IPositionLengthAttribute>();
}
示例8: MMSegTokenizer
/*
 * Original note (translated from Chinese): "the base(input) call is skipped
 * here, because calling it moves the reader's position. -- by zh"
 *
 * NOTE(review): the comment and the code disagree — the constructor below
 * DOES call base(input). Confirm whether the base call is actually safe here,
 * or whether MMSeg must consume the reader before the base constructor runs.
 */
public MMSegTokenizer(Seg seg, TextReader input)
    : base(input)
{
    // MMSeg wraps the reader first; attributes are registered afterwards.
    mmSeg = new MMSeg(input, seg);
    termAtt = AddAttribute<ITermAttribute>();
    offsetAtt = AddAttribute<IOffsetAttribute>();
    typeAtt = AddAttribute<ITypeAttribute>();
}
示例9: SuggestStopFilter
/// <summary>
/// Sole constructor. Stores the stop-word set and registers the attributes
/// the filter reads and writes.
/// </summary>
/// <param name="input">Upstream token stream to filter.</param>
/// <param name="stopWords">Set of stop words to suppress.</param>
public SuggestStopFilter(TokenStream input, CharArraySet stopWords)
    : base(input)
{
    // "this." is only required where the parameter shadows the field.
    this.stopWords = stopWords;
    termAtt = AddAttribute<ICharTermAttribute>();
    posIncAtt = AddAttribute<IPositionIncrementAttribute>();
    keywordAtt = AddAttribute<IKeywordAttribute>();
    offsetAtt = AddAttribute<IOffsetAttribute>();
}
示例10: CannedBinaryTokenStream
/// <summary>
/// Token stream that replays a fixed array of pre-built binary tokens,
/// exposing term, position-increment, position-length and offset attributes.
/// </summary>
/// <param name="tokens">Tokens to replay, in order.</param>
public CannedBinaryTokenStream(params BinaryToken[] tokens)
{
    // The parameterless base constructor runs implicitly; ": base()" was redundant.
    this.Tokens = tokens;
    this.TermAtt = AddAttribute<IBinaryTermAttribute>();
    this.PosIncrAtt = AddAttribute<IPositionIncrementAttribute>();
    this.PosLengthAtt = AddAttribute<IPositionLengthAttribute>();
    this.OffsetAtt = AddAttribute<IOffsetAttribute>();
}
示例11: Init
/// <summary>
/// Shared initialization: registers the term/offset/type attributes, stores
/// the reader, and builds the HebMorph tokenizer and prefix tree.
/// </summary>
/// <param name="_input">Reader the tokenizer will consume.</param>
/// <param name="_prefixesTree">Radix tree of recognized prefixes.</param>
private void Init(System.IO.TextReader _input, HebMorph.DataStructures.DictRadix<int> _prefixesTree)
{
    this.termAtt = AddAttribute<ITermAttribute>();
    this.offsetAtt = AddAttribute<IOffsetAttribute>();
    this.typeAtt = AddAttribute<ITypeAttribute>();

    this.input = _input;
    this.hebMorphTokenizer = new HebMorph.Tokenizer(_input);
    this.prefixesTree = _prefixesTree;
}
示例12: JiebaTokenizer
/// <summary>
/// Tokenizer that wraps a <c>JiebaSegmenter</c> and eagerly tokenizes the
/// entire input string in Search mode.
/// </summary>
/// <param name="seg">Segmenter used to split <paramref name="input"/>.</param>
/// <param name="input">Full text to tokenize.</param>
public JiebaTokenizer(JiebaSegmenter seg, string input)
{
    segmenter = seg;
    termAtt = AddAttribute<ITermAttribute>();
    offsetAtt = AddAttribute<IOffsetAttribute>();
    typeAtt = AddAttribute<ITypeAttribute>();

    // Tokenize up front and materialize with ToList() so the result is an
    // in-memory list rather than a lazily re-evaluated sequence. (The former
    // redundant local alias `var text = input;` was removed.)
    tokens = segmenter.Tokenize(input, TokenizerMode.Search).ToList();
}
示例13: CommonGramsFilter
/// <summary>
/// Construct a token stream filtering the given input using a set of common
/// words to create bigrams. Outputs both unigrams (with position increment)
/// and bigrams (position increment 0, type "gram") where one or both words
/// of a potential bigram are in the set of common words.
/// </summary>
/// <param name="matchVersion">Lucene compatibility version. NOTE(review): not
/// referenced in this constructor body — presumably consumed elsewhere in the
/// class; confirm before removing.</param>
/// <param name="input">TokenStream input in the filter chain.</param>
/// <param name="commonWords">The set of common words.</param>
public CommonGramsFilter(LuceneVersion matchVersion, TokenStream input, CharArraySet commonWords)
    : base(input)
{
    this.termAttribute = AddAttribute<ICharTermAttribute>();
    this.offsetAttribute = AddAttribute<IOffsetAttribute>();
    this.typeAttribute = AddAttribute<ITypeAttribute>();
    this.posIncAttribute = AddAttribute<IPositionIncrementAttribute>();
    this.posLenAttribute = AddAttribute<IPositionLengthAttribute>();
    this.commonWords = commonWords;
}
示例14: IterTokenStream
/// <summary>
/// Token stream that replays a fixed array of tokens, exposing the full set
/// of term, offset, position-increment, flags, type and payload attributes.
/// </summary>
/// <param name="tokens">Tokens to replay, in order.</param>
public IterTokenStream(params Token[] tokens)
{
    // The parameterless base constructor runs implicitly; ": base()" was
    // redundant. "this." is kept only where the parameter shadows the field.
    this.tokens = tokens;
    termAtt = AddAttribute<ICharTermAttribute>();
    offsetAtt = AddAttribute<IOffsetAttribute>();
    posIncAtt = AddAttribute<IPositionIncrementAttribute>();
    flagsAtt = AddAttribute<IFlagsAttribute>();
    typeAtt = AddAttribute<ITypeAttribute>();
    payloadAtt = AddAttribute<IPayloadAttribute>();
}
示例15: ThaiTokenizer
/// <summary>
/// Creates a new ThaiTokenizer, supplying the AttributeFactory.
/// </summary>
/// <param name="factory">Attribute factory forwarded to the base tokenizer.</param>
/// <param name="reader">Reader supplying the text to tokenize.</param>
/// <exception cref="System.NotSupportedException">
/// Thrown when the runtime lacks dictionary-based break-iterator support
/// (DBBI) required for Thai segmentation.
/// </exception>
public ThaiTokenizer(AttributeFactory factory, TextReader reader)
    : base(factory, reader, BreakIterator.CreateSentenceInstance(Locale.GetUS()))
{
    // Fail fast before allocating the word breaker if Thai segmentation is unavailable.
    if (!DBBI_AVAILABLE)
    {
        throw new System.NotSupportedException("This JRE does not have support for Thai segmentation");
    }

    this.wordBreaker = new ThaiWordBreaker(BreakIterator.CreateWordInstance(Locale.GetUS()));
    this.termAtt = AddAttribute<ICharTermAttribute>();
    this.offsetAtt = AddAttribute<IOffsetAttribute>();
}