This page collects typical usage examples of the C# LuceneVersion class. If you are unsure what LuceneVersion is for, how to use it, or what real-world code that uses it looks like, the curated examples below may help.
The LuceneVersion class belongs to the Lucene.Net.Util namespace. 15 code examples of the class are shown below, sorted by popularity by default.
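
Before diving into the examples, here is a minimal sketch of how a LuceneVersion value is typically chosen once and threaded through version-aware components (the analyzer and config shown are illustrative, not part of the examples below):

using Lucene.Net.Analysis.Standard;
using Lucene.Net.Index;
using Lucene.Net.Util;

// Pick the compatibility version once and pass it to every version-aware component,
// so that analysis and index behavior stay consistent.
LuceneVersion matchVersion = LuceneVersion.LUCENE_48;
var analyzer = new StandardAnalyzer(matchVersion);
var config = new IndexWriterConfig(matchVersion, analyzer);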

Example 1: EdgeNGramTokenFilter

public EdgeNGramTokenFilter(LuceneVersion version, TokenStream input, Side side, int minGram, int maxGram)
    : base(input)
{
    // LuceneVersion and Side are enums in Lucene.NET, so the two null checks below
    // (carried over from the Java original) can never actually trigger.
    if (version == null)
    {
        throw new System.ArgumentException("version must not be null");
    }
    if (version.OnOrAfter(LuceneVersion.LUCENE_44) && side == Side.BACK)
    {
        throw new System.ArgumentException("Side.BACK is not supported anymore as of Lucene 4.4, use ReverseStringFilter up-front and afterward");
    }
    if (side == null)
    {
        throw new System.ArgumentException("sideLabel must be either front or back");
    }
    if (minGram < 1)
    {
        throw new System.ArgumentException("minGram must be greater than zero");
    }
    if (minGram > maxGram)
    {
        throw new System.ArgumentException("minGram must not be greater than maxGram");
    }
    this.version = version;
    this.charUtils = version.OnOrAfter(LuceneVersion.LUCENE_44) ? CharacterUtils.GetInstance(version) : CharacterUtils.Java4Instance;
    this.minGram = minGram;
    this.maxGram = maxGram;
    this.side = side;
}
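
A hedged usage sketch for the constructor above. The tokenizer, input, and gram sizes are illustrative, and Side is assumed to be the enum referenced by the constructor (nested in EdgeNGramTokenFilter in the Java original); Side.FRONT produces prefix grams:

using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Core;
using Lucene.Net.Analysis.NGram;
using Lucene.Net.Util;
using System.IO;

LuceneVersion version = LuceneVersion.LUCENE_48;
TokenStream stream = new KeywordTokenizer(new StringReader("lucene"));
// Side.FRONT with minGram = 2, maxGram = 4 yields "lu", "luc", "luce".
stream = new EdgeNGramTokenFilter(version, stream, EdgeNGramTokenFilter.Side.FRONT, 2, 4);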

Example 2: CodepointCountFilter

/// <summary>
/// Create a new <seealso cref="CodepointCountFilter"/>. This will filter out tokens whose
/// <seealso cref="CharTermAttribute"/> is either too short (<seealso cref="Character#CodePointCount(char[], int, int)"/>
/// < min) or too long (<seealso cref="Character#CodePointCount(char[], int, int)"/> > max). </summary>
/// <param name="version"> the Lucene match version </param>
/// <param name="in"> the <seealso cref="TokenStream"/> to consume </param>
/// <param name="min"> the minimum length </param>
/// <param name="max"> the maximum length </param>
public CodepointCountFilter(LuceneVersion version, TokenStream @in, int min, int max)
    : base(version, @in)
{
    this.min = min;
    this.max = max;
    termAtt = AddAttribute<ICharTermAttribute>();
}
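
A minimal usage sketch (the tokenizer, input text, and bounds are illustrative): keep only tokens whose code point count is between 2 and 10.

using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Core;
using Lucene.Net.Analysis.Miscellaneous;
using Lucene.Net.Util;
using System.IO;

LuceneVersion version = LuceneVersion.LUCENE_48;
TokenStream stream = new WhitespaceTokenizer(version, new StringReader("a brief example"));
// min = 2, max = 10: drops "a" (1 code point), keeps "brief" and "example".
stream = new CodepointCountFilter(version, stream, 2, 10);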

Example 3: StandardFilter

public StandardFilter(LuceneVersion matchVersion, TokenStream @in)
    : base(@in)
{
    this.matchVersion = matchVersion;
    typeAtt = AddAttribute<ITypeAttribute>();
    termAtt = AddAttribute<ICharTermAttribute>();
}

Example 4: FilteringTokenFilter

/// <summary>
/// Create a new <seealso cref="FilteringTokenFilter"/>. </summary>
/// <param name="version"> the Lucene match version </param>
/// <param name="in"> the <seealso cref="TokenStream"/> to consume </param>
public FilteringTokenFilter(LuceneVersion version, TokenStream @in)
    : base(@in)
{
    posIncrAtt = AddAttribute<IPositionIncrementAttribute>();
    this.version = version;
    this.enablePositionIncrements = true;
}

Example 5: NGramTokenFilter

/// <summary>
/// Creates NGramTokenFilter with given min and max n-grams. </summary>
/// <param name="version"> Lucene version to enable correct position increments.
/// See <a href="#version">above</a> for details. </param>
/// <param name="input"> <seealso cref="TokenStream"/> holding the input to be tokenized </param>
/// <param name="minGram"> the smallest n-gram to generate </param>
/// <param name="maxGram"> the largest n-gram to generate </param>
public NGramTokenFilter(LuceneVersion version, TokenStream input, int minGram, int maxGram)
    : base(new CodepointCountFilter(version, input, minGram, int.MaxValue))
{
    this.version = version;
    this.charUtils = version.OnOrAfter(
#pragma warning disable 612, 618
        LuceneVersion.LUCENE_44) ?
#pragma warning restore 612, 618
        CharacterUtils.GetInstance(version) : CharacterUtils.Java4Instance;
    if (minGram < 1)
    {
        throw new System.ArgumentException("minGram must be greater than zero");
    }
    if (minGram > maxGram)
    {
        throw new System.ArgumentException("minGram must not be greater than maxGram");
    }
    this.minGram = minGram;
    this.maxGram = maxGram;
#pragma warning disable 612, 618
    if (version.OnOrAfter(LuceneVersion.LUCENE_44))
#pragma warning restore 612, 618
    {
        posIncAtt = AddAttribute<IPositionIncrementAttribute>();
        posLenAtt = AddAttribute<IPositionLengthAttribute>();
    }
    else
    {
        posIncAtt = new PositionIncrementAttributeAnonymousInnerClassHelper(this);
        posLenAtt = new PositionLengthAttributeAnonymousInnerClassHelper(this);
    }
    termAtt = AddAttribute<ICharTermAttribute>();
    offsetAtt = AddAttribute<IOffsetAttribute>();
}
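
A hedged usage sketch (tokenizer, input, and gram sizes are illustrative):

using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Core;
using Lucene.Net.Analysis.NGram;
using Lucene.Net.Util;
using System.IO;

LuceneVersion version = LuceneVersion.LUCENE_48;
TokenStream stream = new WhitespaceTokenizer(version, new StringReader("lucene"));
// minGram = 2, maxGram = 3: emits every 2- and 3-character gram of each token.
stream = new NGramTokenFilter(version, stream, 2, 3);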

Example 6: UpperCaseFilter

/// <summary>
/// Create a new UpperCaseFilter, that normalizes token text to upper case.
/// </summary>
/// <param name="matchVersion"> See <a href="#version">above</a> </param>
/// <param name="in"> TokenStream to filter </param>
public UpperCaseFilter(LuceneVersion matchVersion, TokenStream @in)
    : base(@in)
{
    termAtt = AddAttribute<ICharTermAttribute>();
    charUtils = CharacterUtils.GetInstance(matchVersion);
}

Example 7: CompoundWordTokenFilterBase

protected CompoundWordTokenFilterBase(LuceneVersion matchVersion, TokenStream input, CharArraySet dictionary, int minWordSize, int minSubwordSize, int maxSubwordSize, bool onlyLongestMatch)
    : base(input)
{
    termAtt = AddAttribute<ICharTermAttribute>() as CharTermAttribute;
    offsetAtt = AddAttribute<IOffsetAttribute>();
    posIncAtt = AddAttribute<IPositionIncrementAttribute>();
    this.matchVersion = matchVersion;
    this.tokens = new LinkedList<CompoundToken>();
    if (minWordSize < 0)
    {
        throw new System.ArgumentException("minWordSize cannot be negative");
    }
    this.minWordSize = minWordSize;
    if (minSubwordSize < 0)
    {
        throw new System.ArgumentException("minSubwordSize cannot be negative");
    }
    this.minSubwordSize = minSubwordSize;
    if (maxSubwordSize < 0)
    {
        throw new System.ArgumentException("maxSubwordSize cannot be negative");
    }
    this.maxSubwordSize = maxSubwordSize;
    this.onlyLongestMatch = onlyLongestMatch;
    this.dictionary = dictionary;
}

Example 8: TypeTokenFilter

/// <summary>
/// Create a new <seealso cref="TypeTokenFilter"/>. </summary>
/// <param name="version"> the Lucene match version </param>
/// <param name="input"> the <seealso cref="TokenStream"/> to consume </param>
/// <param name="stopTypes"> the types to filter </param>
/// <param name="useWhiteList"> if true, then tokens whose type is in stopTypes will
/// be kept, otherwise they will be filtered out </param>
public TypeTokenFilter(LuceneVersion version, TokenStream input, IEnumerable<string> stopTypes, bool useWhiteList)
    : base(version, input)
{
    typeAttribute = AddAttribute<ITypeAttribute>();
    this.stopTypes = new HashSet<string>(stopTypes);
    this.useWhiteList = useWhiteList;
}
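
A usage sketch, assuming a StandardTokenizer input ("<NUM>" is the type StandardTokenizer assigns to numeric tokens):

using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Core;
using Lucene.Net.Analysis.Standard;
using Lucene.Net.Util;
using System.IO;

LuceneVersion version = LuceneVersion.LUCENE_48;
TokenStream stream = new StandardTokenizer(version, new StringReader("release 4 of lucene"));
// useWhiteList = true: only tokens whose type is in stopTypes are kept, so only "4" survives.
stream = new TypeTokenFilter(version, stream, new[] { "<NUM>" }, true);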

Example 9: CharTokenizer

/// <summary>
/// Creates a new <seealso cref="CharTokenizer"/> instance
/// </summary>
/// <param name="matchVersion">
/// Lucene version to match </param>
/// <param name="input">
/// the input to split up into tokens </param>
protected CharTokenizer(LuceneVersion matchVersion, TextReader input)
    : base(input)
{
    termAtt = AddAttribute<ICharTermAttribute>();
    offsetAtt = AddAttribute<IOffsetAttribute>();
    charUtils = CharacterUtils.GetInstance(matchVersion);
}

Example 10: DictionaryCompoundWordTokenFilter

/// <summary>
/// Creates a new <seealso cref="DictionaryCompoundWordTokenFilter"/>
/// </summary>
/// <param name="matchVersion">
/// Lucene version to enable correct Unicode 4.0 behavior in the
/// dictionaries if Version > 3.0. See <a
/// href="CompoundWordTokenFilterBase.html#version"
/// >CompoundWordTokenFilterBase</a> for details. </param>
/// <param name="input">
/// the <seealso cref="TokenStream"/> to process </param>
/// <param name="dictionary">
/// the word dictionary to match against. </param>
public DictionaryCompoundWordTokenFilter(LuceneVersion matchVersion, TokenStream input, CharArraySet dictionary)
    : base(matchVersion, input, dictionary)
{
    if (dictionary == null)
    {
        throw new System.ArgumentException("dictionary cannot be null");
    }
}
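
A sketch of typical use, assuming a whitespace-tokenized German compound and an illustrative dictionary of sub-words:

using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Compound;
using Lucene.Net.Analysis.Core;
using Lucene.Net.Analysis.Util;
using Lucene.Net.Util;
using System.IO;

LuceneVersion version = LuceneVersion.LUCENE_48;
// Sub-words that compounds may be decomposed into (last argument: ignore case).
CharArraySet dictionary = new CharArraySet(version, new[] { "donau", "dampf", "schiff" }, true);
TokenStream stream = new WhitespaceTokenizer(version, new StringReader("Donaudampfschiff"));
// Emits the original token plus any dictionary sub-words found inside it.
stream = new DictionaryCompoundWordTokenFilter(version, stream, dictionary);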

Example 11: IndexUpgrader

/// <summary>
/// Creates an index upgrader on the given directory, using an <seealso cref="IndexWriter"/> with the given
/// <c>matchVersion</c>. Indexes with multiple commit points can be upgraded by removing all commit points
/// older than the last one. If <c>infoStream</c> is not <c>null</c>, all logging output will be sent to this stream.
/// </summary>
public IndexUpgrader(Directory dir, LuceneVersion matchVersion, TextWriter infoStream, bool deletePriorCommits)
    : this(dir, new IndexWriterConfig(matchVersion, null), deletePriorCommits)
{
    if (infoStream != null)
    {
        this.Iwc.SetInfoStream(infoStream);
    }
}
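
A minimal sketch of upgrading an index directory in place; the path is illustrative, and the Upgrade() method name assumes the .NET port's usual PascalCase convention:

using System;
using System.IO;
using Lucene.Net.Index;
using Lucene.Net.Store;
using Lucene.Net.Util;

using (var dir = FSDirectory.Open(new DirectoryInfo("path/to/index")))
{
    // deletePriorCommits = false: refuse to upgrade (rather than silently drop)
    // an index that still has multiple commit points.
    var upgrader = new IndexUpgrader(dir, LuceneVersion.LUCENE_48, Console.Out, false);
    upgrader.Upgrade();
}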

Example 12: CommonGramsFilter

/// <summary>
/// Construct a token stream filtering the given input using a set of common
/// words to create bigrams. Outputs both unigrams (with position increment) and
/// bigrams (with position increment 0 and type="gram") whenever one or both of the
/// words in a potential bigram are in the set of common words.
/// </summary>
/// <param name="matchVersion"> the Lucene match version </param>
/// <param name="input"> TokenStream input in filter chain </param>
/// <param name="commonWords"> The set of common words. </param>
public CommonGramsFilter(LuceneVersion matchVersion, TokenStream input, CharArraySet commonWords)
    : base(input)
{
    termAttribute = AddAttribute<ICharTermAttribute>();
    offsetAttribute = AddAttribute<IOffsetAttribute>();
    typeAttribute = AddAttribute<ITypeAttribute>();
    posIncAttribute = AddAttribute<IPositionIncrementAttribute>();
    posLenAttribute = AddAttribute<IPositionLengthAttribute>();
    this.commonWords = commonWords;
}
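
A usage sketch with an illustrative set of common words:

using Lucene.Net.Analysis;
using Lucene.Net.Analysis.CommonGrams;
using Lucene.Net.Analysis.Core;
using Lucene.Net.Analysis.Util;
using Lucene.Net.Util;
using System.IO;

LuceneVersion version = LuceneVersion.LUCENE_48;
CharArraySet commonWords = new CharArraySet(version, new[] { "the", "of" }, true);
TokenStream stream = new WhitespaceTokenizer(version, new StringReader("the quick fox"));
// Emits "the", "the_quick" (position increment 0, type "gram"), "quick", "fox".
stream = new CommonGramsFilter(version, stream, commonWords);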

Example 13: TrimFilter

public TrimFilter(LuceneVersion version, TokenStream @in, bool updateOffsets)
    : base(@in)
{
    // As of Lucene 4.4 token offsets may no longer be modified, so updateOffsets is rejected.
    if (updateOffsets && version.OnOrAfter(LuceneVersion.LUCENE_44))
    {
        throw new System.ArgumentException("updateOffsets=true is not supported anymore as of Lucene 4.4");
    }
    termAtt = AddAttribute<ICharTermAttribute>();
    offsetAtt = AddAttribute<IOffsetAttribute>();
    this.updateOffsets = updateOffsets;
}
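
The check above makes the accepted arguments depend on the match version; a sketch (the tokenizer and input are illustrative):

using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Core;
using Lucene.Net.Analysis.Miscellaneous;
using Lucene.Net.Util;
using System.IO;

LuceneVersion version = LuceneVersion.LUCENE_48;
TokenStream stream = new KeywordTokenizer(new StringReader("  padded token  "));
// updateOffsets = false: trims leading/trailing whitespace without touching offsets.
stream = new TrimFilter(version, stream, false);
// new TrimFilter(version, stream, true) would throw, because updateOffsets = true
// is rejected for LuceneVersion.LUCENE_44 and later.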

Example 14: ThaiWordFilter

private bool hasIllegalOffsets = false; // only if the length changed before this filter

/// <summary>
/// Creates a new ThaiWordFilter with the specified match version. </summary>
public ThaiWordFilter(LuceneVersion matchVersion, TokenStream input)
    : base(matchVersion.OnOrAfter(LuceneVersion.LUCENE_31) ? input : new LowerCaseFilter(matchVersion, input))
{
    if (!DBBI_AVAILABLE)
    {
        throw new System.NotSupportedException("This JRE does not have support for Thai segmentation");
    }
    handlePosIncr = matchVersion.OnOrAfter(LuceneVersion.LUCENE_31);
    termAtt = AddAttribute<ICharTermAttribute>();
    offsetAtt = AddAttribute<IOffsetAttribute>();
    posAtt = AddAttribute<IPositionIncrementAttribute>();
}

Example 15: AbstractAnalysisFactory

/// <summary>
/// Initialize this factory via a set of key-value pairs.
/// </summary>
protected internal AbstractAnalysisFactory(IDictionary<string, string> args)
{
    ExplicitLuceneMatchVersion = false;
    originalArgs = Collections.UnmodifiableMap(args);
    string version = Get(args, LUCENE_MATCH_VERSION_PARAM);
    // LUCENENET TODO: What should we do if the version is null?
    //luceneMatchVersion = version == null ? (LuceneVersion?)null : LuceneVersionHelpers.ParseLeniently(version);
    luceneMatchVersion = version == null ?
#pragma warning disable 612, 618
        LuceneVersion.LUCENE_CURRENT :
#pragma warning restore 612, 618
        LuceneVersionHelpers.ParseLeniently(version);
    args.Remove(CLASS_NAME); // consume the class arg
}
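
For factories, the match version normally arrives as the "luceneMatchVersion" entry of the args dictionary (the key read via LUCENE_MATCH_VERSION_PARAM above). A hedged sketch, using StopFilterFactory purely as a concrete subclass; the extra argument is illustrative:

using System.Collections.Generic;
using Lucene.Net.Analysis.Core;

var args = new Dictionary<string, string>
{
    { "luceneMatchVersion", "4.8" }, // parsed leniently into the matching LuceneVersion
    { "ignoreCase", "true" }
};
var factory = new StopFilterFactory(args);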