This article collects typical usage examples of the org.apache.lucene.util.Version.LATEST field in Java. If you are wondering what Version.LATEST is for, how to use it, or what real code that relies on it looks like, the curated samples below may help. You can also read further about the enclosing class, org.apache.lucene.util.Version.
The following 15 code examples show uses of the Version.LATEST field, sorted by popularity by default.
Example 1: DutchAnalyzer
public DutchAnalyzer(CharArraySet stopwords) {
this(Version.LATEST, stopwords);
}
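For context, here is a minimal usage sketch for this constructor. It assumes the Lucene 4.10.x API (the release that introduced Version.LATEST and the version-less analysis constructors); the field name, sample text, and stop words are invented for illustration.
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.nl.DutchAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.util.CharArraySet;

public class DutchAnalyzerDemo {
  public static void main(String[] args) throws IOException {
    // Custom stop word set; second argument enables case-insensitive matching.
    CharArraySet stopwords = new CharArraySet(Arrays.asList("de", "het", "een"), true);
    try (DutchAnalyzer analyzer = new DutchAnalyzer(stopwords)) {
      // Standard consumption loop: reset, incrementToken, end, close.
      try (TokenStream ts = analyzer.tokenStream("body", "de fietsen van het dorp")) {
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
          System.out.println(term.toString()); // lowercased, stop-word-free, stemmed tokens
        }
        ts.end();
      }
    }
  }
}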
Example 2: ComplexPhraseQueryParser
public ComplexPhraseQueryParser(String f, Analyzer a) {
this(Version.LATEST, f, a);
}
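A short sketch of how this parser is typically used: it accepts wildcards, fuzzy terms, and boolean groups inside quoted phrases. The field name is illustrative; the query string follows the pattern shown in the ComplexPhraseQueryParser Javadoc.
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.complexPhrase.ComplexPhraseQueryParser;
import org.apache.lucene.search.Query;

public class ComplexPhraseDemo {
  public static void main(String[] args) throws Exception {
    ComplexPhraseQueryParser parser =
        new ComplexPhraseQueryParser("name", new StandardAnalyzer());
    // Wildcards and fuzzy terms are allowed inside the quoted phrase.
    Query q = parser.parse("\"(john jon jonathan~) peters*\"");
    System.out.println(q);
  }
}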
Example 3: CompoundWordTokenFilterBase
protected CompoundWordTokenFilterBase(TokenStream input, CharArraySet dictionary, boolean onlyLongestMatch) {
this(Version.LATEST, input, dictionary, onlyLongestMatch);
}
Example 4: ThaiWordFilter
/** Creates a new ThaiWordFilter, defaulting the match version to {@link Version#LATEST}. */
public ThaiWordFilter(TokenStream input) {
this(Version.LATEST, input);
}
Example 5: AnalyzingQueryParser
public AnalyzingQueryParser(String field, Analyzer analyzer) {
this(Version.LATEST, field, analyzer);
}
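AnalyzingQueryParser differs from the classic QueryParser in that wildcard, fuzzy, and range terms are also run through the analyzer. A hedged sketch with an illustrative field name and query:
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.analyzing.AnalyzingQueryParser;
import org.apache.lucene.search.Query;

public class AnalyzingParserDemo {
  public static void main(String[] args) throws Exception {
    AnalyzingQueryParser parser =
        new AnalyzingQueryParser("title", new StandardAnalyzer());
    // Unlike the classic parser, the wildcard terms themselves are analyzed
    // (e.g. lowercased) before the query is built.
    Query q = parser.parse("Lucene* AND Ver?ion");
    System.out.println(q);
  }
}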
Example 6: StandardTokenizer
/**
* Creates a new StandardTokenizer with a given {@link org.apache.lucene.util.AttributeFactory}
*/
public StandardTokenizer(AttributeFactory factory, Reader input) {
this(Version.LATEST, factory, input);
}
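A minimal sketch of this constructor in use, assuming Lucene 4.10.x, where Tokenizer constructors still accept a Reader; AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY is the stock factory.
import java.io.StringReader;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.AttributeFactory;

public class StandardTokenizerDemo {
  public static void main(String[] args) throws Exception {
    StandardTokenizer tokenizer = new StandardTokenizer(
        AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY,
        new StringReader("Lucene 4.10 added Version.LATEST"));
    CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
    tokenizer.reset();
    while (tokenizer.incrementToken()) {
      System.out.println(term.toString());
    }
    tokenizer.end();
    tokenizer.close();
  }
}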
Example 7: LengthFilter
/**
* Create a new {@link LengthFilter}. This will filter out tokens whose
* {@link CharTermAttribute} is either too short ({@link CharTermAttribute#length()}
* < min) or too long ({@link CharTermAttribute#length()} > max).
* @param in the {@link TokenStream} to consume
* @param min the minimum length
* @param max the maximum length
*/
public LengthFilter(TokenStream in, int min, int max) {
this(Version.LATEST, in, min, max);
}
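A hedged sketch of this filter in a small analysis chain (Lucene 4.10.x API assumed, sample text invented); only terms whose length is between 3 and 8 characters survive.
import java.io.StringReader;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.miscellaneous.LengthFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class LengthFilterDemo {
  public static void main(String[] args) throws Exception {
    TokenStream ts = new LengthFilter(
        new WhitespaceTokenizer(new StringReader("a tiny but representative token stream")),
        3, 8); // keep terms with 3 <= length <= 8
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
      System.out.println(term.toString()); // "tiny", "but", "token", "stream"
    }
    ts.end();
    ts.close();
  }
}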
Example 8: KeepWordFilter
/**
* Create a new {@link KeepWordFilter}.
* <p><b>NOTE</b>: The words set passed to this constructor will be directly
* used by this filter and should not be modified.
* @param in the {@link TokenStream} to consume
* @param words the words to keep
*/
public KeepWordFilter(TokenStream in, CharArraySet words) {
this(Version.LATEST, in, words);
}
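KeepWordFilter is effectively the inverse of a stop filter: only the listed words pass through. A sketch assuming the Lucene 4.10.x API, with an invented word set and input:
import java.io.StringReader;
import java.util.Arrays;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.miscellaneous.KeepWordFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.util.CharArraySet;

public class KeepWordFilterDemo {
  public static void main(String[] args) throws Exception {
    CharArraySet keep = new CharArraySet(Arrays.asList("lucene", "solr"), true);
    TokenStream ts = new KeepWordFilter(
        new WhitespaceTokenizer(new StringReader("lucene beats grep solr too")), keep);
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
      System.out.println(term.toString()); // only "lucene" and "solr"
    }
    ts.end();
    ts.close();
  }
}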
Example 9: MultiFieldQueryParser
/**
* Creates a MultiFieldQueryParser.
* Allows passing of a map with term to Boost, and the boost to apply to each term.
*
* <p>It will, when parse(String query)
* is called, construct a query like this (assuming the query consists of
* two terms and you specify the two fields <code>title</code> and <code>body</code>):</p>
*
* <code>
* (title:term1 body:term1) (title:term2 body:term2)
* </code>
*
* <p>When setDefaultOperator(AND_OPERATOR) is set, the result will be:</p>
*
* <code>
* +(title:term1 body:term1) +(title:term2 body:term2)
* </code>
*
* <p>When you pass a boost (title=>5 body=>10) you can get </p>
*
* <code>
* +(title:term1^5.0 body:term1^10.0) +(title:term2^5.0 body:term2^10.0)
* </code>
*
* <p>In other words, all the query's terms must appear, but it doesn't matter in
* what fields they appear.</p>
*/
public MultiFieldQueryParser(String[] fields, Analyzer analyzer, Map<String,Float> boosts) {
this(Version.LATEST, fields, analyzer, boosts);
}
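A sketch of the boosted multi-field case described in the Javadoc above; the field names and boost values are illustrative.
import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.classic.MultiFieldQueryParser;
import org.apache.lucene.search.Query;

public class MultiFieldDemo {
  public static void main(String[] args) throws Exception {
    Map<String, Float> boosts = new HashMap<>();
    boosts.put("title", 5.0f);
    boosts.put("body", 10.0f);
    MultiFieldQueryParser parser = new MultiFieldQueryParser(
        new String[] {"title", "body"}, new StandardAnalyzer(), boosts);
    // Each term is expanded across both fields with its per-field boost, e.g.
    // (title:term1^5.0 body:term1^10.0) (title:term2^5.0 body:term2^10.0)
    Query q = parser.parse("term1 term2");
    System.out.println(q);
  }
}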
Example 10: Lucene43EdgeNGramTokenizer
/**
* Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
*
* @param factory {@link org.apache.lucene.util.AttributeFactory} to use
* @param input {@link Reader} holding the input to be tokenized
* @param minGram the smallest n-gram to generate
* @param maxGram the largest n-gram to generate
*/
public Lucene43EdgeNGramTokenizer(AttributeFactory factory, Reader input, int minGram, int maxGram) {
this(Version.LATEST, factory, input, Side.FRONT, minGram, maxGram);
}
Example 11: EdgeNGramTokenizer
/**
* Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
*
* @param input {@link Reader} holding the input to be tokenized
* @param minGram the smallest n-gram to generate
* @param maxGram the largest n-gram to generate
*/
public EdgeNGramTokenizer(Reader input, int minGram, int maxGram) {
super(Version.LATEST, input, minGram, maxGram, true);
}
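A sketch of front-edge n-grams, a common building block for prefix/autocomplete matching (Lucene 4.10.x Reader-based constructor assumed, sample input invented):
import java.io.StringReader;
import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class EdgeNGramDemo {
  public static void main(String[] args) throws Exception {
    EdgeNGramTokenizer tokenizer =
        new EdgeNGramTokenizer(new StringReader("lucene"), 2, 4);
    CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
    tokenizer.reset();
    while (tokenizer.incrementToken()) {
      System.out.println(term.toString()); // lu, luc, luce
    }
    tokenizer.end();
    tokenizer.close();
  }
}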
Example 12: NGramTokenFilter
/**
* Creates NGramTokenFilter with given min and max n-grams.
* @param input {@link TokenStream} holding the input to be tokenized
* @param minGram the smallest n-gram to generate
* @param maxGram the largest n-gram to generate
*/
public NGramTokenFilter(TokenStream input, int minGram, int maxGram) {
this(Version.LATEST, input, minGram, maxGram);
}
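Unlike the tokenizer variants, this filter builds n-grams from each token produced by an upstream tokenizer. A hedged sketch, again assuming the Lucene 4.10.x Reader-based tokenizer constructor:
import java.io.StringReader;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.ngram.NGramTokenFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class NGramFilterDemo {
  public static void main(String[] args) throws Exception {
    TokenStream ts = new NGramTokenFilter(
        new WhitespaceTokenizer(new StringReader("abc de")), 2, 3);
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
      System.out.println(term.toString()); // 2- and 3-char grams of each whitespace token
    }
    ts.end();
    ts.close();
  }
}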
Example 13: TypeTokenFilter
/**
* Create a new {@link TypeTokenFilter}.
* @param input the {@link TokenStream} to consume
* @param stopTypes the types to filter
* @param useWhiteList if true, then tokens whose type is in stopTypes will
* be kept, otherwise they will be filtered out
*/
public TypeTokenFilter(TokenStream input, Set<String> stopTypes, boolean useWhiteList) {
this(Version.LATEST, input, stopTypes, useWhiteList);
}
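A sketch that drops numeric tokens by type: StandardTokenizer labels each token with a type such as <ALPHANUM> or <NUM>, and with useWhiteList = false the listed types are removed (Lucene 4.10.x API assumed, sample text invented).
import java.io.StringReader;
import java.util.Collections;
import java.util.Set;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.TypeTokenFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class TypeFilterDemo {
  public static void main(String[] args) throws Exception {
    Set<String> stopTypes = Collections.singleton("<NUM>");
    // useWhiteList = false: tokens of these types are filtered out.
    TokenStream ts = new TypeTokenFilter(
        new StandardTokenizer(new StringReader("lucene 4 10 release")), stopTypes, false);
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
      System.out.println(term.toString()); // "lucene", "release"
    }
    ts.end();
    ts.close();
  }
}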
Example 14: ReverseStringFilter
/**
* Create a new ReverseStringFilter that reverses and marks all tokens in the
* supplied {@link TokenStream}.
* <p>
* The reversed tokens will be prepended (marked) by the <code>marker</code>
* character.
* </p>
*
* @param in {@link TokenStream} to filter
* @param marker A character used to mark reversed tokens
*/
public ReverseStringFilter(TokenStream in, char marker) {
super(in);
this.matchVersion = Version.LATEST;
this.marker = marker;
}
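A sketch using the marker form, the trick commonly used for leading-wildcard support (index reversed terms, then rewrite "*foo" as a prefix query on the reversed field). The '\u0001' marker character here is an illustrative choice, not a requirement of the API.
import java.io.StringReader;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.reverse.ReverseStringFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class ReverseDemo {
  public static void main(String[] args) throws Exception {
    // Reversed tokens come out prefixed with the marker,
    // e.g. "lucene" -> "\u0001enecul".
    TokenStream ts = new ReverseStringFilter(
        new WhitespaceTokenizer(new StringReader("lucene version")), '\u0001');
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
      System.out.println(term.toString());
    }
    ts.end();
    ts.close();
  }
}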
Example 15: NGramTokenizer
/**
* Creates NGramTokenizer with given min and max n-grams.
* @param input {@link Reader} holding the input to be tokenized
* @param minGram the smallest n-gram to generate
* @param maxGram the largest n-gram to generate
*/
public NGramTokenizer(Reader input, int minGram, int maxGram) {
this(Version.LATEST, input, minGram, maxGram, false);
}
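For contrast with the edge variant in Example 11, NGramTokenizer emits grams starting at every position of the input, not just the front edge. A short sketch with an invented input:
import java.io.StringReader;
import org.apache.lucene.analysis.ngram.NGramTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class NGramTokenizerDemo {
  public static void main(String[] args) throws Exception {
    NGramTokenizer tokenizer = new NGramTokenizer(new StringReader("abcd"), 2, 3);
    CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
    tokenizer.reset();
    while (tokenizer.incrementToken()) {
      System.out.println(term.toString()); // ab, abc, bc, bcd, cd
    }
    tokenizer.end();
    tokenizer.close();
  }
}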