本文整理汇总了Java中org.apache.lucene.index.Term.field方法的典型用法代码示例。如果您正苦于以下问题:Java Term.field方法的具体用法?Java Term.field怎么用?Java Term.field使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.lucene.index.Term
的用法示例。
在下文中一共展示了Term.field方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: convert
import org.apache.lucene.index.Term; //导入方法依赖的package包/类
@CheckForNull
@Override
@SuppressWarnings("StringEquality")
public Void convert(@NonNull final Index.WithTermFrequencies.TermFreq param) throws Stop {
    final Term term = param.getTerm();
    // Field names are interned, so identity comparison detects the end of
    // this field's term range in the sorted enumeration.
    if (fieldName != term.field()) {
        throw new Stop();
    }
    final int freq = param.getFreq();
    final String encodedName = term.text();
    // Drop the fixed-length postfix to recover the binary class name.
    final String className = encodedName.substring(0, encodedName.length() - postfixLen);
    final int lastDot = className.lastIndexOf('.'); //NOI18N
    final String packageName = lastDot < 0 ? "" : className.substring(0, lastDot); //NOI18N
    // Accumulate the document frequency per type and per package.
    final Integer seenForType = typeFreq.get(className);
    typeFreq.put(className, seenForType == null ? freq : seenForType + freq);
    final Integer seenForPkg = pkgFreq.get(packageName);
    pkgFreq.put(packageName, seenForPkg == null ? freq : seenForPkg + freq);
    return null;
}
示例2: convert
import org.apache.lucene.index.Term; //导入方法依赖的package包/类
@Override
public String convert(Term currentTerm) throws Stop {
    // Interned field names: identity mismatch means we left this field's
    // term range, so end the enumeration.
    if (fieldName != currentTerm.field()) {
        throw STOP;
    }
    final String text = currentTerm.text();
    // Reject terms outside the requested prefix (unless collecting all).
    if (!all && !text.startsWith(value)) {
        return null;
    }
    if (!directOnly) {
        return text;
    }
    // Direct subpackages only: truncate at the first '.' past the prefix.
    final int dot = text.indexOf('.', value.length()); //NOI18N
    return dot > 0 ? text.substring(0, dot) : text;
}
示例3: reset
import org.apache.lucene.index.Term; //导入方法依赖的package包/类
// Repositions this enum at the start of the given field's terms,
// reusing the already-open SegmentTermEnum when possible.
void reset(FieldInfo fieldInfo) throws IOException {
//System.out.println("pff.reset te=" + termEnum);
this.fieldInfo = fieldInfo;
// Intern the field name so later comparisons can use identity (==).
internedFieldName = fieldInfo.name.intern();
// Template term: the smallest possible term of this field.
final Term term = new Term(internedFieldName);
if (termEnum == null) {
// First use: open both the main and the lookahead enums on the dict.
termEnum = getTermsDict().terms(term);
seekTermEnum = getTermsDict().terms(term);
//System.out.println("  term=" + termEnum.term());
} else {
// Enums already open: just seek the main enum to the field start.
getTermsDict().seekEnum(termEnum, term, true);
}
skipNext = true;
unicodeSortOrder = sortTermsByUnicode();
final Term t = termEnum.term();
// Initialize surrogate-pair resorting state only if the enum actually
// landed on a term of this field (identity compare: both interned).
if (t != null && t.field() == internedFieldName) {
newSuffixStart = 0;
prevTerm.clear();
surrogateDance();
}
}
示例4: seekEnum
import org.apache.lucene.index.Term; //导入方法依赖的package包/类
// Seeks the given enumerator to the index entry at indexOffset by decoding
// the stored term and its TermInfo from the paged-bytes index data.
void seekEnum(SegmentTermEnum enumerator, int indexOffset) throws IOException {
// Clone so concurrent callers do not share a read position.
PagedBytesDataInput input = dataInput.clone();
input.setPosition(indexToDataOffset.get(indexOffset));
// read the term: a field id (vInt) followed by the term text
int fieldId = input.readVInt();
Term field = fields[fieldId];
Term term = new Term(field.field(), input.readString());
// read the terminfo; skipOffset is only stored for frequent terms
TermInfo termInfo = new TermInfo();
termInfo.docFreq = input.readVInt();
if (termInfo.docFreq >= skipInterval) {
termInfo.skipOffset = input.readVInt();
} else {
termInfo.skipOffset = 0;
}
termInfo.freqPointer = input.readVLong();
termInfo.proxPointer = input.readVLong();
long pointer = input.readVLong();
// perform the seek; position is (offset * interval) - 1 because the
// enumerator advances by one before reading
enumerator.seek(pointer, ((long) indexOffset * totalIndexInterval) - 1, term, termInfo);
}
示例5: UnionDocsAndPositionsEnum
import org.apache.lucene.index.Term; //导入方法依赖的package包/类
// Builds a disjunction over the postings of all given terms: collects one
// DocsAndPositionsEnum per term into a queue. Terms absent from this
// segment are skipped; terms indexed without positions are an error
// because a phrase query cannot run without position data.
public UnionDocsAndPositionsEnum(Bits liveDocs, AtomicReaderContext context, Term[] terms, Map<Term,TermContext> termContexts, TermsEnum termsEnum) throws IOException {
List<DocsAndPositionsEnum> docsEnums = new LinkedList<>();
for (int i = 0; i < terms.length; i++) {
final Term term = terms[i];
// Per-segment term state cached by the caller; null if the term
// does not occur in this segment.
TermState termState = termContexts.get(term).get(context.ord);
if (termState == null) {
// Term doesn't exist in reader
continue;
}
termsEnum.seekExact(term.bytes(), termState);
DocsAndPositionsEnum postings = termsEnum.docsAndPositions(liveDocs, null, DocsEnum.FLAG_NONE);
if (postings == null) {
// term does exist, but has no positions
throw new IllegalStateException("field \"" + term.field() + "\" was indexed without position data; cannot run PhraseQuery (term=" + term.text() + ")");
}
cost += postings.cost();
docsEnums.add(postings);
}
_queue = new DocsQueue(docsEnums);
_posList = new IntQueue();
}
示例6: FuzzyQuery
import org.apache.lucene.index.Term; //导入方法依赖的package包/类
/**
 * Creates a new FuzzyQuery that matches terms within an edit distance of
 * at most <code>maxEdits</code> from <code>term</code>.
 * <p>
 * When <code>prefixLength</code> &gt; 0, candidate terms must additionally
 * share a common prefix of that length with <code>term</code>.
 *
 * @param term the term to search for
 * @param maxEdits must be &gt;= 0 and &lt;= {@link LevenshteinAutomata#MAXIMUM_SUPPORTED_DISTANCE}.
 * @param prefixLength length of the common (non-fuzzy) prefix
 * @param maxExpansions the maximum number of terms to match; if this number
 *        is greater than {@link BooleanQuery#getMaxClauseCount} when the
 *        query is rewritten, the maxClauseCount is used instead.
 * @param transpositions true to treat transpositions as a primitive edit
 *        operation; false means comparisons implement the classic
 *        Levenshtein algorithm.
 */
public FuzzyQuery(Term term, int maxEdits, int prefixLength, int maxExpansions, boolean transpositions) {
  super(term.field());
  // Validate all arguments before recording any state.
  if (maxEdits < 0 || maxEdits > LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE) {
    throw new IllegalArgumentException("maxEdits must be between 0 and " + LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE);
  }
  if (prefixLength < 0) {
    throw new IllegalArgumentException("prefixLength cannot be negative.");
  }
  if (maxExpansions <= 0) {
    throw new IllegalArgumentException("maxExpansions must be positive.");
  }
  this.term = term;
  this.maxExpansions = maxExpansions;
  this.maxEdits = maxEdits;
  this.prefixLength = prefixLength;
  this.transpositions = transpositions;
  // Keep only the best-scoring expanded terms instead of all matches.
  setRewriteMethod(new MultiTermQuery.TopTermsScoringBooleanQueryRewrite(maxExpansions));
}
示例7: FreqCollector
import org.apache.lucene.index.Term; //导入方法依赖的package包/类
FreqCollector(
        @NonNull final Term startTerm,
        @NonNull final Map<String,Integer> typeFreqs,
        @NonNull final Map<String,Integer> pkgFreq) {
    // The (interned) field name of the start term bounds the collected range.
    this.fieldName = startTerm.field();
    this.pkgFreq = pkgFreq;
    this.typeFreq = typeFreqs;
}
示例8: RegexpTermEnum
import org.apache.lucene.index.Term; //导入方法依赖的package包/类
public RegexpTermEnum(
        final IndexReader in,
        final String fieldName,
        final Pattern pattern,
        final String startPrefix) throws IOException {
    // First term of the scanned range; Term interns the field name for us.
    final Term startTerm = new Term(fieldName, startPrefix);
    this.pattern = pattern;
    this.startPrefix = startPrefix;
    // Keep the interned name so termCompare can use identity comparison.
    this.fieldName = startTerm.field();
    setEnum(in.terms(startTerm));
}
示例9: termCompare
import org.apache.lucene.index.Term; //导入方法依赖的package包/类
@Override
protected boolean termCompare(Term term) {
    // Interned field names allow identity comparison here.
    if (fieldName == term.field()) {
        final String text = term.text();
        if (text.startsWith(startPrefix)) {
            return pattern.matcher(text).matches();
        }
    }
    // Left the field or the prefix range: terminate the enumeration.
    endEnum = true;
    return false;
}
示例10: accept
import org.apache.lucene.index.Term; //导入方法依赖的package包/类
@Override
public FieldSelectorResult accept(String fieldName) {
    // Field names are interned, so identity comparison is sufficient.
    for (Term candidate : terms) {
        if (candidate.field() == fieldName) {
            return FieldSelectorResult.LOAD;
        }
    }
    return FieldSelectorResult.NO_LOAD;
}
示例11: getTerm
import org.apache.lucene.index.Term; //导入方法依赖的package包/类
/**
 * Returns the term stored at the given index position. For testing.
 *
 * @param termIndex
 *          the position to read the term from the index.
 * @return the term.
 * @throws IOException If there is a low-level I/O error.
 */
Term getTerm(int termIndex) throws IOException {
    // Clone so this read does not disturb any shared position.
    final PagedBytesDataInput in = dataInput.clone();
    in.setPosition(indexToDataOffset.get(termIndex));
    // Entry layout: field id (vInt) followed by the term text.
    final Term fieldTemplate = fields[in.readVInt()];
    return new Term(fieldTemplate.field(), in.readString());
}
示例12: add
import org.apache.lucene.index.Term; //导入方法依赖的package包/类
/**
 * Appends a term to the query phrase at an explicitly given position.
 * Explicit positions permit phrases with several terms at one position
 * or phrases containing gaps (e.g. in connection with stopwords).
 * All terms of a phrase must belong to the same field.
 */
public void add(Term term, int position) {
    if (terms.isEmpty()) {
        // First term fixes the field for the whole phrase.
        field = term.field();
    } else if (!term.field().equals(field)) {
        throw new IllegalArgumentException("All phrase terms must be in the same field: " + term);
    }
    terms.add(term);
    positions.add(Integer.valueOf(position));
    // Track the largest position seen so far.
    maxPosition = Math.max(maxPosition, position);
}
示例13: PackageFilter
import org.apache.lucene.index.Term; //导入方法依赖的package包/类
PackageFilter(final @NonNull Term startTerm, final boolean directOnly) {
    this.directOnly = directOnly;
    // Interned field name and prefix text taken from the start term.
    this.fieldName = startTerm.field();
    this.value = startTerm.text();
    // An empty prefix matches every package.
    this.all = this.value.isEmpty();
}
示例14: deepCopyOf
import org.apache.lucene.index.Term; //导入方法依赖的package包/类
/** Returns an independent copy of {@code other} backed by its own bytes. */
static Term deepCopyOf(Term other) {
    final BytesRef bytesCopy = BytesRef.deepCopyOf(other.bytes());
    return new Term(other.field(), bytesCopy);
}
示例15: TermInfosReaderIndex
import org.apache.lucene.index.Term; //导入方法依赖的package包/类
/**
 * Loads the segment information at segment load time.
 *
 * @param indexEnum
 *          the term enum.
 * @param indexDivisor
 *          the index divisor.
 * @param tiiFileLength
 *          the size of the tii file, used to approximate the size of the
 *          buffer.
 * @param totalIndexInterval
 *          the total index interval.
 */
TermInfosReaderIndex(SegmentTermEnum indexEnum, int indexDivisor, long tiiFileLength, int totalIndexInterval) throws IOException {
this.totalIndexInterval = totalIndexInterval;
// Keep every indexDivisor-th entry of the on-disk index.
indexSize = 1 + ((int) indexEnum.size - 1) / indexDivisor;
skipInterval = indexEnum.skipInterval;
// this is only an inital size, it will be GCed once the build is complete
long initialSize = (long) (tiiFileLength * 1.5) / indexDivisor;
PagedBytes dataPagedBytes = new PagedBytes(estimatePageBits(initialSize));
PagedBytesDataOutput dataOutput = dataPagedBytes.getDataOutput();
final int bitEstimate = 1+MathUtil.log(tiiFileLength, 2);
// Maps the i-th kept entry to its byte offset in the paged data.
GrowableWriter indexToTerms = new GrowableWriter(bitEstimate, indexSize, PackedInts.DEFAULT);
String currentField = null;
List<String> fieldStrs = new ArrayList<>();
int fieldCounter = -1;
for (int i = 0; indexEnum.next(); i++) {
Term term = indexEnum.term();
// Terms arrive grouped by field; assign each new field a small id so
// entries only store the id, not the full field name.
if (currentField == null || !currentField.equals(term.field())) {
currentField = term.field();
fieldStrs.add(currentField);
fieldCounter++;
}
TermInfo termInfo = indexEnum.termInfo();
indexToTerms.set(i, dataOutput.getPosition());
// Entry layout: fieldId, term text, docFreq, [skipOffset],
// freqPointer, proxPointer, indexPointer.
dataOutput.writeVInt(fieldCounter);
dataOutput.writeString(term.text());
dataOutput.writeVInt(termInfo.docFreq);
// skipOffset is only stored for terms frequent enough to have one.
if (termInfo.docFreq >= skipInterval) {
dataOutput.writeVInt(termInfo.skipOffset);
}
dataOutput.writeVLong(termInfo.freqPointer);
dataOutput.writeVLong(termInfo.proxPointer);
dataOutput.writeVLong(indexEnum.indexPointer);
// Skip over the entries this divisor drops.
for (int j = 1; j < indexDivisor; j++) {
if (!indexEnum.next()) {
break;
}
}
}
// One template Term per distinct field, indexed by the stored field id.
fields = new Term[fieldStrs.size()];
for (int i = 0; i < fields.length; i++) {
fields[i] = new Term(fieldStrs.get(i));
}
// Freeze to the final, read-only representation.
dataPagedBytes.freeze(true);
dataInput = dataPagedBytes.getDataInput();
indexToDataOffset = indexToTerms.getMutable();
// Account for the memory this index retains.
long ramBytesUsed = RamUsageEstimator.shallowSizeOf(fields);
ramBytesUsed += RamUsageEstimator.shallowSizeOf(dataInput);
ramBytesUsed += fields.length * RamUsageEstimator.shallowSizeOfInstance(Term.class);
ramBytesUsed += dataPagedBytes.ramBytesUsed();
ramBytesUsed += indexToDataOffset.ramBytesUsed();
this.ramBytesUsed = ramBytesUsed;
}