本文整理汇总了Java中org.apache.lucene.util.ArrayUtil.grow方法的典型用法代码示例。如果您正苦于以下问题:Java ArrayUtil.grow方法的具体用法?Java ArrayUtil.grow怎么用?Java ArrayUtil.grow使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.lucene.util.ArrayUtil
的用法示例。
在下文中一共展示了ArrayUtil.grow方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: addPosition
import org.apache.lucene.util.ArrayUtil; //导入方法依赖的package包/类
/**
 * Buffers one position for the current term, along with its optional
 * start offset/length and payload length, growing each backing array
 * on demand when the write index reaches its end.
 */
void addPosition(int position, int startOffset, int length, int payloadLength) {
  if (hasPositions) {
    final int posIndex = posStart + totalPositions;
    if (posIndex == positionsBuf.length) {
      positionsBuf = ArrayUtil.grow(positionsBuf);
    }
    positionsBuf[posIndex] = position;
  }
  if (hasOffsets) {
    final int offIndex = offStart + totalPositions;
    if (offIndex == startOffsetsBuf.length) {
      // grow both parallel arrays in lock-step so they keep the same size
      final int grown = ArrayUtil.oversize(offIndex, 4);
      startOffsetsBuf = Arrays.copyOf(startOffsetsBuf, grown);
      lengthsBuf = Arrays.copyOf(lengthsBuf, grown);
    }
    startOffsetsBuf[offIndex] = startOffset;
    lengthsBuf[offIndex] = length;
  }
  if (hasPayloads) {
    final int payIndex = payStart + totalPositions;
    if (payIndex == payloadLengthsBuf.length) {
      payloadLengthsBuf = ArrayUtil.grow(payloadLengthsBuf);
    }
    payloadLengthsBuf[payIndex] = payloadLength;
  }
  ++totalPositions;
}
示例2: decompress
import org.apache.lucene.util.ArrayUtil; //导入方法依赖的package包/类
/**
* Decompress the chunk.
*/
/**
 * Decompresses the current chunk into {@code bytes}. Chunks at least
 * twice the configured chunk size are decompressed incrementally
 * through {@code spare} to bound peak memory; smaller chunks are
 * decompressed in one call.
 */
void decompress() throws IOException {
  final int chunkSize = chunkSize();
  if (version >= VERSION_BIG_CHUNKS && chunkSize >= 2 * CompressingStoredFieldsReader.this.chunkSize) {
    bytes.offset = bytes.length = 0;
    int remaining = chunkSize;
    while (remaining > 0) {
      final int step = Math.min(remaining, CompressingStoredFieldsReader.this.chunkSize);
      decompressor.decompress(fieldsStream, step, 0, step, spare);
      // append this slice onto the accumulated output
      bytes.bytes = ArrayUtil.grow(bytes.bytes, bytes.length + spare.length);
      System.arraycopy(spare.bytes, spare.offset, bytes.bytes, bytes.length, spare.length);
      bytes.length += spare.length;
      remaining -= step;
    }
  } else {
    decompressor.decompress(fieldsStream, chunkSize, 0, chunkSize, bytes);
  }
  if (bytes.length != chunkSize) {
    throw new CorruptIndexException("Corrupted: expected chunk size = " + chunkSize() + ", got " + bytes.length + " (resource=" + fieldsStream + ")");
  }
}
示例3: addOneValue
import org.apache.lucene.util.ArrayUtil; //导入方法依赖的package包/类
/**
 * Records one term value for the current document: interns it in the
 * hash, accounts for the RAM a new unique term will need, and grows the
 * per-document ords buffer when it is full.
 */
private void addOneValue(BytesRef value) {
  int termID = hash.add(value);
  if (termID < 0) {
    // already interned: decode the existing term id
    termID = -termID - 1;
  } else {
    // reserve additional space for each unique value:
    // 1. when indexing, when hash is 50% full, rehash() suddenly needs 2*size ints.
    // TODO: can this same OOM happen in THPF?
    // 2. when flushing, we need 1 int per value (slot in the ordMap).
    iwBytesUsed.addAndGet(2 * RamUsageEstimator.NUM_BYTES_INT);
  }
  if (currentUpto == currentValues.length) {
    currentValues = ArrayUtil.grow(currentValues, currentValues.length + 1);
    // reserve additional space for max # values per-doc
    // when flushing, we need an int[] to sort the mapped-ords within the doc
    iwBytesUsed.addAndGet((currentValues.length - currentUpto) * 2 * RamUsageEstimator.NUM_BYTES_INT);
  }
  currentValues[currentUpto++] = termID;
}
示例4: next
import org.apache.lucene.util.ArrayUtil; //导入方法依赖的package包/类
@Override
public BytesRef next() {
if (!hasNext()) {
throw new NoSuchElementException();
}
int count = counts.next().intValue();
int maxSize = count*9; // worst case
if (maxSize > buffer.length) {
buffer = ArrayUtil.grow(buffer, maxSize);
}
try {
encodeValues(count);
} catch (IOException bogus) {
throw new RuntimeException(bogus);
}
ref.bytes = buffer;
ref.offset = 0;
ref.length = out.getPosition();
return ref;
}
示例5: checkReadBytes
import org.apache.lucene.util.ArrayUtil; //导入方法依赖的package包/类
/**
 * Reads up to {@code size} bytes from {@code input} (clamped to the
 * remaining test-file length) into {@code buffer} at a small arbitrary
 * offset, then verifies the file pointer and every byte read.
 */
private void checkReadBytes(IndexInput input, int size, int pos) throws IOException{
  // Just to see that "offset" is treated properly in readBytes(), we
  // add an arbitrary offset at the beginning of the array
  final int offset = size % 10; // arbitrary
  buffer = ArrayUtil.grow(buffer, offset + size);
  assertEquals(pos, input.getFilePointer());
  final long remaining = TEST_FILE_LENGTH - input.getFilePointer();
  if (remaining <= 0) {
    return;
  }
  if (remaining < size) {
    size = (int) remaining;
  }
  input.readBytes(buffer, offset, size);
  assertEquals(pos + size, input.getFilePointer());
  for (int i = 0; i < size; i++) {
    assertEquals("pos=" + i + " filepos=" + (pos + i), byten(pos + i), buffer[offset + i]);
  }
}
示例6: BufferedInputIterator
import org.apache.lucene.util.ArrayUtil; //导入方法依赖的package包/类
/** Creates a new iterator, buffering entries from the specified iterator */
public BufferedInputIterator(InputIterator source) throws IOException {
BytesRef spare;
int freqIndex = 0;
hasPayloads = source.hasPayloads();
hasContexts = source.hasContexts();
while((spare = source.next()) != null) {
entries.append(spare);
if (hasPayloads) {
payloads.append(source.payload());
}
if (hasContexts) {
contextSets.add(source.contexts());
}
if (freqIndex >= freqs.length) {
freqs = ArrayUtil.grow(freqs, freqs.length+1);
}
freqs[freqIndex++] = source.weight();
}
comp = source.getComparator();
}
示例7: build
import org.apache.lucene.util.ArrayUtil; //导入方法依赖的package包/类
/**
* Build a minimal, deterministic automaton from a sorted list of {@link BytesRef} representing
* strings in UTF-8. These strings must be binary-sorted.
*/
/**
 * Build a minimal, deterministic automaton from a sorted list of {@link BytesRef} representing
 * strings in UTF-8. These strings must be binary-sorted.
 */
public static Automaton build(Collection<BytesRef> input) {
  final DaciukMihovAutomatonBuilder builder = new DaciukMihovAutomatonBuilder();
  final CharsRef scratch = new CharsRef();
  char[] chars = new char[0];
  for (BytesRef term : input) {
    // reuse one char buffer across all terms, growing it as needed
    chars = ArrayUtil.grow(chars, term.length);
    scratch.chars = chars;
    scratch.length = UnicodeUtil.UTF8toUTF16(term, chars);
    builder.add(scratch);
  }
  final Automaton.Builder result = new Automaton.Builder();
  convert(result,
      builder.complete(),
      new IdentityHashMap<State,Integer>());
  return result.finish();
}
示例8: collect
import org.apache.lucene.util.ArrayUtil; //导入方法依赖的package包/类
/**
 * Accumulates the current scorer's score (and a hit count) per collected
 * term ord, growing the parallel accumulator arrays for new ords.
 */
@Override
public void collect(int doc) throws IOException {
  int ord = collectedTerms.add(fromDocTerms.get(doc));
  if (ord < 0) {
    // term already collected: decode its existing ord
    ord = -ord - 1;
  } else if (ord >= scoreSums.length) {
    // first time we see this term: make room for its running totals
    scoreSums = ArrayUtil.grow(scoreSums);
    scoreCounts = ArrayUtil.grow(scoreCounts);
  }
  final float current = scorer.score();
  if (Float.compare(scoreSums[ord], 0.0f) == 0) {
    // first score for this ord
    scoreSums[ord] = current;
    scoreCounts[ord] = 1;
  } else {
    scoreSums[ord] += current;
    scoreCounts[ord]++;
  }
}
示例9: doSetDocument
import org.apache.lucene.util.ArrayUtil; //导入方法依赖的package包/类
/**
 * Positions the view on {@code docId} and keeps only the inner ords
 * that are present in the {@code accepted} set.
 */
@Override
public void doSetDocument(int docId) {
  inner.setDocument(docId);
  final int innerCount = inner.cardinality();
  ords = ArrayUtil.grow(ords, innerCount);
  int kept = 0;
  for (int idx = 0; idx < innerCount; idx++) {
    final long candidate = inner.ordAt(idx);
    if (accepted.get(candidate)) {
      ords[kept++] = candidate;
    }
  }
  cardinality = kept;
}
示例10: add
import org.apache.lucene.util.ArrayUtil; //导入方法依赖的package包/类
/**
* Add a character to the word being stemmed. When you are finished adding
* characters, you can call stem(void) to process the word.
*/
/**
 * Add a character to the word being stemmed. When you are finished adding
 * characters, you can call stem(void) to process the word.
 */
public void add(char ch) {
  if (i >= b.length) {
    // buffer is full: grow before appending
    b = ArrayUtil.grow(b, i + 1);
  }
  b[i++] = ch;
}
示例11: initMemory
import org.apache.lucene.util.ArrayUtil; //导入方法依赖的package包/类
/**
 * Pre-sizes the reusable per-term buffers to hold {@code termFreq}
 * entries, based on which features {@code curTerms} actually carries.
 *
 * @param curTerms the terms whose feature flags (positions/offsets/payloads) decide which buffers to size
 * @param termFreq number of entries each sized buffer must hold
 */
private void initMemory(Terms curTerms, int termFreq) {
// init memory for performance reasons
if (curTerms.hasPositions()) {
currentPositions = ArrayUtil.grow(currentPositions, termFreq);
}
if (curTerms.hasOffsets()) {
currentStartOffset = ArrayUtil.grow(currentStartOffset, termFreq);
currentEndOffset = ArrayUtil.grow(currentEndOffset, termFreq);
}
if (curTerms.hasPayloads()) {
// payloads get a fresh array rather than being grown in place
currentPayloads = new BytesArray[termFreq];
}
}
示例12: pushTerm
import org.apache.lucene.util.ArrayUtil; //导入方法依赖的package包/类
/** Pushes the new term to the top of the stack, and writes new blocks. */
private void pushTerm(BytesRef text) throws IOException {
int limit = Math.min(lastTerm.length(), text.length);
// Find common prefix between last term and current term:
int pos = 0;
while (pos < limit && lastTerm.byteAt(pos) == text.bytes[text.offset+pos]) {
pos++;
}
// if (DEBUG) System.out.println(" shared=" + pos + " lastTerm.length=" + lastTerm.length);
// Close the "abandoned" suffix now:
for(int i=lastTerm.length()-1;i>=pos;i--) {
// How many items on top of the stack share the current suffix
// we are closing:
int prefixTopSize = pending.size() - prefixStarts[i];
if (prefixTopSize >= minItemsInBlock) {
// if (DEBUG) System.out.println("pushTerm i=" + i + " prefixTopSize=" + prefixTopSize + " minItemsInBlock=" + minItemsInBlock);
writeBlocks(i+1, prefixTopSize);
prefixStarts[i] -= prefixTopSize-1;
}
}
if (prefixStarts.length < text.length) {
prefixStarts = ArrayUtil.grow(prefixStarts, text.length);
}
// Init new tail:
for(int i=pos;i<text.length;i++) {
prefixStarts[i] = pending.size();
}
lastTerm.copyBytes(text);
}
示例13: append
import org.apache.lucene.util.ArrayUtil; //导入方法依赖的package包/类
/**
* Appends a copy of the given {@link BytesRef} to this {@link BytesRefArray}.
* @param bytes the bytes to append
* @return the index of the appended bytes
*/
public int append(BytesRef bytes) {
if (lastElement >= offsets.length) {
int oldLen = offsets.length;
offsets = ArrayUtil.grow(offsets, offsets.length + 1);
bytesUsed.addAndGet((offsets.length - oldLen)
* RamUsageEstimator.NUM_BYTES_INT);
}
pool.append(bytes);
offsets[lastElement++] = currentOffset;
currentOffset += bytes.length;
return lastElement-1;
}
示例14: copy
import org.apache.lucene.util.ArrayUtil; //导入方法依赖的package包/类
/** Copies over all states/transitions from other. The states numbers
* are sequentially assigned (appended). */
public void copy(Automaton other) {
// Bulk copy and then fixup the state pointers:
int stateOffset = getNumStates();
states = ArrayUtil.grow(states, nextState + other.nextState);
System.arraycopy(other.states, 0, states, nextState, other.nextState);
for(int i=0;i<other.nextState;i += 2) {
if (states[nextState+i] != -1) {
states[nextState+i] += nextTransition;
}
}
nextState += other.nextState;
int otherNumStates = other.getNumStates();
BitSet otherAcceptStates = other.getAcceptStates();
int state = 0;
while (state < otherNumStates && (state = otherAcceptStates.nextSetBit(state)) != -1) {
setAccept(stateOffset + state, true);
state++;
}
// Bulk copy and then fixup dest for each transition:
transitions = ArrayUtil.grow(transitions, nextTransition + other.nextTransition);
System.arraycopy(other.transitions, 0, transitions, nextTransition, other.nextTransition);
for(int i=0;i<other.nextTransition;i += 3) {
transitions[nextTransition+i] += stateOffset;
}
nextTransition += other.nextTransition;
if (other.deterministic == false) {
deterministic = false;
}
}
示例15: caseFoldTitle
import org.apache.lucene.util.ArrayUtil; //导入方法依赖的package包/类
/** folds titlecase variant of word to titleBuffer */
private void caseFoldTitle(char word[], int length) {
titleBuffer = ArrayUtil.grow(titleBuffer, length);
System.arraycopy(word, 0, titleBuffer, 0, length);
for (int i = 1; i < length; i++) {
titleBuffer[i] = dictionary.caseFold(titleBuffer[i]);
}
}