本文整理汇总了Java中org.apache.lucene.util.IntsRef.grow方法的典型用法代码示例。如果您正苦于以下问题:Java IntsRef.grow方法的具体用法?Java IntsRef.grow怎么用?Java IntsRef.grow使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.lucene.util.IntsRef
的用法示例。
在下文中一共展示了IntsRef.grow方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: toIntsRefUTF32
import org.apache.lucene.util.IntsRef; //导入方法依赖的package包/类
/** Decodes the Unicode code points of {@code s} into {@code ir} (one int
 *  per code point), growing the backing array on demand, and returns it. */
static IntsRef toIntsRefUTF32(String s, IntsRef ir) {
  final int end = s.length();
  int srcPos = 0;
  int dstPos = 0;
  for (; srcPos < end; dstPos++) {
    // Ensure room for one more code point before writing.
    if (dstPos == ir.ints.length) {
      ir.grow(dstPos + 1);
    }
    final int cp = s.codePointAt(srcPos);
    ir.ints[dstPos] = cp;
    // Surrogate pairs consume two chars, BMP code points one.
    srcPos += Character.charCount(cp);
  }
  ir.length = dstPos;
  return ir;
}
示例2: getOrdinals
import org.apache.lucene.util.IntsRef; //导入方法依赖的package包/类
/**
 * Aggregates the ordinals of {@code docID} from all valid iterators into
 * {@code ints}, appending after any existing content ({@code ints.length}).
 *
 * BUG FIX: the original grew {@code ints} and advanced {@code ints.length}
 * but never copied the ordinals fetched into {@code tmp}, so callers saw
 * uninitialized/stale values. The fetched ordinals are now copied over.
 *
 * @param docID document whose ordinals to collect
 * @param ints  destination buffer; grown as needed
 * @throws IOException if an underlying iterator fails
 */
@Override
public void getOrdinals(int docID, IntsRef ints) throws IOException {
  IntsRef tmp = new IntsRef(ints.length);
  for (CategoryListIterator cli : validIterators) {
    cli.getOrdinals(docID, tmp);
    if (ints.ints.length < ints.length + tmp.length) {
      ints.grow(ints.length + tmp.length);
    }
    // Copy the ordinals read into tmp; without this the grown buffer
    // is never filled (the original defect).
    System.arraycopy(tmp.ints, tmp.offset, ints.ints, ints.length, tmp.length);
    ints.length += tmp.length;
  }
}
示例3: toIntsRef
import org.apache.lucene.util.IntsRef; //导入方法依赖的package包/类
/** Copies the bytes of {@code br} into {@code ir} as unsigned int values
 *  (0..255) and returns {@code ir}. */
static IntsRef toIntsRef(BytesRef br, IntsRef ir) {
  if (ir.ints.length < br.length) {
    ir.grow(br.length);
  }
  int pos = 0;
  while (pos < br.length) {
    // Mask to treat the byte as unsigned.
    ir.ints[pos] = br.bytes[br.offset + pos] & 0xFF;
    pos++;
  }
  ir.length = br.length;
  return ir;
}
示例4: randomAcceptedWord
import org.apache.lucene.util.IntsRef; //导入方法依赖的package包/类
/**
 * Walks the FST from its root, choosing a uniformly random outgoing arc at
 * each node until an END_LABEL arc is taken. The traversed labels are
 * written into {@code in}; the accumulated output of the walk is returned.
 */
private T randomAcceptedWord(FST<T> fst, IntsRef in) throws IOException {
  FST.Arc<T> arc = fst.getFirstArc(new FST.Arc<T>());
  final List<FST.Arc<T>> candidates = new ArrayList<FST.Arc<T>>();
  in.length = 0;
  in.offset = 0;
  final T NO_OUTPUT = fst.outputs.getNoOutput();
  T result = NO_OUTPUT;
  final FST.BytesReader reader = fst.getBytesReader();
  for (;;) {
    // Collect every arc leaving the current node.
    fst.readFirstTargetArc(arc, arc, reader);
    candidates.add(new FST.Arc<T>().copyFrom(arc));
    while (!arc.isLast()) {
      fst.readNextArc(arc, reader);
      candidates.add(new FST.Arc<T>().copyFrom(arc));
    }
    // Follow one at random.
    arc = candidates.get(random.nextInt(candidates.size()));
    candidates.clear();
    // Fold this arc's output into the running total.
    result = fst.outputs.add(result, arc.output);
    if (arc.label == FST.END_LABEL) {
      // Reached a final arc: the word is complete.
      break;
    }
    if (in.ints.length == in.length) {
      in.grow(in.length + 1);
    }
    in.ints[in.length++] = arc.label;
  }
  return result;
}
示例5: getFiniteStrings
import org.apache.lucene.util.IntsRef; //导入方法依赖的package包/类
/**
 * Collects into {@code strings} every string producible from state
 * {@code s}. Returns false if more than {@code limit} strings are found
 * or if a cycle is reachable (infinite language); true otherwise.
 * {@code limit} &lt; 0 means "infinite".
 */
private static boolean getFiniteStrings(State s, HashSet<State> pathstates,
    HashSet<IntsRef> strings, IntsRef path, int limit) {
  pathstates.add(s);
  for (Transition t : s.getTransitions()) {
    // A state already on the current path means a cycle, hence
    // infinitely many strings.
    if (pathstates.contains(t.to)) {
      return false;
    }
    for (int label = t.min; label <= t.max; label++) {
      // Push this label onto the current path.
      path.grow(path.length + 1);
      path.ints[path.length++] = label;
      if (t.to.accept) {
        // Snapshot the path, since it is mutated as we backtrack.
        strings.add(IntsRef.deepCopyOf(path));
        if (limit >= 0 && strings.size() > limit) {
          return false;
        }
      }
      if (!getFiniteStrings(t.to, pathstates, strings, path, limit)) {
        return false;
      }
      // Pop the label before trying the next one.
      path.length--;
    }
  }
  pathstates.remove(s);
  return true;
}
示例6: toUTF16
import org.apache.lucene.util.IntsRef; //导入方法依赖的package包/类
/** Maps each UTF-16 code unit (char) of {@code s} to one int in
 *  {@code scratch} and returns it; no surrogate decoding is performed. */
public static IntsRef toUTF16(CharSequence s, IntsRef scratch) {
  final int len = s.length();
  scratch.offset = 0;
  scratch.length = len;
  // Grow once up-front; the final size is known.
  scratch.grow(len);
  int i = 0;
  while (i < len) {
    scratch.ints[i] = s.charAt(i);
    i++;
  }
  return scratch;
}
示例7: toUTF32
import org.apache.lucene.util.IntsRef; //导入方法依赖的package包/类
/** Decodes the Unicode code points from the provided CharSequence and
 *  places them in the provided scratch IntsRef, which must not be null,
 *  returning it. */
public static IntsRef toUTF32(CharSequence s, IntsRef scratch) {
  final int limit = s.length();
  int written = 0;
  for (int charIdx = 0; charIdx < limit; ) {
    // Ensure room for one more code point.
    scratch.grow(written + 1);
    final int cp = Character.codePointAt(s, charIdx);
    scratch.ints[written++] = cp;
    // Advance by one or two chars depending on surrogate pairing.
    charIdx += Character.charCount(cp);
  }
  scratch.length = written;
  return scratch;
}
示例8: toIntsRef
import org.apache.lucene.util.IntsRef; //导入方法依赖的package包/类
/** Just takes unsigned byte values from the BytesRef and
 *  converts them into an IntsRef, returning the scratch ref. */
public static IntsRef toIntsRef(BytesRef input, IntsRef scratch) {
  // Final size is known; grow once.
  scratch.grow(input.length);
  for (int pos = 0; pos < input.length; ++pos) {
    // & 0xFF: interpret each byte as unsigned (0..255).
    scratch.ints[pos] = input.bytes[input.offset + pos] & 0xFF;
  }
  scratch.length = input.length;
  return scratch;
}
示例9: toUTF32
import org.apache.lucene.util.IntsRef; //导入方法依赖的package包/类
/** Decodes the Unicode code points from the provided char[] slice
 *  ({@code offset}, {@code length}) and places them in the provided
 *  scratch IntsRef, which must not be null, returning it. */
public static IntsRef toUTF32(char[] s, int offset, int length, IntsRef scratch) {
  final int limit = offset + length;
  int written = 0;
  for (int charIdx = offset; charIdx < limit; ) {
    // Ensure room for one more code point.
    scratch.grow(written + 1);
    final int cp = Character.codePointAt(s, charIdx, limit);
    scratch.ints[written++] = cp;
    // Surrogate pairs consume two chars.
    charIdx += Character.charCount(cp);
  }
  scratch.length = written;
  return scratch;
}
示例10: addFields
import org.apache.lucene.util.IntsRef; //导入方法依赖的package包/类
/**
 * Adds the needed facet fields to the document.
 *
 * For each distinct CategoryListParams (CLP) derived from the given
 * categories, this adds the encoded counting-list data and a drill-down
 * field, both under the CLP's field name.
 *
 * @param doc the document to add the facet fields to
 * @param categories the category paths to index; must not be null
 * @throws IllegalArgumentException if {@code categories} is null
 * @throws IOException if adding a category to the taxonomy fails
 */
public void addFields(Document doc, Iterable<CategoryPath> categories) throws IOException {
if (categories == null) {
throw new IllegalArgumentException("categories should not be null");
}
// TODO: add reuse capabilities to this class, per CLP objects:
// - drill-down field
// - counting list field
// - DrillDownStream
// - CountingListStream
final Map<CategoryListParams,Iterable<CategoryPath>> categoryLists = createCategoryListMapping(categories);
// for each CLP we add a different field for drill-down terms as well as for
// counting list data.
IntsRef ordinals = new IntsRef(32); // should be enough for most common applications
for (Entry<CategoryListParams, Iterable<CategoryPath>> e : categoryLists.entrySet()) {
final CategoryListParams clp = e.getKey();
final String field = clp.field;
// build category list data
ordinals.length = 0; // reset
// Upper bound on how many ordinals this CLP may produce; used to
// presize the buffer before each write.
int maxNumOrds = 0;
for (CategoryPath cp : e.getValue()) {
int ordinal = taxonomyWriter.addCategory(cp);
maxNumOrds += cp.length; // ordinal and potentially all parents
if (ordinals.ints.length < maxNumOrds) {
ordinals.grow(maxNumOrds);
}
ordinals.ints[ordinals.length++] = ordinal;
}
// Encode the collected ordinals into per-field byte payloads.
Map<String,BytesRef> categoriesData = getCategoryListData(clp, ordinals, e.getValue());
// add the counting list data
addCountingListData(doc, categoriesData, field);
// add the drill-down field
DrillDownStream drillDownStream = getDrillDownStream(e.getValue());
Field drillDown = new Field(field, drillDownStream, drillDownFieldType());
doc.add(drillDown);
}
}
示例11: processFacetFields
import org.apache.lucene.util.IntsRef; //导入方法依赖的package包/类
/**
 * Indexes the given facet fields, grouped by index field name, into the
 * document: records each facet's ordinal (plus, for multi-valued
 * hierarchical/dim-counted dims, all parent ordinals), adds drill-down
 * terms for every path prefix, and stores the deduplicated, encoded
 * ordinals as a binary doc value under the index field name.
 *
 * @param taxoWriter taxonomy writer used to resolve ordinals
 * @param byField facet fields grouped by their index field name
 * @param doc the document being built
 * @throws IllegalArgumentException if a multi-component path is used on a
 *         non-hierarchical dimension
 * @throws IOException if the taxonomy writer fails
 */
private void processFacetFields(TaxonomyWriter taxoWriter, Map<String,List<FacetField>> byField, Document doc) throws IOException {
for(Map.Entry<String,List<FacetField>> ent : byField.entrySet()) {
String indexFieldName = ent.getKey();
//System.out.println("  indexFieldName=" + indexFieldName + " fields=" + ent.getValue());
// Ordinal buffer per index field; 32 is a starting capacity, grown below.
IntsRef ordinals = new IntsRef(32);
for(FacetField facetField : ent.getValue()) {
FacetsConfig.DimConfig ft = getDimConfig(facetField.dim);
if (facetField.path.length > 1 && ft.hierarchical == false) {
throw new IllegalArgumentException("dimension \"" + facetField.dim + "\" is not hierarchical yet has " + facetField.path.length + " components");
}
FacetLabel cp = new FacetLabel(facetField.dim, facetField.path);
checkTaxoWriter(taxoWriter);
// Resolve (or create) the ordinal for the full label.
int ordinal = taxoWriter.addCategory(cp);
if (ordinals.length == ordinals.ints.length) {
ordinals.grow(ordinals.length+1);
}
ordinals.ints[ordinals.length++] = ordinal;
//System.out.println("ords[" + (ordinals.length-1) + "]=" + ordinal);
//System.out.println("  add cp=" + cp);
if (ft.multiValued && (ft.hierarchical || ft.requireDimCount)) {
//System.out.println("  add parents");
// Add all parents too:
int parent = taxoWriter.getParent(ordinal);
while (parent > 0) {
if (ordinals.ints.length == ordinals.length) {
ordinals.grow(ordinals.length+1);
}
ordinals.ints[ordinals.length++] = parent;
parent = taxoWriter.getParent(parent);
}
if (ft.requireDimCount == false) {
// Remove last (dimension) ord:
ordinals.length--;
}
}
// Drill down:
// One term per path prefix, so queries can drill into any level.
for (int i=1;i<=cp.length;i++) {
doc.add(new StringField(indexFieldName, pathToString(cp.components, i), Field.Store.NO));
}
}
// Facet counts:
// DocValues are considered stored fields:
doc.add(new BinaryDocValuesField(indexFieldName, dedupAndEncode(ordinals)));
}
}