This article collects typical usage examples of the Java method org.elasticsearch.common.io.stream.StreamInput.readBytesRef. If you are wondering what StreamInput.readBytesRef does, how to call it, or want to see it in context, the curated code samples below may help. You can also explore the enclosing class, org.elasticsearch.common.io.stream.StreamInput, for more background.
Below are 13 code examples of StreamInput.readBytesRef, sorted by popularity by default.
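Before the examples, here is a minimal, self-contained round-trip sketch showing how readBytesRef pairs with the symmetric StreamOutput.writeBytesRef. It assumes an Elasticsearch 5.x-era classpath, where BytesStreamOutput.bytes().streamInput() is available:

import java.io.IOException;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

public class ReadBytesRefRoundTrip {
    public static void main(String[] args) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            // writeBytesRef writes a vInt length prefix followed by the raw bytes.
            out.writeBytesRef(new BytesRef("hello"));
            try (StreamInput in = out.bytes().streamInput()) {
                // readBytesRef reads the length prefix, then that many bytes.
                BytesRef roundTripped = in.readBytesRef();
                System.out.println(roundTripped.utf8ToString()); // prints "hello"
            }
        }
    }
}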
Example 1: Bucket
import org.elasticsearch.common.io.stream.StreamInput; // import the package/class this method depends on
private Bucket(StreamInput in, DocValueFormat format, boolean keyed) throws IOException {
    this.format = format;
    this.keyed = keyed;
    key = in.readOptionalString();
    if (in.readBoolean()) {
        from = in.readBytesRef();
    } else {
        from = null;
    }
    if (in.readBoolean()) {
        to = in.readBytesRef();
    } else {
        to = null;
    }
    docCount = in.readLong();
    aggregations = InternalAggregations.readAggregations(in);
}
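Example 1 guards each nullable BytesRef with a boolean flag before calling readBytesRef. For orientation, here is a hedged sketch of what the symmetric write side could look like; the field names mirror the example, but this is an illustration, not the actual Elasticsearch source:

// Hypothetical write-side counterpart to Example 1's constructor.
// writeOptionalString, writeBoolean, writeBytesRef, and writeLong are real
// StreamOutput methods; the method itself is a sketch.
private void writeTo(StreamOutput out) throws IOException {
    out.writeOptionalString(key);
    out.writeBoolean(from != null);
    if (from != null) {
        out.writeBytesRef(from);
    }
    out.writeBoolean(to != null);
    if (to != null) {
        out.writeBytesRef(to);
    }
    out.writeLong(docCount);
    aggregations.writeTo(out);
}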
Example 2: readFrom
import org.elasticsearch.common.io.stream.StreamInput; // import the package/class this method depends on
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    id = in.readLong();
    int termsSize = in.readVInt();
    if (termsSize == 0) {
        terms = EMPTY_TERMS;
    } else {
        terms = new Term[termsSize];
        for (int i = 0; i < terms.length; i++) {
            terms[i] = new Term(in.readString(), in.readBytesRef());
        }
    }
    this.termStatistics = readTermStats(in, terms);
    readFieldStats(in, fieldStatistics);
    maxDoc = in.readVInt();
}
Example 3: readSortValue
import org.elasticsearch.common.io.stream.StreamInput; // import the package/class this method depends on
private static Comparable readSortValue(StreamInput in) throws IOException {
    byte type = in.readByte();
    if (type == 0) {
        return null;
    } else if (type == 1) {
        return in.readString();
    } else if (type == 2) {
        return in.readInt();
    } else if (type == 3) {
        return in.readLong();
    } else if (type == 4) {
        return in.readFloat();
    } else if (type == 5) {
        return in.readDouble();
    } else if (type == 6) {
        return in.readByte();
    } else if (type == 7) {
        return in.readShort();
    } else if (type == 8) {
        return in.readBoolean();
    } else if (type == 9) {
        return in.readBytesRef();
    } else {
        throw new IOException("Can't match type [" + type + "]");
    }
}
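Example 3 dispatches on a one-byte type tag, with tag 9 selecting readBytesRef. A write side has to emit exactly matching tags; below is a partial, hypothetical sketch of that counterpart (writeSortValue is an assumed name, not taken from the Elasticsearch source):

// Hypothetical counterpart to readSortValue; the byte tags must match the
// read side exactly (0 = null, 1 = String, ..., 9 = BytesRef).
private static void writeSortValue(StreamOutput out, Comparable value) throws IOException {
    if (value == null) {
        out.writeByte((byte) 0);
    } else if (value instanceof String) {
        out.writeByte((byte) 1);
        out.writeString((String) value);
    } else if (value instanceof BytesRef) {
        out.writeByte((byte) 9);
        out.writeBytesRef((BytesRef) value);
    } else {
        // Tags 2-8 (Integer, Long, Float, Double, Byte, Short, Boolean)
        // follow the same pattern and are omitted here for brevity.
        throw new IOException("Can't serialize type [" + value.getClass() + "]");
    }
}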
Example 4: testRandomReads
import org.elasticsearch.common.io.stream.StreamInput; // import the package/class this method depends on
public void testRandomReads() throws IOException {
    int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20));
    BytesReference pbr = newBytesReference(length);
    StreamInput streamInput = pbr.streamInput();
    BytesRefBuilder target = new BytesRefBuilder();
    while (target.length() < pbr.length()) {
        switch (randomIntBetween(0, 10)) {
            case 6:
            case 5:
                target.append(new BytesRef(new byte[]{streamInput.readByte()}));
                break;
            case 4:
            case 3:
                BytesRef bytesRef = streamInput.readBytesRef(scaledRandomIntBetween(1, pbr.length() - target.length()));
                target.append(bytesRef);
                break;
            default:
                byte[] buffer = new byte[scaledRandomIntBetween(1, pbr.length() - target.length())];
                int offset = scaledRandomIntBetween(0, buffer.length - 1);
                int read = streamInput.read(buffer, offset, buffer.length - offset);
                target.append(new BytesRef(buffer, offset, read));
                break;
        }
    }
    assertEquals(pbr.length(), target.length());
    BytesRef targetBytes = target.get();
    assertArrayEquals(BytesReference.toBytes(pbr), Arrays.copyOfRange(targetBytes.bytes, targetBytes.offset, targetBytes.length));
}
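Note that Example 4 exercises the readBytesRef(int length) overload, which reads exactly length raw bytes, unlike the no-argument readBytesRef() used elsewhere, which first reads a vInt length prefix. A small sketch contrasting the two (same 5.x-era classpath assumption as the round-trip example above):

try (BytesStreamOutput out = new BytesStreamOutput()) {
    out.writeBytesRef(new BytesRef("abc"));  // vInt length prefix + 3 bytes
    out.writeBytes(new byte[]{1, 2, 3});     // 3 raw bytes, no prefix
    try (StreamInput in = out.bytes().streamInput()) {
        BytesRef prefixed = in.readBytesRef(); // consumes the prefix, then 3 bytes
        BytesRef raw = in.readBytesRef(3);     // consumes exactly 3 bytes
    }
}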
Example 5: Bucket
import org.elasticsearch.common.io.stream.StreamInput; // import the package/class this method depends on
/**
 * Read from a stream.
 */
public Bucket(StreamInput in, long subsetSize, long supersetSize, DocValueFormat format) throws IOException {
    super(in, subsetSize, supersetSize, format);
    termBytes = in.readBytesRef();
    subsetDf = in.readVLong();
    supersetDf = in.readVLong();
    score = in.readDouble();
    aggregations = InternalAggregations.readAggregations(in);
}
Example 6: readFrom
import org.elasticsearch.common.io.stream.StreamInput; // import the package/class this method depends on
@Override
public void readFrom(StreamInput in) throws IOException {
    int size = in.readVInt();
    termStatistics = HppcMaps.newMap(size);
    for (int i = 0; i < size; i++) {
        Term term = new Term(in.readString(), in.readBytesRef());
        TermStatistics stats = new TermStatistics(in.readBytesRef(),
            in.readVLong(),
            DfsSearchResult.subOne(in.readVLong()));
        termStatistics.put(term, stats);
    }
    fieldStatistics = DfsSearchResult.readFieldStats(in);
    maxDoc = in.readVLong();
}
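Example 6 passes the decoded document frequency through DfsSearchResult.subOne. In Elasticsearch this pairs with an addOne on the write side: writeVLong cannot encode negative numbers, and -1 serves as an "unknown" sentinel, so shifting by one keeps it in the encodable range. The idea, paraphrased as a sketch:

// The shift-by-one idea behind DfsSearchResult's subOne (paraphrased sketch):
static long addOne(long value) {   // applied before writeVLong
    assert value + 1 >= 0;
    return value + 1;
}

static long subOne(long value) {   // applied after readVLong
    assert value >= 0;
    return value - 1;
}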
Example 7: StoreFileMetaData
import org.elasticsearch.common.io.stream.StreamInput; // import the package/class this method depends on
/**
 * Read from a stream.
 */
public StoreFileMetaData(StreamInput in) throws IOException {
    name = in.readString();
    length = in.readVLong();
    checksum = in.readString();
    // TODO Why not Version.parse?
    writtenBy = Lucene.parseVersionLenient(in.readString(), FIRST_LUCENE_CHECKSUM_VERSION);
    hash = in.readBytesRef();
}
Example 8: readFieldDoc
import org.elasticsearch.common.io.stream.StreamInput; // import the package/class this method depends on
public static FieldDoc readFieldDoc(StreamInput in) throws IOException {
    Comparable[] cFields = new Comparable[in.readVInt()];
    for (int j = 0; j < cFields.length; j++) {
        byte type = in.readByte();
        if (type == 0) {
            cFields[j] = null;
        } else if (type == 1) {
            cFields[j] = in.readString();
        } else if (type == 2) {
            cFields[j] = in.readInt();
        } else if (type == 3) {
            cFields[j] = in.readLong();
        } else if (type == 4) {
            cFields[j] = in.readFloat();
        } else if (type == 5) {
            cFields[j] = in.readDouble();
        } else if (type == 6) {
            cFields[j] = in.readByte();
        } else if (type == 7) {
            cFields[j] = in.readShort();
        } else if (type == 8) {
            cFields[j] = in.readBoolean();
        } else if (type == 9) {
            cFields[j] = in.readBytesRef();
        } else {
            throw new IOException("Can't match type [" + type + "]");
        }
    }
    return new FieldDoc(in.readVInt(), in.readFloat(), cFields);
}
Example 9: readValueFrom
import org.elasticsearch.common.io.stream.StreamInput; // import the package/class this method depends on
@Override
public BytesRef readValueFrom(StreamInput in) throws IOException {
    int length = in.readVInt() - 1;
    if (length == -1) {
        return null;
    }
    return in.readBytesRef(length);
}
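Example 9 depends on an off-by-one encoding: the writer stores length + 1 as a vInt so that 0 can stand for null, which is why the reader subtracts one before calling readBytesRef(length). A hedged sketch of the matching write side (writeValueTo is an assumed name mirroring readValueFrom):

// Hypothetical counterpart to readValueFrom: vInt 0 encodes null,
// vInt (length + 1) encodes a value of `length` bytes.
public void writeValueTo(StreamOutput out, BytesRef value) throws IOException {
    if (value == null) {
        out.writeVInt(0);
    } else {
        out.writeVInt(1 + value.length);
        out.writeBytes(value.bytes, value.offset, value.length);
    }
}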
Example 10: readFrom
import org.elasticsearch.common.io.stream.StreamInput; // import the package/class this method depends on
@Override
public void readFrom(StreamInput in) throws IOException {
    termBytes = in.readBytesRef();
    subsetDf = in.readVLong();
    supersetDf = in.readVLong();
    score = in.readDouble();
    aggregations = InternalAggregations.readAggregations(in);
}
Example 11: readFrom
import org.elasticsearch.common.io.stream.StreamInput; // import the package/class this method depends on
@Override
public void readFrom(StreamInput in) throws IOException {
    termBytes = in.readBytesRef();
    docCount = in.readVLong();
    docCountError = -1;
    if (showDocCountError) {
        docCountError = in.readLong();
    }
    aggregations = InternalAggregations.readAggregations(in);
}
Example 12: readFrom
import org.elasticsearch.common.io.stream.StreamInput; // import the package/class this method depends on
@Override
public void readFrom(StreamInput in) throws IOException {
    name = in.readString();
    length = in.readVLong();
    checksum = in.readOptionalString();
    String versionString = in.readOptionalString();
    writtenBy = Lucene.parseVersionLenient(versionString, null);
    hash = in.readBytesRef();
}
Example 13: readFrom
import org.elasticsearch.common.io.stream.StreamInput; // import the package/class this method depends on
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    percolatorTypeId = in.readByte();
    requestedSize = in.readVInt();
    count = in.readVLong();
    matches = new BytesRef[in.readVInt()];
    for (int i = 0; i < matches.length; i++) {
        matches[i] = in.readBytesRef();
    }
    scores = new float[in.readVInt()];
    for (int i = 0; i < scores.length; i++) {
        scores[i] = in.readFloat();
    }
    int size = in.readVInt();
    for (int i = 0; i < size; i++) {
        int mSize = in.readVInt();
        Map<String, HighlightField> fields = new HashMap<>();
        for (int j = 0; j < mSize; j++) {
            fields.put(in.readString(), HighlightField.readHighlightField(in));
        }
        hls.add(fields);
    }
    aggregations = InternalAggregations.readOptionalAggregations(in);
    if (in.readBoolean()) {
        int pipelineAggregatorsSize = in.readVInt();
        List<SiblingPipelineAggregator> pipelineAggregators = new ArrayList<>(pipelineAggregatorsSize);
        for (int i = 0; i < pipelineAggregatorsSize; i++) {
            BytesReference type = in.readBytesReference();
            PipelineAggregator pipelineAggregator = PipelineAggregatorStreams.stream(type).readResult(in);
            pipelineAggregators.add((SiblingPipelineAggregator) pipelineAggregator);
        }
        this.pipelineAggregators = pipelineAggregators;
    }
}