This article collects typical usage examples of the Java method org.apache.hadoop.hive.serde2.columnar.BytesRefWritable.getLength. If you are wondering what BytesRefWritable.getLength does and how to use it, the curated code examples below may help. You can also explore further usage examples of the containing class, org.apache.hadoop.hive.serde2.columnar.BytesRefWritable.
The following presents 6 code examples of the BytesRefWritable.getLength method, sorted by popularity by default.
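Before the examples, here is a minimal sketch of the access pattern they all share: getLength() is read together with getData() and getStart(), because getData() returns a shared backing buffer and only the slice [start, start + length) belongs to the current field. The helper name fieldToString and the UTF-8 decoding are illustrative assumptions, not taken from the examples below.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable;

// Illustrative helper (assumed name): decode one field of a row as UTF-8 text.
static String fieldToString(BytesRefWritable fieldData) throws IOException
{
    byte[] buffer = fieldData.getData();  // shared backing buffer; may also hold bytes of other fields
    int start = fieldData.getStart();     // offset of this field within the buffer
    int length = fieldData.getLength();   // number of bytes belonging to this field
    return new String(buffer, start, length, StandardCharsets.UTF_8);
}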
Example 1: parseBooleanColumn
import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable; // import the package/class the method depends on
private void parseBooleanColumn(int column)
{
    // don't include column number in message because it causes boxing which is expensive here
    checkArgument(!isPartitionColumn[column], "Column is a partition key");

    loaded[column] = true;

    if (hiveColumnIndexes[column] >= value.size()) {
        // this partition may contain fewer fields than what's declared in the schema
        // this happens when additional columns are added to the hive table after a partition has been created
        nulls[column] = true;
    }
    else {
        BytesRefWritable fieldData = value.unCheckedGet(hiveColumnIndexes[column]);

        byte[] bytes;
        try {
            bytes = fieldData.getData();
        }
        catch (IOException e) {
            throw Throwables.propagate(e);
        }

        int start = fieldData.getStart();
        int length = fieldData.getLength();

        parseBooleanColumn(column, bytes, start, length);
    }
}
Example 2: parseLongColumn
import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable; // import the package/class the method depends on
private void parseLongColumn(int column)
{
    // don't include column number in message because it causes boxing which is expensive here
    checkArgument(!isPartitionColumn[column], "Column is a partition key");

    loaded[column] = true;

    if (hiveColumnIndexes[column] >= value.size()) {
        // this partition may contain fewer fields than what's declared in the schema
        // this happens when additional columns are added to the hive table after a partition has been created
        nulls[column] = true;
    }
    else {
        BytesRefWritable fieldData = value.unCheckedGet(hiveColumnIndexes[column]);

        byte[] bytes;
        try {
            bytes = fieldData.getData();
        }
        catch (IOException e) {
            throw Throwables.propagate(e);
        }

        int start = fieldData.getStart();
        int length = fieldData.getLength();

        parseLongColumn(column, bytes, start, length);
    }
}
Example 3: parseDoubleColumn
import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable; // import the package/class the method depends on
private void parseDoubleColumn(int column)
{
    // don't include column number in message because it causes boxing which is expensive here
    checkArgument(!isPartitionColumn[column], "Column is a partition key");

    loaded[column] = true;

    if (hiveColumnIndexes[column] >= value.size()) {
        // this partition may contain fewer fields than what's declared in the schema
        // this happens when additional columns are added to the hive table after a partition has been created
        nulls[column] = true;
    }
    else {
        BytesRefWritable fieldData = value.unCheckedGet(hiveColumnIndexes[column]);

        byte[] bytes;
        try {
            bytes = fieldData.getData();
        }
        catch (IOException e) {
            throw Throwables.propagate(e);
        }

        int start = fieldData.getStart();
        int length = fieldData.getLength();

        parseDoubleColumn(column, bytes, start, length);
    }
}
Example 4: parseStringColumn
import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable; // import the package/class the method depends on
private void parseStringColumn(int column)
{
    // don't include column number in message because it causes boxing which is expensive here
    checkArgument(!isPartitionColumn[column], "Column is a partition key");

    loaded[column] = true;

    if (hiveColumnIndexes[column] >= value.size()) {
        // this partition may contain fewer fields than what's declared in the schema
        // this happens when additional columns are added to the hive table after a partition has been created
        nulls[column] = true;
    }
    else {
        BytesRefWritable fieldData = value.unCheckedGet(hiveColumnIndexes[column]);

        byte[] bytes;
        try {
            bytes = fieldData.getData();
        }
        catch (IOException e) {
            throw Throwables.propagate(e);
        }

        int start = fieldData.getStart();
        int length = fieldData.getLength();

        parseStringColumn(column, bytes, start, length);
    }
}
Example 5: load
import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable; // import the package/class the method depends on
@Override
public void load(LazyFixedWidthBlock block)
{
    if (loaded) {
        return;
    }

    try {
        BytesRefArrayWritable columnBatch = batch.getColumn(fieldId);
        int positionInBatch = batch.getPositionInBatch();

        int batchSize = block.getPositionCount();
        boolean[] isNull = new boolean[batchSize];
        long[] vector = new long[batchSize];

        for (int i = 0; i < batchSize; i++) {
            BytesRefWritable writable = columnBatch.unCheckedGet(i + positionInBatch);

            byte[] bytes = writable.getData();
            int start = writable.getStart();
            int length = writable.getLength();
            if (length == 0 || isNull(bytes, start, length)) {
                isNull[i] = true;
            }
            else {
                vector[i] = parseLong(bytes, start, length);
            }
        }

        block.setNullVector(isNull);
        block.setRawSlice(wrappedLongArray(vector));

        loaded = true;
    }
    catch (IOException e) {
        throw Throwables.propagate(e);
    }
}
Example 6: load
import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable; // import the package/class the method depends on
@Override
public void load(LazyArrayBlock block)
{
    if (loaded) {
        return;
    }

    try {
        BytesRefArrayWritable columnBatch = batch.getColumn(fieldId);
        int positionInBatch = batch.getPositionInBatch();

        int batchSize = block.getPositionCount();

        BlockBuilder blockBuilder = type.createBlockBuilder(new BlockBuilderStatus(), batchSize);

        for (int i = 0; i < batchSize; i++) {
            BytesRefWritable writable = columnBatch.unCheckedGet(i + positionInBatch);

            int length = writable.getLength();
            if (length > 0) {
                byte[] bytes = writable.getData();
                int start = writable.getStart();

                LazyBinaryObject lazyObject = LazyBinaryFactory.createLazyBinaryObject(fieldInspector);

                ByteArrayRef byteArrayRef = new ByteArrayRef();
                byteArrayRef.setData(bytes);
                lazyObject.init(byteArrayRef, start, length);

                serializeObject(type, blockBuilder, lazyObject.getObject(), fieldInspector);
            }
        }

        block.copyFromBlock(blockBuilder.build());

        loaded = true;
    }
    catch (IOException e) {
        throw Throwables.propagate(e);
    }
}