

Java BytesRefWritable.getLength Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hive.serde2.columnar.BytesRefWritable.getLength. If you are wondering what BytesRefWritable.getLength does and how to use it, the curated examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hive.serde2.columnar.BytesRefWritable.


The following shows 6 code examples of the BytesRefWritable.getLength method, ordered by popularity.
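All six examples share the same access pattern: a BytesRefWritable does not hold a private copy of its bytes; getData() returns a shared backing buffer, and getStart() together with getLength() delimit the field's window within it. Here is a minimal sketch of that pattern, assuming a BytesRefArrayWritable row obtained from an RCFile reader (the class GetLengthDemo and the helper fieldToString are illustrative names, not taken from the examples below):

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable;

public class GetLengthDemo
{
    // Reads one field of an RCFile row as a UTF-8 string. getData() returns the
    // shared backing buffer, so only the window [start, start + length) is valid.
    static String fieldToString(BytesRefArrayWritable row, int column)
            throws IOException
    {
        BytesRefWritable field = row.unCheckedGet(column);
        byte[] bytes = field.getData();   // shared buffer; may hold other fields too
        int start = field.getStart();     // offset of this field within the buffer
        int length = field.getLength();   // number of bytes belonging to this field
        return new String(bytes, start, length, StandardCharsets.UTF_8);
    }
}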

Example 1: parseBooleanColumn

import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable; // import the package/class this method depends on
private void parseBooleanColumn(int column)
{
    // don't include column number in message because it causes boxing which is expensive here
    checkArgument(!isPartitionColumn[column], "Column is a partition key");

    loaded[column] = true;

    if (hiveColumnIndexes[column] >= value.size()) {
        // this partition may contain fewer fields than what's declared in the schema
        // this happens when additional columns are added to the hive table after a partition has been created
        nulls[column] = true;
    }
    else {
        BytesRefWritable fieldData = value.unCheckedGet(hiveColumnIndexes[column]);

        byte[] bytes;
        try {
            bytes = fieldData.getData();
        }
        catch (IOException e) {
            throw Throwables.propagate(e);
        }

        int start = fieldData.getStart();
        int length = fieldData.getLength();

        parseBooleanColumn(column, bytes, start, length);
    }
}
 
Developer: y-lan | Project: presto | Lines of code: 30 | Source: ColumnarTextHiveRecordCursor.java
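Note the design choice above: the cursor never copies field bytes. getData() exposes the row's shared buffer, and getStart()/getLength() merely delimit the field within it, so parsing stays allocation-free. When the bytes must outlive the current row, a defensive copy is needed; the helper below is a hypothetical illustration, not part of the Presto code:

import java.util.Arrays;

public class FieldCopyDemo
{
    // Hypothetical helper: materializes one field out of the shared buffer so
    // the bytes remain valid after the cursor advances to the next row.
    static byte[] copyField(byte[] bytes, int start, int length)
    {
        return Arrays.copyOfRange(bytes, start, start + length);
    }
}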

Example 2: parseLongColumn

import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable; // import the package/class this method depends on
private void parseLongColumn(int column)
{
    // don't include column number in message because it causes boxing which is expensive here
    checkArgument(!isPartitionColumn[column], "Column is a partition key");

    loaded[column] = true;

    if (hiveColumnIndexes[column] >= value.size()) {
        // this partition may contain fewer fields than what's declared in the schema
        // this happens when additional columns are added to the hive table after a partition has been created
        nulls[column] = true;
    }
    else {
        BytesRefWritable fieldData = value.unCheckedGet(hiveColumnIndexes[column]);

        byte[] bytes;
        try {
            bytes = fieldData.getData();
        }
        catch (IOException e) {
            throw Throwables.propagate(e);
        }

        int start = fieldData.getStart();
        int length = fieldData.getLength();

        parseLongColumn(column, bytes, start, length);
    }
}
 
Developer: y-lan | Project: presto | Lines of code: 30 | Source: ColumnarTextHiveRecordCursor.java

Example 3: parseDoubleColumn

import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable; // import the package/class this method depends on
private void parseDoubleColumn(int column)
{
    // don't include column number in message because it causes boxing which is expensive here
    checkArgument(!isPartitionColumn[column], "Column is a partition key");

    loaded[column] = true;

    if (hiveColumnIndexes[column] >= value.size()) {
        // this partition may contain fewer fields than what's declared in the schema
        // this happens when additional columns are added to the hive table after a partition has been created
        nulls[column] = true;
    }
    else {
        BytesRefWritable fieldData = value.unCheckedGet(hiveColumnIndexes[column]);

        byte[] bytes;
        try {
            bytes = fieldData.getData();
        }
        catch (IOException e) {
            throw Throwables.propagate(e);
        }

        int start = fieldData.getStart();
        int length = fieldData.getLength();

        parseDoubleColumn(column, bytes, start, length);
    }
}
 
Developer: y-lan | Project: presto | Lines of code: 30 | Source: ColumnarTextHiveRecordCursor.java

Example 4: parseStringColumn

import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable; // import the package/class this method depends on
private void parseStringColumn(int column)
{
    // don't include column number in message because it causes boxing which is expensive here
    checkArgument(!isPartitionColumn[column], "Column is a partition key");

    loaded[column] = true;

    if (hiveColumnIndexes[column] >= value.size()) {
        // this partition may contain fewer fields than what's declared in the schema
        // this happens when additional columns are added to the hive table after a partition has been created
        nulls[column] = true;
    }
    else {
        BytesRefWritable fieldData = value.unCheckedGet(hiveColumnIndexes[column]);

        byte[] bytes;
        try {
            bytes = fieldData.getData();
        }
        catch (IOException e) {
            throw Throwables.propagate(e);
        }

        int start = fieldData.getStart();
        int length = fieldData.getLength();

        parseStringColumn(column, bytes, start, length);
    }
}
 
Developer: y-lan | Project: presto | Lines of code: 30 | Source: ColumnarTextHiveRecordCursor.java

Example 5: load

import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable; // import the package/class this method depends on
@Override
public void load(LazyFixedWidthBlock block)
{
    if (loaded) {
        return;
    }

    try {
        BytesRefArrayWritable columnBatch = batch.getColumn(fieldId);
        int positionInBatch = batch.getPositionInBatch();

        int batchSize = block.getPositionCount();
        boolean[] isNull = new boolean[batchSize];
        long[] vector = new long[batchSize];

        // decode each field in the batch window; empty or null-marker fields
        // become nulls in the isNull vector
        for (int i = 0; i < batchSize; i++) {
            BytesRefWritable writable = columnBatch.unCheckedGet(i + positionInBatch);

            byte[] bytes = writable.getData();
            int start = writable.getStart();
            int length = writable.getLength();
            if (length == 0 || isNull(bytes, start, length)) {
                isNull[i] = true;
            }
            else {
                vector[i] = parseLong(bytes, start, length);
            }
        }

        block.setNullVector(isNull);
        block.setRawSlice(wrappedLongArray(vector));

        loaded = true;
    }
    catch (IOException e) {
        throw Throwables.propagate(e);
    }
}
 
Developer: y-lan | Project: presto | Lines of code: 39 | Source: RcTextBlockLoader.java
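The isNull and parseLong helpers are not shown in the snippet. A plausible sketch of isNull, assuming Hive's default text-format null marker \N (an assumption on my part; the real RcTextBlockLoader may define it differently):

public class TextNullCheckDemo
{
    // Plausible sketch, assuming Hive's default text null sequence: the two
    // bytes '\\' and 'N'. Zero-length fields are handled separately above.
    static boolean isNull(byte[] bytes, int start, int length)
    {
        return length == 2 && bytes[start] == '\\' && bytes[start + 1] == 'N';
    }
}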

Example 6: load

import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable; // import the package/class this method depends on
@Override
public void load(LazyArrayBlock block)
{
    if (loaded) {
        return;
    }

    try {
        BytesRefArrayWritable columnBatch = batch.getColumn(fieldId);
        int positionInBatch = batch.getPositionInBatch();

        int batchSize = block.getPositionCount();
        BlockBuilder blockBuilder = type.createBlockBuilder(new BlockBuilderStatus(), batchSize);

        // deserialize each non-empty field with Hive's lazy-binary machinery
        for (int i = 0; i < batchSize; i++) {
            BytesRefWritable writable = columnBatch.unCheckedGet(i + positionInBatch);

            int length = writable.getLength();
            if (length > 0) {
                byte[] bytes = writable.getData();
                int start = writable.getStart();
                LazyBinaryObject lazyObject = LazyBinaryFactory.createLazyBinaryObject(fieldInspector);
                ByteArrayRef byteArrayRef = new ByteArrayRef();
                byteArrayRef.setData(bytes);
                lazyObject.init(byteArrayRef, start, length);
                serializeObject(type, blockBuilder, lazyObject.getObject(), fieldInspector);
            }
            else {
                // a zero-length field is null in the binary format; append an
                // explicit null so the builder stays aligned with the batch
                blockBuilder.appendNull();
            }
        }

        block.copyFromBlock(blockBuilder.build());

        loaded = true;
    }
    catch (IOException e) {
        throw Throwables.propagate(e);
    }
}
 
Developer: y-lan | Project: presto | Lines of code: 38 | Source: RcBinaryBlockLoader.java
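Contrast this binary loader with the text loader in Example 5: there is no \N sentinel to scan for here; a getLength() of zero by itself appears to mark a null field, so the loader consults the length first and skips both getData() and deserialization entirely for empty fields.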


Note: The org.apache.hadoop.hive.serde2.columnar.BytesRefWritable.getLength method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers, and copyright remains with the original authors. Please consult each project's license before distributing or using the code; do not republish without permission.