This page collects typical usage examples of the Java method org.apache.hadoop.hive.serde2.columnar.BytesRefWritable.getData. If you are wondering what BytesRefWritable.getData does, how to call it, or where to find examples of its use, the curated samples below may help. You can also look further into usage examples of the enclosing class, org.apache.hadoop.hive.serde2.columnar.BytesRefWritable.
A total of 7 code examples of the BytesRefWritable.getData method are shown below, sorted by popularity by default.
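Before turning to the examples, a minimal sketch of the common access pattern may help orient you; it is not one of the 7 examples below, and the class name GetDataSketch and the helper readAsString are illustrative names only. What every example below shares is that the byte array returned by getData() is treated as a backing buffer, so reads are always confined to the getStart()/getLength() window, and getData() is declared to throw IOException.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable;

public class GetDataSketch
{
    // Hypothetical helper: decode the field referenced by a BytesRefWritable as UTF-8 text.
    public static String readAsString(BytesRefWritable ref)
            throws IOException
    {
        byte[] bytes = ref.getData();   // backing buffer; do not assume the field starts at offset 0
        int start = ref.getStart();     // offset of this field within the buffer
        int length = ref.getLength();   // number of bytes belonging to this field
        return new String(bytes, start, length, StandardCharsets.UTF_8);
    }
}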
Example 1: parseBooleanColumn
import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable; // import the package/class this method depends on
private void parseBooleanColumn(int column)
{
    // don't include column number in message because it causes boxing which is expensive here
    checkArgument(!isPartitionColumn[column], "Column is a partition key");
    loaded[column] = true;
    if (hiveColumnIndexes[column] >= value.size()) {
        // this partition may contain fewer fields than what's declared in the schema
        // this happens when additional columns are added to the hive table after a partition has been created
        nulls[column] = true;
    }
    else {
        BytesRefWritable fieldData = value.unCheckedGet(hiveColumnIndexes[column]);
        byte[] bytes;
        try {
            bytes = fieldData.getData();
        }
        catch (IOException e) {
            throw Throwables.propagate(e);
        }
        int start = fieldData.getStart();
        int length = fieldData.getLength();
        parseBooleanColumn(column, bytes, start, length);
    }
}
Example 2: parseLongColumn
import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable; // import the package/class this method depends on
private void parseLongColumn(int column)
{
    // don't include column number in message because it causes boxing which is expensive here
    checkArgument(!isPartitionColumn[column], "Column is a partition key");
    loaded[column] = true;
    if (hiveColumnIndexes[column] >= value.size()) {
        // this partition may contain fewer fields than what's declared in the schema
        // this happens when additional columns are added to the hive table after a partition has been created
        nulls[column] = true;
    }
    else {
        BytesRefWritable fieldData = value.unCheckedGet(hiveColumnIndexes[column]);
        byte[] bytes;
        try {
            bytes = fieldData.getData();
        }
        catch (IOException e) {
            throw Throwables.propagate(e);
        }
        int start = fieldData.getStart();
        int length = fieldData.getLength();
        parseLongColumn(column, bytes, start, length);
    }
}
Example 3: parseDoubleColumn
import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable; // import the package/class this method depends on
private void parseDoubleColumn(int column)
{
    // don't include column number in message because it causes boxing which is expensive here
    checkArgument(!isPartitionColumn[column], "Column is a partition key");
    loaded[column] = true;
    if (hiveColumnIndexes[column] >= value.size()) {
        // this partition may contain fewer fields than what's declared in the schema
        // this happens when additional columns are added to the hive table after a partition has been created
        nulls[column] = true;
    }
    else {
        BytesRefWritable fieldData = value.unCheckedGet(hiveColumnIndexes[column]);
        byte[] bytes;
        try {
            bytes = fieldData.getData();
        }
        catch (IOException e) {
            throw Throwables.propagate(e);
        }
        int start = fieldData.getStart();
        int length = fieldData.getLength();
        parseDoubleColumn(column, bytes, start, length);
    }
}
Example 4: load
import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable; // import the package/class this method depends on
@Override
public void load(LazySliceArrayBlock block)
{
    if (loaded) {
        return;
    }
    try {
        BytesRefArrayWritable columnBatch = batch.getColumn(fieldId);
        int positionInBatch = batch.getPositionInBatch();
        int batchSize = block.getPositionCount();
        Slice[] vector = new Slice[batchSize];
        for (int i = 0; i < batchSize; i++) {
            BytesRefWritable writable = columnBatch.unCheckedGet(i + positionInBatch);
            int length = writable.getLength();
            if (length > 0) {
                byte[] bytes = writable.getData();
                int start = writable.getStart();
                if ((length == 1) && bytes[start] == HIVE_EMPTY_STRING_BYTE) {
                    vector[i] = Slices.EMPTY_SLICE;
                }
                else {
                    vector[i] = Slices.wrappedBuffer(Arrays.copyOfRange(bytes, start, start + length));
                }
            }
        }
        block.setValues(vector);
        loaded = true;
    }
    catch (IOException e) {
        throw Throwables.propagate(e);
    }
}
Example 5: parseObjectColumn
import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable; // import the package/class this method depends on
private void parseObjectColumn(int column)
{
    // don't include column number in message because it causes boxing which is expensive here
    checkArgument(!isPartitionColumn[column], "Column is a partition key");
    loaded[column] = true;
    if (hiveColumnIndexes[column] >= value.size()) {
        // this partition may contain fewer fields than what's declared in the schema
        // this happens when additional columns are added to the hive table after a partition has been created
        nulls[column] = true;
    }
    else {
        BytesRefWritable fieldData = value.unCheckedGet(hiveColumnIndexes[column]);
        byte[] bytes;
        try {
            bytes = fieldData.getData();
        }
        catch (IOException e) {
            throw Throwables.propagate(e);
        }
        int start = fieldData.getStart();
        int length = fieldData.getLength();
        parseObjectColumn(column, bytes, start, length);
    }
}
Example 6: parseStringColumn
import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable; // import the package/class this method depends on
private void parseStringColumn(int column)
{
    // don't include column number in message because it causes boxing which is expensive here
    checkArgument(!isPartitionColumn[column], "Column is a partition key");
    loaded[column] = true;
    if (hiveColumnIndexes[column] >= value.size()) {
        // this partition may contain fewer fields than what's declared in the schema
        // this happens when additional columns are added to the hive table after a partition has been created
        nulls[column] = true;
    }
    else {
        BytesRefWritable fieldData = value.unCheckedGet(hiveColumnIndexes[column]);
        byte[] bytes;
        try {
            bytes = fieldData.getData();
        }
        catch (IOException e) {
            throw Throwables.propagate(e);
        }
        int start = fieldData.getStart();
        int length = fieldData.getLength();
        parseStringColumn(column, bytes, start, length);
    }
}
Example 7: load
import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable; // import the package/class this method depends on
@Override
public void load(LazyFixedWidthBlock block)
{
    if (loaded) {
        return;
    }
    try {
        BytesRefArrayWritable columnBatch = batch.getColumn(fieldId);
        int positionInBatch = batch.getPositionInBatch();
        int batchSize = block.getPositionCount();
        boolean[] isNull = new boolean[batchSize];
        double[] vector = new double[batchSize];
        for (int i = 0; i < batchSize; i++) {
            BytesRefWritable writable = columnBatch.unCheckedGet(i + positionInBatch);
            byte[] bytes = writable.getData();
            int start = writable.getStart();
            int length = writable.getLength();
            if (length == 0 || isNull(bytes, start, length)) {
                isNull[i] = true;
            }
            else {
                vector[i] = parseDouble(bytes, start, length);
            }
        }
        block.setNullVector(isNull);
        block.setRawSlice(wrappedDoubleArray(vector));
        loaded = true;
    }
    catch (IOException e) {
        throw Throwables.propagate(e);
    }
}