This article collects typical usage examples of the Java class org.apache.hadoop.hive.ql.exec.vector.ListColumnVector: what the class is for and how it is used in practice. The ListColumnVector class belongs to the org.apache.hadoop.hive.ql.exec.vector package. Six code examples are shown below, ordered by popularity.
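Before the examples, it helps to know the population pattern they all rely on: a ListColumnVector stores the elements of all rows back-to-back in a single shared child vector, and each row records an offset and a length into it, with childCount tracking the next free child slot. The following minimal sketch is not from any of the examples below; it assumes the ORC TypeDescription API for building the batch, and the column name xs is illustrative:

import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.TypeDescription;

public class ListColumnVectorSketch {
    public static void main(String[] args) {
        // One column of type array<bigint>.
        TypeDescription schema = TypeDescription.fromString("struct<xs:array<bigint>>");
        VectorizedRowBatch batch = schema.createRowBatch();
        ListColumnVector xs = (ListColumnVector) batch.cols[0];
        LongColumnVector elems = (LongColumnVector) xs.child;

        long[][] rows = {{1, 2, 3}, {}, {42}};
        for (long[] values : rows) {
            int row = batch.size++;
            // Each row's list is the slice [offset, offset + length) of the
            // shared child vector; childCount marks where the next row starts.
            xs.offsets[row] = xs.childCount;
            xs.lengths[row] = values.length;
            xs.childCount += values.length;
            xs.child.ensureSize(xs.childCount, true);
            for (int i = 0; i < values.length; i++) {
                elems.vector[(int) xs.offsets[row] + i] = values[i];
            }
        }
    }
}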
Example 1: process

import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector; // import the required package/class

@Override
public void process(WayContainer container) {
    DecimalColumnVector lat = (DecimalColumnVector) batch.cols[3];
    DecimalColumnVector lon = (DecimalColumnVector) batch.cols[4];
    ListColumnVector nds = (ListColumnVector) batch.cols[5];
    checkLimit();
    addCommonProperties(container);
    // Ways have no coordinates of their own, so lat/lon are null for this row.
    lat.isNull[row] = true;
    lon.isNull[row] = true;
    lat.set(row, (HiveDecimal) null);
    lon.set(row, (HiveDecimal) null);
    Way way = container.getEntity();
    // addCommonProperties already set nds.offsets[row]; record how many node
    // references this way appends to the shared child vector.
    nds.lengths[row] = way.getWayNodes().size();
    nds.childCount += nds.lengths[row];
    nds.child.ensureSize(nds.childCount, nds.offsets[row] != 0);
    for (int j = 0; j < way.getWayNodes().size(); j++) {
        StructColumnVector ndsStruct = (StructColumnVector) nds.child;
        ((LongColumnVector) ndsStruct.fields[0]).vector[(int) nds.offsets[row] + j] =
                way.getWayNodes().get(j).getNodeId();
    }
}
Example 2: convert

import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector; // import the required package/class

public void convert(JsonElement value, ColumnVector vect, int row) {
    if (value == null || value.isJsonNull()) {
        vect.noNulls = false;
        vect.isNull[row] = true;
    } else {
        ListColumnVector vector = (ListColumnVector) vect;
        JsonArray obj = value.getAsJsonArray();
        // Append this row's elements to the end of the shared child vector.
        vector.lengths[row] = obj.size();
        vector.offsets[row] = vector.childCount;
        vector.childCount += vector.lengths[row];
        vector.child.ensureSize(vector.childCount, true);
        for (int c = 0; c < obj.size(); ++c) {
            childrenConverter.convert(obj.get(c), vector.child,
                    (int) vector.offsets[row] + c);
        }
    }
}
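The convert method above delegates each array element to a childrenConverter, which is not shown in the snippet. A minimal sketch of what such a leaf converter could look like for a bigint child follows; the class name and method shape are assumptions matched to the list converter above, not code from the original project:

import com.google.gson.JsonElement;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;

class LongColumnConverter {
    void convert(JsonElement value, ColumnVector vect, int row) {
        if (value == null || value.isJsonNull()) {
            vect.noNulls = false;
            vect.isNull[row] = true;
        } else {
            // Leaf case: parse the JSON scalar straight into the long vector.
            ((LongColumnVector) vect).vector[row] = value.getAsLong();
        }
    }
}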
Example 3: setList

import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector; // import the required package/class

private static void setList(JSONWriter writer, ListColumnVector vector,
                            TypeDescription schema, int row) throws JSONException {
    writer.array();
    int offset = (int) vector.offsets[row];
    TypeDescription childType = schema.getChildren().get(0);
    for (int i = 0; i < vector.lengths[row]; ++i) {
        // Recurse into setValue (Example 6) for each element.
        setValue(writer, vector.child, childType, offset + i);
    }
    writer.endArray();
}
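For context, a usage sketch: wiring setList to an org.json JSONWriter (which the example appears to use, given writer.array() and JSONException) to print one row of an array<bigint> column. The batch and schema variables are assumed to come from the sketch after the introduction, and setList would need to be visible to the caller:

import java.io.StringWriter;
import org.json.JSONWriter;

StringWriter out = new StringWriter();
JSONWriter writer = new JSONWriter(out);
// The list type is the first (and only) field of the struct schema.
TypeDescription listType = schema.getChildren().get(0);
setList(writer, (ListColumnVector) batch.cols[0], listType, 0);
System.out.println(out); // e.g. [1,2,3]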
Example 4: writeBatch

import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector; // import the required package/class

@Override
void writeBatch(ColumnVector vector, int offset, int length) throws IOException {
    super.writeBatch(vector, offset, length);
    ListColumnVector vec = (ListColumnVector) vector;
    if (vector.isRepeating) {
        // A repeating vector means every row holds the same list, so the
        // same child slice is written once per row.
        if (vector.noNulls || !vector.isNull[0]) {
            int childOffset = (int) vec.offsets[0];
            int childLength = (int) vec.lengths[0];
            for (int i = 0; i < length; ++i) {
                lengths.write(childLength);
                childrenWriters[0].writeBatch(vec.child, childOffset, childLength);
            }
            if (createBloomFilter) {
                bloomFilter.addLong(childLength);
            }
        }
    } else {
        // Write the elements in runs: adjacent rows whose child ranges are
        // contiguous are merged into a single writeBatch call on the child.
        // E.g. offsets {0, 3, 5} with lengths {3, 2, 4} become one call
        // covering child rows [0, 9).
        int currentOffset = 0;
        int currentLength = 0;
        for (int i = 0; i < length; ++i) {
            if (!vec.isNull[i + offset]) {
                int nextLength = (int) vec.lengths[offset + i];
                int nextOffset = (int) vec.offsets[offset + i];
                lengths.write(nextLength);
                if (currentLength == 0) {
                    // First non-null row starts a new run.
                    currentOffset = nextOffset;
                    currentLength = nextLength;
                } else if (currentOffset + currentLength != nextOffset) {
                    // Gap in the child vector: flush the current run.
                    childrenWriters[0].writeBatch(vec.child, currentOffset, currentLength);
                    currentOffset = nextOffset;
                    currentLength = nextLength;
                } else {
                    // Contiguous: extend the current run.
                    currentLength += nextLength;
                }
            }
        }
        if (currentLength != 0) {
            childrenWriters[0].writeBatch(vec.child, currentOffset, currentLength);
        }
    }
}
Example 5: addCommonProperties

import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector; // import the required package/class

private void addCommonProperties(EntityContainer container) {
    LongColumnVector id = (LongColumnVector) batch.cols[0];
    BytesColumnVector type = (BytesColumnVector) batch.cols[1];
    MapColumnVector tags = (MapColumnVector) batch.cols[2];
    ListColumnVector nds = (ListColumnVector) batch.cols[5];
    ListColumnVector members = (ListColumnVector) batch.cols[6];
    LongColumnVector changeset = (LongColumnVector) batch.cols[7];
    TimestampColumnVector timestamp = (TimestampColumnVector) batch.cols[8];
    LongColumnVector uid = (LongColumnVector) batch.cols[9];
    BytesColumnVector user = (BytesColumnVector) batch.cols[10];
    LongColumnVector version = (LongColumnVector) batch.cols[11];
    LongColumnVector visible = (LongColumnVector) batch.cols[12];
    Entity entity = container.getEntity();
    id.vector[row] = entity.getId();
    changeset.vector[row] = entity.getChangesetId();
    type.setVal(row, entity.getType().toString().toLowerCase().getBytes());
    tags.offsets[row] = tags.childCount;
    tags.lengths[row] = entity.getTags().size(); // number of key/value pairings
    tags.childCount += tags.lengths[row];
    tags.keys.ensureSize(tags.childCount, tags.offsets[row] != 0);
    tags.values.ensureSize(tags.childCount, tags.offsets[row] != 0);
    int i = 0;
    for (Tag tag : entity.getTags()) {
        ((BytesColumnVector) tags.keys).setVal((int) tags.offsets[row] + i, tag.getKey().getBytes());
        ((BytesColumnVector) tags.values).setVal((int) tags.offsets[row] + i, tag.getValue().getBytes());
        i++;
    }
    timestamp.time[row] = entity.getTimestamp().getTime();
    timestamp.nanos[row] = 0;
    uid.vector[row] = entity.getUser().getId();
    user.setVal(row, entity.getUser().getName().getBytes());
    version.vector[row] = entity.getVersion();
    visible.vector[row] = 1;
    if (entity.getMetaTags().get("visible") == Boolean.FALSE) {
        visible.vector[row] = 0;
    }
    // Reserve empty list slots for nds and members; the way processor in
    // Example 1 fills in the nds lengths afterwards.
    nds.offsets[row] = nds.childCount;
    nds.lengths[row] = 0;
    members.offsets[row] = members.childCount;
    members.lengths[row] = 0;
}
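The column indices above imply a fixed row schema. A reconstruction of what that schema might look like as an ORC TypeDescription is shown below; the exact field names, decimal precisions, and member-struct layout are assumptions inferred from the casts, not taken from the original source:

TypeDescription schema = TypeDescription.fromString(
        "struct<id:bigint,type:string,tags:map<string,string>,"
      + "lat:decimal(9,7),lon:decimal(10,7),"
      + "nds:array<struct<ref:bigint>>,"
      + "members:array<struct<type:string,ref:bigint,role:string>>,"
      + "changeset:bigint,timestamp:timestamp,uid:bigint,"
      + "user:string,version:bigint,visible:boolean>");
VectorizedRowBatch batch = schema.createRowBatch();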
Example 6: setValue

import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector; // import the required package/class

static void setValue(JSONWriter writer, ColumnVector vector,
                     TypeDescription schema, int row) throws JSONException {
    if (vector.isRepeating) {
        row = 0;
    }
    if (vector.noNulls || !vector.isNull[row]) {
        switch (schema.getCategory()) {
            case BOOLEAN:
                writer.value(((LongColumnVector) vector).vector[row] != 0);
                break;
            case BYTE:
            case SHORT:
            case INT:
            case LONG:
                writer.value(((LongColumnVector) vector).vector[row]);
                break;
            case FLOAT:
            case DOUBLE:
                writer.value(((DoubleColumnVector) vector).vector[row]);
                break;
            case STRING:
            case CHAR:
            case VARCHAR:
                writer.value(((BytesColumnVector) vector).toString(row));
                break;
            case DECIMAL:
                writer.value(((DecimalColumnVector) vector).vector[row].toString());
                break;
            case DATE:
                writer.value(new DateWritable(
                        (int) ((LongColumnVector) vector).vector[row]).toString());
                break;
            case TIMESTAMP:
                writer.value(((TimestampColumnVector) vector)
                        .asScratchTimestamp(row).toString());
                break;
            case LIST:
                setList(writer, (ListColumnVector) vector, schema, row);
                break;
            case STRUCT:
                setStruct(writer, (StructColumnVector) vector, schema, row);
                break;
            case UNION:
                // printUnion(writer, (UnionColumnVector) vector, schema, row);
                break;
            case BINARY:
                // printBinary(writer, (BytesColumnVector) vector, row);
                break;
            case MAP:
                // printMap(writer, (MapColumnVector) vector, schema, row);
                break;
            default:
                throw new IllegalArgumentException("Unknown type " + schema.toString());
        }
    } else {
        writer.value(null);
    }
}
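The UNION, BINARY, and MAP branches are left commented out in this example. A sketch of how the MAP branch might be completed, assuming string keys (JSON object keys must be strings); the method name mirrors setList/setStruct and is an assumption, not part of the original:

static void setMap(JSONWriter writer, MapColumnVector vector,
                   TypeDescription schema, int row) throws JSONException {
    writer.object();
    // For a map type, getChildren() returns [keyType, valueType].
    TypeDescription valueType = schema.getChildren().get(1);
    int offset = (int) vector.offsets[row];
    for (int i = 0; i < vector.lengths[row]; ++i) {
        // Assumes a string key type; other key types would need formatting.
        writer.key(((BytesColumnVector) vector.keys).toString(offset + i));
        setValue(writer, vector.values, valueType, offset + i);
    }
    writer.endObject();
}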