本文整理汇总了Java中org.apache.hadoop.hive.serde2.objectinspector.UnionObjectInspector.getObjectInspectors方法的典型用法代码示例。如果您正苦于以下问题:Java UnionObjectInspector.getObjectInspectors方法的具体用法?Java UnionObjectInspector.getObjectInspectors怎么用?Java UnionObjectInspector.getObjectInspectors使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hive.serde2.objectinspector.UnionObjectInspector的用法示例。
在下文中一共展示了UnionObjectInspector.getObjectInspectors方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: UnionTreeWriter
import org.apache.hadoop.hive.serde2.objectinspector.UnionObjectInspector; //导入方法依赖的package包/类
/**
 * Tree writer for ORC union-typed columns: creates one child writer per union
 * branch and a run-length-encoded byte stream that records, per row, which
 * branch (tag) holds the value.
 *
 * @throws IOException if creating a child writer or the tag stream fails
 */
UnionTreeWriter(int columnId,
    ObjectInspector inspector,
    StreamFactory writer,
    boolean nullable, Configuration conf,
    boolean useVInts, boolean lowMemoryMode,
    MemoryEstimate memoryEstimate) throws IOException {
  super(columnId, inspector, writer, nullable, conf, useVInts, memoryEstimate);
  final UnionObjectInspector unionInspector = (UnionObjectInspector) inspector;
  final List<ObjectInspector> branchInspectors = unionInspector.getObjectInspectors();
  // One child writer per union branch, in declaration order.
  childrenWriters = new TreeWriter[branchInspectors.size()];
  for (int branch = 0; branch < childrenWriters.length; ++branch) {
    childrenWriters[branch] = createTreeWriter(branchInspectors.get(branch), writer, true,
        conf, useVInts, lowMemoryMode, memoryEstimate);
  }
  // DATA stream carries the per-row branch tag.
  tags = new RunLengthByteWriter(
      writer.createStream(columnId, OrcProto.Stream.Kind.DATA));
  recordPosition(rowIndexPosition);
}
示例2: convert
import org.apache.hadoop.hive.serde2.objectinspector.UnionObjectInspector; //导入方法依赖的package包/类
/**
* Convert the value using the ObjectInspector. The Writable values are converted to their
* respective Java objects from using the provided inspector.
*
* @param oi the field object inspector
* @param value the value
* @return the corresponding Java object value
*/
/**
 * Convert the value using the ObjectInspector. The Writable values are converted to their
 * respective Java objects from using the provided inspector.
 *
 * @param oi the field object inspector
 * @param value the value
 * @return the corresponding Java object value
 */
public static Object convert(final ObjectInspector oi, final Object value) {
  if (value == null) {
    return null;
  }
  switch (oi.getCategory()) {
    case PRIMITIVE:
      return OrcReadFunctionMap.get(oi.getTypeName()).apply(value);
    case LIST: {
      final ListObjectInspector listInspector = (ListObjectInspector) oi;
      final ObjectInspector elementInspector = listInspector.getListElementObjectInspector();
      return listInspector.getList(value).stream()
          .map(element -> convert(elementInspector, element))
          .collect(Collectors.toList());
    }
    case MAP: {
      final MapObjectInspector mapInspector = (MapObjectInspector) oi;
      final ObjectInspector keyInspector = mapInspector.getMapKeyObjectInspector();
      final ObjectInspector valueInspector = mapInspector.getMapValueObjectInspector();
      // LinkedHashMap keeps the source iteration order; throwingMerger rejects key clashes.
      return mapInspector.getMap(value).entrySet().stream()
          .collect(Collectors.toMap(
              entry -> convert(keyInspector, entry.getKey()),
              entry -> convert(valueInspector, entry.getValue()),
              throwingMerger(), LinkedHashMap::new));
    }
    case STRUCT: {
      final StructObjectInspector structInspector = (StructObjectInspector) oi;
      // One converted element per struct field, in field-ref order.
      return structInspector.getAllStructFieldRefs().stream()
          .map(field ->
              convert(field.getFieldObjectInspector(),
                  structInspector.getStructFieldData(value, field)))
          .toArray();
    }
    case UNION: {
      final UnionObjectInspector unionInspector = (UnionObjectInspector) oi;
      final List<? extends ObjectInspector> branchInspectors =
          unionInspector.getObjectInspectors();
      final byte tag = unionInspector.getTag(value);
      // Pair of [branch tag, converted branch value].
      return new Object[] {tag, convert(branchInspectors.get(tag), unionInspector.getField(value))};
    }
    default:
      // Unreached for known categories; mirrors the original null fall-through.
      return null;
  }
}
示例3: UnionConverter
import org.apache.hadoop.hive.serde2.objectinspector.UnionObjectInspector; //导入方法依赖的package包/类
public UnionConverter(ConverterFactory factory, UnionObjectInspector inspector) {
for (ObjectInspector child : inspector.getObjectInspectors()) {
converters.add(factory.newConverter(child));
}
}