This article collects typical usage examples of the Java method org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo.getAllStructFieldTypeInfos. If you are wondering what StructTypeInfo.getAllStructFieldTypeInfos does, how to call it, or want real-world usages, the curated examples below may help. You can also explore the enclosing class, org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo, for further context.
Below are 15 code examples of StructTypeInfo.getAllStructFieldTypeInfos, sorted by popularity.
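Before diving into the examples, here is a minimal, self-contained sketch (not taken from any example below; the type string struct<id:int,name:string> is purely illustrative) of how a StructTypeInfo is typically obtained and what getAllStructFieldTypeInfos returns:

import java.util.List;

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class StructTypeInfoDemo {
  public static void main(String[] args) {
    // Parse a Hive type string into a TypeInfo and narrow it to a struct.
    StructTypeInfo structType = (StructTypeInfo) TypeInfoUtils
        .getTypeInfoFromTypeString("struct<id:int,name:string>");
    // Field names and field type infos come back in declaration order.
    List<String> names = structType.getAllStructFieldNames();
    List<TypeInfo> types = structType.getAllStructFieldTypeInfos();
    for (int i = 0; i < names.size(); i++) {
      System.out.println(names.get(i) + " : " + types.get(i).getTypeName());
    }
  }
}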
Example 1: createStructObjectInspector
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
public static LazySimpleStructObjectInspector createStructObjectInspector(TypeInfo type,
    LazySerDeParameters serdeParams) {
  StructTypeInfo structTypeInfo = (StructTypeInfo) type;
  List<String> fieldNames = structTypeInfo.getAllStructFieldNames();
  List<TypeInfo> fieldTypeInfos = structTypeInfo.getAllStructFieldTypeInfos();
  List<ObjectInspector> fieldObjectInspectors = new ArrayList<ObjectInspector>(fieldTypeInfos.size());
  for (int i = 0; i < fieldTypeInfos.size(); i++) {
    fieldObjectInspectors.add(createObjectInspector(fieldTypeInfos.get(i), serdeParams));
  }
  return LazyObjectInspectorFactory.getLazySimpleStructObjectInspector(
      fieldNames, fieldObjectInspectors, null,
      serdeParams.getSeparators()[1],
      serdeParams, ObjectInspectorOptions.JAVA);
  // return ObjectInspectorFactory.getStandardStructObjectInspector(fieldNames, fieldObjectInspectors);
}
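A hedged usage sketch for the helper above. The table properties, column definitions, and the serde name "demo" are illustrative assumptions; LazySerDeParameters is constructed here the way LazySimpleSerDe builds it internally in recent Hive versions, and its constructor throws SerDeException:

// Hypothetical caller; assumes the helper above is in scope.
Properties tbl = new Properties();
tbl.setProperty(serdeConstants.LIST_COLUMNS, "s");
tbl.setProperty(serdeConstants.LIST_COLUMN_TYPES, "struct<a:int,b:string>");
LazySerDeParameters serdeParams =
    new LazySerDeParameters(new Configuration(), tbl, "demo");
TypeInfo type = TypeInfoUtils.getTypeInfoFromTypeString("struct<a:int,b:string>");
LazySimpleStructObjectInspector oi = createStructObjectInspector(type, serdeParams);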
Example 2: deserializeStruct
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
private Object deserializeStruct(StructTypeInfo type, Object data)
    throws SerDeException {
  if (!(data instanceof Map)) {
    throw new SerDeException("Value not of type map");
  }
  //noinspection unchecked
  Map<String, Object> map = (Map<String, Object>) data;
  List<String> fieldNames = type.getAllStructFieldNames();
  List<TypeInfo> fieldTypes = type.getAllStructFieldTypeInfos();
  // When deserializing a struct the returned value is a list of values in the same order as the field names.
  List<Object> values = Lists.newArrayListWithCapacity(fieldNames.size());
  for (int i = 0; i < fieldNames.size(); i++) {
    Object rawValue = getRawValueOrNullIfAbsent(fieldNames.get(i), map);
    Object value = deserialize(fieldTypes.get(i), rawValue);
    values.add(value);
  }
  return values;
}
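Why return a List in field order? Hive's standard struct ObjectInspector consumes struct values from exactly such a list. A minimal sketch, independent of the example above (assuming standard Java object inspectors; the type string and values are illustrative):

StructTypeInfo t = (StructTypeInfo) TypeInfoUtils
    .getTypeInfoFromTypeString("struct<id:int,name:string>");
StructObjectInspector oi = (StructObjectInspector)
    TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(t);
List<Object> row = Arrays.<Object>asList(7, "x"); // values in field order: id, name
System.out.println(oi.getStructFieldData(row, oi.getStructFieldRef("id"))); // prints 7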
Example 3: ArrayWritableObjectInspector
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
public ArrayWritableObjectInspector(final StructTypeInfo rowTypeInfo) {
  typeInfo = rowTypeInfo;
  fieldNames = rowTypeInfo.getAllStructFieldNames();
  fieldInfos = rowTypeInfo.getAllStructFieldTypeInfos();
  fields = new ArrayList<StructField>(fieldNames.size());
  fieldsByName = new HashMap<String, StructFieldImpl>();
  for (int i = 0; i < fieldNames.size(); ++i) {
    final String name = fieldNames.get(i);
    final TypeInfo fieldInfo = fieldInfos.get(i);
    final StructFieldImpl field = new StructFieldImpl(name, getObjectInspector(fieldInfo), i);
    fields.add(field);
    fieldsByName.put(name, field);
  }
}
Example 4: newFields
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
static Fields newFields(StructTypeInfo structTypeInfo) {
  List<String> existingNames = structTypeInfo.getAllStructFieldNames();
  List<String> namesList = new ArrayList<>(existingNames.size());
  namesList.addAll(existingNames);
  String[] names = namesList.toArray(new String[namesList.size()]);
  List<TypeInfo> typeInfos = structTypeInfo.getAllStructFieldTypeInfos();
  Class<?>[] types = new Class[typeInfos.size()];
  for (int i = 0; i < types.length; i++) {
    Class<?> type = PRIMITIVES.get(typeInfos.get(i));
    if (type == null) {
      type = Object.class;
    }
    types[i] = type;
  }
  return new Fields(names, types);
}
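PRIMITIVES is a lookup table of the enclosing class that this snippet does not show. A plausible reconstruction (an assumption: it maps Hive primitive TypeInfos to the Java classes carried in the resulting Fields) might look like:

// Hypothetical contents; the real project may cover more types.
private static final Map<TypeInfo, Class<?>> PRIMITIVES = new HashMap<>();
static {
  PRIMITIVES.put(TypeInfoFactory.booleanTypeInfo, Boolean.class);
  PRIMITIVES.put(TypeInfoFactory.intTypeInfo, Integer.class);
  PRIMITIVES.put(TypeInfoFactory.longTypeInfo, Long.class);
  PRIMITIVES.put(TypeInfoFactory.doubleTypeInfo, Double.class);
  PRIMITIVES.put(TypeInfoFactory.stringTypeInfo, String.class);
}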
Example 5: ArrayWritableObjectInspector
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
public ArrayWritableObjectInspector(final StructTypeInfo rowTypeInfo) {
  typeInfo = rowTypeInfo;
  fieldNames = rowTypeInfo.getAllStructFieldNames();
  fieldInfos = rowTypeInfo.getAllStructFieldTypeInfos();
  fields = new ArrayList<StructField>(fieldNames.size());
  fieldsByName = new HashMap<String, StructFieldImpl>();
  for (int i = 0; i < fieldNames.size(); ++i) {
    final String name = fieldNames.get(i);
    final TypeInfo fieldInfo = fieldInfos.get(i);
    final StructFieldImpl field = new StructFieldImpl(name, getObjectInspector(fieldInfo), i);
    fields.add(field);
    fieldsByName.put(name, field);
  }
}
Example 6: parseStruct
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
/**
 * Parses a JSON object and its fields. The Hive metadata is used to
 * determine how to parse the object fields.
 *
 * @param field
 *          - The JSON object to parse
 * @param fieldTypeInfo
 *          - Metadata about the Hive column
 * @return - A list of the object's field values, in struct field order
 */
@SuppressWarnings("unchecked")
private Object parseStruct(final Object field,
    final StructTypeInfo fieldTypeInfo) {
  final Map<Object, Object> map = (Map<Object, Object>) field;
  final ArrayList<TypeInfo> structTypes = fieldTypeInfo
      .getAllStructFieldTypeInfos();
  final ArrayList<String> structNames = fieldTypeInfo
      .getAllStructFieldNames();
  final List<Object> structRow = new ArrayList<Object>(structTypes.size());
  for (int i = 0; i < structNames.size(); i++) {
    structRow.add(parseField(map.get(structNames.get(i)),
        structTypes.get(i)));
  }
  return structRow;
}
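The recursion here (parseField dispatching back into parseStruct for nested struct fields) can be seen in isolation using only public Hive API. A minimal sketch, with an illustrative type string:

// Walk a nested struct type recursively, printing each leaf field's path and type.
static void walk(String path, TypeInfo type) {
  if (type instanceof StructTypeInfo) {
    StructTypeInfo struct = (StructTypeInfo) type;
    List<String> names = struct.getAllStructFieldNames();
    List<TypeInfo> types = struct.getAllStructFieldTypeInfos();
    for (int i = 0; i < names.size(); i++) {
      walk(path + "." + names.get(i), types.get(i));
    }
  } else {
    System.out.println(path + " : " + type.getTypeName());
  }
}
// Example call:
// walk("root", TypeInfoUtils.getTypeInfoFromTypeString(
//     "struct<name:string,address:struct<city:string,zip:int>>"));
// prints root.name : string, root.address.city : string, root.address.zip : int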
Example 7: createObjectInspectorWorker
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
private ObjectInspector createObjectInspectorWorker(TypeInfo ti) throws SerDeException {
  switch (ti.getCategory()) {
    case PRIMITIVE:
      PrimitiveTypeInfo pti = (PrimitiveTypeInfo) ti;
      return PrimitiveObjectInspectorFactory.getPrimitiveJavaObjectInspector(pti);
    case STRUCT:
      StructTypeInfo sti = (StructTypeInfo) ti;
      List<ObjectInspector> ois = new ArrayList<ObjectInspector>(sti.getAllStructFieldTypeInfos().size());
      for (TypeInfo typeInfo : sti.getAllStructFieldTypeInfos()) {
        ois.add(createObjectInspectorWorker(typeInfo));
      }
      return ObjectInspectorFactory.getStandardStructObjectInspector(sti.getAllStructFieldNames(), ois);
    case LIST:
      ListTypeInfo lti = (ListTypeInfo) ti;
      TypeInfo listElementTypeInfo = lti.getListElementTypeInfo();
      return ObjectInspectorFactory.getStandardListObjectInspector(createObjectInspectorWorker(listElementTypeInfo));
    default:
      throw new SerDeException("No Hive categories matched for [" + ti + "]");
  }
}
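This worker handles only the PRIMITIVE, STRUCT, and LIST categories and throws for everything else. If MAP support were needed, a case following the same recursive pattern might look like this (a sketch, not part of the original example):

case MAP:
  MapTypeInfo mti = (MapTypeInfo) ti;
  return ObjectInspectorFactory.getStandardMapObjectInspector(
      createObjectInspectorWorker(mti.getMapKeyTypeInfo()),
      createObjectInspectorWorker(mti.getMapValueTypeInfo()));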
Example 8: getRelDataTypeFromHiveType
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
private RelDataType getRelDataTypeFromHiveType(RelDataTypeFactory typeFactory, TypeInfo typeInfo) {
  switch (typeInfo.getCategory()) {
    case PRIMITIVE:
      return getRelDataTypeFromHivePrimitiveType(typeFactory, ((PrimitiveTypeInfo) typeInfo));
    case LIST: {
      ListTypeInfo listTypeInfo = (ListTypeInfo) typeInfo;
      RelDataType listElemTypeInfo = getRelDataTypeFromHiveType(typeFactory, listTypeInfo.getListElementTypeInfo());
      return typeFactory.createArrayType(listElemTypeInfo, -1);
    }
    case MAP: {
      MapTypeInfo mapTypeInfo = (MapTypeInfo) typeInfo;
      RelDataType keyType = getRelDataTypeFromHiveType(typeFactory, mapTypeInfo.getMapKeyTypeInfo());
      RelDataType valueType = getRelDataTypeFromHiveType(typeFactory, mapTypeInfo.getMapValueTypeInfo());
      return typeFactory.createMapType(keyType, valueType);
    }
    case STRUCT: {
      StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
      ArrayList<String> fieldNames = structTypeInfo.getAllStructFieldNames();
      ArrayList<TypeInfo> fieldHiveTypeInfoList = structTypeInfo.getAllStructFieldTypeInfos();
      List<RelDataType> fieldRelDataTypeList = Lists.newArrayList();
      for (TypeInfo fieldHiveType : fieldHiveTypeInfoList) {
        fieldRelDataTypeList.add(getRelDataTypeFromHiveType(typeFactory, fieldHiveType));
      }
      return typeFactory.createStructType(fieldRelDataTypeList, fieldNames);
    }
    case UNION:
      logger.warn("There is no UNION data type in SQL. Converting it to Sql type OTHER to avoid " +
          "breaking INFORMATION_SCHEMA queries");
      return typeFactory.createSqlType(SqlTypeName.OTHER);
  }
  throwUnsupportedHiveDataTypeError(typeInfo.getCategory().toString());
  return null;
}
Example 9: parseStruct
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
/**
 * Parses a JSON object and its fields. The Hive metadata is used to
 * determine how to parse the object fields.
 *
 * @param field - The JSON object to parse
 * @param fieldTypeInfo - Metadata about the Hive column
 * @return - A list of the object's field values, in struct field order
 */
private Object parseStruct(Object field, StructTypeInfo fieldTypeInfo) {
  Map<Object, Object> map = (Map<Object, Object>) field;
  ArrayList<TypeInfo> structTypes = fieldTypeInfo.getAllStructFieldTypeInfos();
  ArrayList<String> structNames = fieldTypeInfo.getAllStructFieldNames();
  List<Object> structRow = new ArrayList<Object>(structTypes.size());
  for (int i = 0; i < structNames.size(); i++) {
    structRow.add(parseField(map.get(structNames.get(i)), structTypes.get(i)));
  }
  return structRow;
}
Example 10: deserializeStruct
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
private Object deserializeStruct(String columnName, StructTypeInfo columnType) {
  // No equivalent Java type for the backing structure, need to recurse
  // and build a list
  ArrayList<TypeInfo> innerFieldTypes = (ArrayList<TypeInfo>) columnType
      .getAllStructFieldTypeInfos();
  List<Object> innerObjectRow = new ArrayList<Object>(
      innerFieldTypes.size());
  List<String> innerColumnNames = columnType.getAllStructFieldNames();
  // rowElements and fieldNo are parser state of the enclosing SerDe;
  // workerBase (not shown) recursively deserializes the inner fields.
  rowElements.add("");
  fieldNo++;
  return workerBase(innerObjectRow, innerFieldTypes.size(),
      innerColumnNames, innerFieldTypes);
}
Example 11: parseStruct
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
/**
 * Parses a JSON object and its fields. The Hive metadata is used to
 * determine how to parse the object fields.
 *
 * @param field - The JSON object to parse
 * @param fieldTypeInfo - Metadata about the Hive column
 * @return - A list of the object's field values, in struct field order
 */
private Object parseStruct(Object field, StructTypeInfo fieldTypeInfo) {
  Map<Object, Object> map = (Map<Object, Object>) field;
  ArrayList<TypeInfo> structTypes = fieldTypeInfo.getAllStructFieldTypeInfos();
  ArrayList<String> structNames = fieldTypeInfo.getAllStructFieldNames();
  List<Object> structRow = new ArrayList<Object>(structTypes.size());
  // Unlike the otherwise identical variant in Example 9, a null map here
  // yields an empty row instead of a NullPointerException.
  if (map != null) {
    for (int i = 0; i < structNames.size(); i++) {
      structRow.add(parseField(map.get(structNames.get(i)), structTypes.get(i)));
    }
  }
  return structRow;
}
Example 12: PigStructInspector
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
PigStructInspector(StructTypeInfo info) {
  ArrayList<String> fieldNames = info.getAllStructFieldNames();
  ArrayList<TypeInfo> fieldTypes = info.getAllStructFieldTypeInfos();
  fields = new ArrayList<StructField>(fieldNames.size());
  for (int i = 0; i < fieldNames.size(); ++i) {
    fields.add(new Field(fieldNames.get(i),
        createObjectInspector(fieldTypes.get(i)), i));
  }
}
Example 13: OrcLazyStructObjectInspector
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
public OrcLazyStructObjectInspector(StructTypeInfo info) {
  ArrayList<String> fieldNames = info.getAllStructFieldNames();
  ArrayList<TypeInfo> fieldTypes = info.getAllStructFieldTypeInfos();
  fields = new ArrayList<StructField>(fieldNames.size());
  for (int i = 0; i < fieldNames.size(); ++i) {
    fields.add(new Field(fieldNames.get(i),
        OrcLazyObjectInspectorUtils.createWritableObjectInspector(fieldTypes.get(i)), i));
  }
}
Example 14: OrcLazyRowObjectInspector
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
public OrcLazyRowObjectInspector(StructTypeInfo info) {
  super(info.getAllStructFieldNames().size());
  ArrayList<String> fieldNames = info.getAllStructFieldNames();
  ArrayList<TypeInfo> fieldTypes = info.getAllStructFieldTypeInfos();
  for (int i = 0; i < fieldNames.size(); ++i) {
    fields.add(new Field(fieldNames.get(i),
        OrcLazyObjectInspectorUtils.createLazyObjectInspector(fieldTypes.get(i)), i));
  }
}
Example 15: OrcStructInspector
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
public OrcStructInspector(StructTypeInfo info) {
  ArrayList<String> fieldNames = info.getAllStructFieldNames();
  ArrayList<TypeInfo> fieldTypes = info.getAllStructFieldTypeInfos();
  fields = new ArrayList<StructField>(fieldNames.size());
  for (int i = 0; i < fieldNames.size(); ++i) {
    fields.add(new Field(fieldNames.get(i),
        OrcLazyObjectInspectorUtils.createWritableObjectInspector(fieldTypes.get(i)), i));
  }
}
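Finally, when no Hive type string is at hand, a StructTypeInfo can also be assembled programmatically with TypeInfoFactory. A minimal sketch (the field names and types are illustrative):

List<String> names = Arrays.asList("id", "name");
List<TypeInfo> types = Arrays.<TypeInfo>asList(
    TypeInfoFactory.intTypeInfo, TypeInfoFactory.stringTypeInfo);
StructTypeInfo struct = (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(names, types);
System.out.println(struct.getAllStructFieldTypeInfos()); // prints [int, string]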