当前位置: 首页>>代码示例>>Java>>正文


Java StructTypeInfo.getAllStructFieldTypeInfos方法代码示例

本文整理汇总了Java中org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo.getAllStructFieldTypeInfos方法的典型用法代码示例。如果您正苦于以下问题:Java StructTypeInfo.getAllStructFieldTypeInfos方法的具体用法?Java StructTypeInfo.getAllStructFieldTypeInfos怎么用?Java StructTypeInfo.getAllStructFieldTypeInfos使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo的用法示例。


在下文中一共展示了StructTypeInfo.getAllStructFieldTypeInfos方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: createStructObjectInspector

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; //导入方法依赖的package包/类
/**
 * Builds a lazy struct ObjectInspector whose per-field inspectors mirror the
 * struct's field type infos.
 *
 * @param type        must be a {@link StructTypeInfo}; the struct schema to inspect
 * @param serdeParams lazy-serde settings (separators, escaping, …) shared by all fields
 * @return a {@link LazySimpleStructObjectInspector} over the struct's fields
 */
public static LazySimpleStructObjectInspector createStructObjectInspector(TypeInfo type,
			LazySerDeParameters serdeParams) {
		StructTypeInfo structType = (StructTypeInfo) type;
		List<String> memberNames = structType.getAllStructFieldNames();
		List<TypeInfo> memberTypes = structType.getAllStructFieldTypeInfos();

		List<ObjectInspector> memberInspectors = new ArrayList<ObjectInspector>(memberTypes.size());
		for (TypeInfo memberType : memberTypes) {
			memberInspectors.add(createObjectInspector(memberType, serdeParams));
		}

		// separators[1] is the second configured delimiter — presumably the
		// field separator at this nesting level; TODO confirm against the serde config.
		return LazyObjectInspectorFactory.getLazySimpleStructObjectInspector(
				memberNames, memberInspectors, null,
				serdeParams.getSeparators()[1],
				serdeParams, ObjectInspectorOptions.JAVA);
	}
 
开发者ID:mini666,项目名称:hive-phoenix-handler,代码行数:19,代码来源:PhoenixObjectInspectorFactory.java

示例2: deserializeStruct

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; //导入方法依赖的package包/类
/**
 * Deserializes a map-shaped value into a Hive struct row.
 *
 * @param type the struct schema (field names and types)
 * @param data the raw value; must be a {@code Map} keyed by field name
 * @return a {@code List} of deserialized field values, ordered like the schema's field names
 * @throws SerDeException if {@code data} is not a map
 */
private Object deserializeStruct(StructTypeInfo type, Object data)
        throws SerDeException {
    // Structs arrive as maps keyed by field name; reject anything else up front.
    if (!(data instanceof Map)) {
        throw new SerDeException("Value not of type map");
    }
    //noinspection unchecked
    Map<String, Object> map = (Map<String, Object>) data;

    List<String> names = type.getAllStructFieldNames();
    List<TypeInfo> types = type.getAllStructFieldTypeInfos();

    // Hive represents a struct as a list of values in schema field order.
    List<Object> row = Lists.newArrayListWithCapacity(names.size());
    for (int i = 0; i < names.size(); i++) {
        Object raw = getRawValueOrNullIfAbsent(names.get(i), map);
        row.add(deserialize(types.get(i), raw));
    }
    return row;
}
 
开发者ID:bazaarvoice,项目名称:emodb,代码行数:23,代码来源:EmoSerDe.java

示例3: ArrayWritableObjectInspector

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; //导入方法依赖的package包/类
/**
 * Captures the row schema and pre-builds one {@code StructField} per declared
 * column, indexed both by position ({@code fields}) and by name ({@code fieldsByName}).
 *
 * @param rowTypeInfo the struct schema describing the row
 */
public ArrayWritableObjectInspector(final StructTypeInfo rowTypeInfo) {

        typeInfo = rowTypeInfo;
        fieldNames = rowTypeInfo.getAllStructFieldNames();
        fieldInfos = rowTypeInfo.getAllStructFieldTypeInfos();
        fields = new ArrayList<StructField>(fieldNames.size());
        fieldsByName = new HashMap<String, StructFieldImpl>();

        int position = 0;
        for (final String fieldName : fieldNames) {
            final StructFieldImpl structField = new StructFieldImpl(
                    fieldName, getObjectInspector(fieldInfos.get(position)), position);
            fields.add(structField);
            fieldsByName.put(fieldName, structField);
            position++;
        }
    }
 
开发者ID:shunfei,项目名称:indexr,代码行数:18,代码来源:ArrayWritableObjectInspector.java

示例4: newFields

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; //导入方法依赖的package包/类
/**
 * Builds a cascading {@code Fields} declaration from a Hive struct schema.
 * Field names come straight from the struct; each field's Java class is looked
 * up in {@code PRIMITIVES}, defaulting to {@code Object} for non-primitive types.
 *
 * @param structTypeInfo the Hive struct schema
 * @return a {@code Fields} with one entry per struct field
 */
static Fields newFields(StructTypeInfo structTypeInfo) {
  List<String> existingNames = structTypeInfo.getAllStructFieldNames();
  // toArray copies directly; the original built a redundant intermediate ArrayList first.
  String[] names = existingNames.toArray(new String[0]);

  List<TypeInfo> typeInfos = structTypeInfo.getAllStructFieldTypeInfos();
  Class<?>[] types = new Class[typeInfos.size()];
  for (int i = 0; i < types.length; i++) {
    // Non-primitive (struct/list/map/…) fields have no mapping and fall back to Object.
    Class<?> type = PRIMITIVES.get(typeInfos.get(i));
    types[i] = (type == null) ? Object.class : type;
  }

  return new Fields(names, types);
}
 
开发者ID:HotelsDotCom,项目名称:corc,代码行数:19,代码来源:SchemaFactory.java

示例5: ArrayWritableObjectInspector

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; //导入方法依赖的package包/类
// Captures the row schema and pre-builds one StructField per declared column,
// indexed both by position (fields) and by name (fieldsByName).
public ArrayWritableObjectInspector(final StructTypeInfo rowTypeInfo) {

    typeInfo = rowTypeInfo;
    fieldNames = rowTypeInfo.getAllStructFieldNames();
    fieldInfos = rowTypeInfo.getAllStructFieldTypeInfos();
    fields = new ArrayList<StructField>(fieldNames.size());
    fieldsByName = new HashMap<String, StructFieldImpl>();

    // fieldNames and fieldInfos are parallel lists; i is also the field's ordinal position.
    for (int i = 0; i < fieldNames.size(); ++i) {
      final String name = fieldNames.get(i);
      final TypeInfo fieldInfo = fieldInfos.get(i);

      final StructFieldImpl field = new StructFieldImpl(name, getObjectInspector(fieldInfo), i);
      fields.add(field);
      fieldsByName.put(name, field);
    }
  }
 
开发者ID:apache,项目名称:parquet-mr,代码行数:18,代码来源:ArrayWritableObjectInspector.java

示例6: parseStruct

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; //导入方法依赖的package包/类
/**
 * Parses a JSON object into a Hive struct row. The Hive metadata determines
 * how each field is parsed; the result is a list of field values in the same
 * order as the struct's declared field names.
 *
 * @param field
 *            - The JSON object to parse (expected to be a {@code Map}); may be null
 * @param fieldTypeInfo
 *            - Metadata about the Hive column
 * @return - A list of parsed field values in schema order; empty if the object is null
 */
@SuppressWarnings("unchecked")
private Object parseStruct(final Object field,
		final StructTypeInfo fieldTypeInfo) {
	final Map<Object, Object> map = (Map<Object, Object>) field;
	final ArrayList<TypeInfo> structTypes = fieldTypeInfo
			.getAllStructFieldTypeInfos();
	final ArrayList<String> structNames = fieldTypeInfo
			.getAllStructFieldNames();
	final List<Object> structRow = new ArrayList<Object>(structTypes.size());
	// Guard against a null/absent JSON object: return an empty row instead of
	// throwing NullPointerException on map.get(...).
	if (map != null) {
		for (int i = 0; i < structNames.size(); i++) {
			structRow.add(parseField(map.get(structNames.get(i)),
					structTypes.get(i)));
		}
	}
	return structRow;
}
 
开发者ID:jaibeermalik,项目名称:searchanalytics-bigdata,代码行数:26,代码来源:JSONSerDe.java

示例7: createObjectInspectorWorker

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; //导入方法依赖的package包/类
/**
 * Recursively translates a Hive {@code TypeInfo} tree into the matching
 * standard {@code ObjectInspector} tree.
 *
 * @param ti the type to translate (primitive, struct, or list)
 * @return the corresponding ObjectInspector
 * @throws SerDeException for unsupported categories (e.g. MAP, UNION)
 */
private ObjectInspector createObjectInspectorWorker(TypeInfo ti) throws SerDeException {
  switch (ti.getCategory()) {
  case PRIMITIVE:
    return PrimitiveObjectInspectorFactory
        .getPrimitiveJavaObjectInspector((PrimitiveTypeInfo) ti);
  case STRUCT:
    StructTypeInfo structInfo = (StructTypeInfo) ti;
    List<TypeInfo> memberTypes = structInfo.getAllStructFieldTypeInfos();
    List<ObjectInspector> memberInspectors =
        new ArrayList<ObjectInspector>(memberTypes.size());
    for (TypeInfo memberType : memberTypes) {
      memberInspectors.add(createObjectInspectorWorker(memberType));
    }
    return ObjectInspectorFactory.getStandardStructObjectInspector(
        structInfo.getAllStructFieldNames(), memberInspectors);
  case LIST:
    TypeInfo elementType = ((ListTypeInfo) ti).getListElementTypeInfo();
    return ObjectInspectorFactory.getStandardListObjectInspector(
        createObjectInspectorWorker(elementType));
  default:
    // Anything else (MAP, UNION, …) is not supported by this generator.
    throw new SerDeException("No Hive categories matched for [" + ti + "]");
  }
}
 
开发者ID:apache,项目名称:incubator-blur,代码行数:21,代码来源:BlurObjectInspectorGenerator.java

示例8: getRelDataTypeFromHiveType

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; //导入方法依赖的package包/类
// Recursively maps a Hive TypeInfo onto the equivalent Calcite RelDataType.
// Unsupported categories fall through the switch to throwUnsupportedHiveDataTypeError.
private RelDataType getRelDataTypeFromHiveType(RelDataTypeFactory typeFactory, TypeInfo typeInfo) {
  switch(typeInfo.getCategory()) {
    case PRIMITIVE:
      return getRelDataTypeFromHivePrimitiveType(typeFactory, ((PrimitiveTypeInfo) typeInfo));

    case LIST: {
      ListTypeInfo listTypeInfo = (ListTypeInfo)typeInfo;
      RelDataType listElemTypeInfo = getRelDataTypeFromHiveType(typeFactory, listTypeInfo.getListElementTypeInfo());
      // -1: array cardinality is unbounded/unknown.
      return typeFactory.createArrayType(listElemTypeInfo, -1);
    }

    case MAP: {
      MapTypeInfo mapTypeInfo = (MapTypeInfo)typeInfo;
      RelDataType keyType = getRelDataTypeFromHiveType(typeFactory, mapTypeInfo.getMapKeyTypeInfo());
      RelDataType valueType = getRelDataTypeFromHiveType(typeFactory, mapTypeInfo.getMapValueTypeInfo());
      return typeFactory.createMapType(keyType, valueType);
    }

    case STRUCT: {
      // Struct fields become the columns of a row type, converted recursively.
      StructTypeInfo structTypeInfo = (StructTypeInfo)typeInfo;
      ArrayList<String> fieldNames = structTypeInfo.getAllStructFieldNames();
      ArrayList<TypeInfo> fieldHiveTypeInfoList = structTypeInfo.getAllStructFieldTypeInfos();
      List<RelDataType> fieldRelDataTypeList = Lists.newArrayList();
      for(TypeInfo fieldHiveType : fieldHiveTypeInfoList) {
        fieldRelDataTypeList.add(getRelDataTypeFromHiveType(typeFactory, fieldHiveType));
      }
      return typeFactory.createStructType(fieldRelDataTypeList, fieldNames);
    }

    case UNION:
      // SQL has no UNION type; degrade to OTHER rather than fail schema queries.
      logger.warn("There is no UNION data type in SQL. Converting it to Sql type OTHER to avoid " +
          "breaking INFORMATION_SCHEMA queries");
      return typeFactory.createSqlType(SqlTypeName.OTHER);
  }

  throwUnsupportedHiveDataTypeError(typeInfo.getCategory().toString());
  return null;
}
 
开发者ID:skhalifa,项目名称:QDrill,代码行数:39,代码来源:DrillHiveTable.java

示例9: parseStruct

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; //导入方法依赖的package包/类
/**
 * Parses a JSON object and its fields. The Hive metadata is used to
 * determine how to parse the object fields.
 *
 * @param field - The JSON object to parse
 * @param fieldTypeInfo - Metadata about the Hive column
 * @return - A map representing the object and its fields
 */
// Parses a JSON object into a Hive struct row (a list of field values in
// schema order), using the Hive metadata to parse each field.
// Returns an empty row when the JSON object is null/absent.
private Object parseStruct(Object field, StructTypeInfo fieldTypeInfo) {
    @SuppressWarnings("unchecked")
    Map<Object,Object> map = (Map<Object,Object>)field;
    ArrayList<TypeInfo> structTypes = fieldTypeInfo.getAllStructFieldTypeInfos();
    ArrayList<String> structNames = fieldTypeInfo.getAllStructFieldNames();

    List<Object> structRow = new ArrayList<Object>(structTypes.size());
    // Guard against a null object: previously this threw NullPointerException
    // on map.get(...). Now it yields an empty row, matching sibling serdes.
    if (map != null) {
        for (int i = 0; i < structNames.size(); i++) {
            structRow.add(parseField(map.get(structNames.get(i)), structTypes.get(i)));
        }
    }
    return structRow;
}
 
开发者ID:scaleoutsoftware,项目名称:hServer,代码行数:20,代码来源:JsonSerDe.java

示例10: deserializeStruct

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; //导入方法依赖的package包/类
// Deserializes a nested struct column by recursing through workerBase over the
// struct's field names/types. NOTE(review): mutates instance state (rowElements,
// fieldNo) as a side effect — presumably cursor bookkeeping for the flat COBOL
// record layout; confirm against workerBase before reordering anything here.
private Object deserializeStruct(String columnName, StructTypeInfo columnType) {
	// No equivalent Java type for the backing structure, need to recurse
	// and build a list
	ArrayList<TypeInfo> innerFieldTypes = (ArrayList<TypeInfo>) columnType
			.getAllStructFieldTypeInfos();
	List<Object> innerObjectRow = new ArrayList<Object>(
			innerFieldTypes.size());
	List<String> innerColumnNames = columnType.getAllStructFieldNames();
	// Placeholder element and field-counter advance before recursing.
	rowElements.add("");
	fieldNo++;
	return workerBase(innerObjectRow, innerFieldTypes.size(),
			innerColumnNames, innerFieldTypes);

}
 
开发者ID:rbheemana,项目名称:Cobol-to-Hive,代码行数:15,代码来源:CobolDeserializer.java

示例11: parseStruct

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; //导入方法依赖的package包/类
/**
 * Parses a JSON object and its fields. The Hive metadata is used to
 * determine how to parse the object fields.
 *
 * @param field         - The JSON object to parse
 * @param fieldTypeInfo - Metadata about the Hive column
 * @return - A map representing the object and its fields
 */
// Parses a JSON object into a Hive struct row: one parsed value per declared
// field, in schema order. A null object yields an empty row.
private Object parseStruct(Object field, StructTypeInfo fieldTypeInfo) {
	Map<Object, Object> jsonObject = (Map<Object, Object>) field;
	ArrayList<TypeInfo> memberTypes = fieldTypeInfo.getAllStructFieldTypeInfos();
	ArrayList<String> memberNames = fieldTypeInfo.getAllStructFieldNames();

	List<Object> row = new ArrayList<Object>(memberTypes.size());
	if (jsonObject != null) {
		int position = 0;
		for (String memberName : memberNames) {
			row.add(parseField(jsonObject.get(memberName), memberTypes.get(position)));
			position++;
		}
	}
	return row;
}
 
开发者ID:micmiu,项目名称:bigdata-tutorial,代码行数:22,代码来源:JSONCDHSerDe.java

示例12: PigStructInspector

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; //导入方法依赖的package包/类
// Builds one Field per struct member, pairing each name with a recursively
// created ObjectInspector and its ordinal position.
PigStructInspector(StructTypeInfo info) {
    ArrayList<String> memberNames = info.getAllStructFieldNames();
    ArrayList<TypeInfo> memberTypes = info.getAllStructFieldTypeInfos();
    fields = new ArrayList<StructField>(memberNames.size());
    int position = 0;
    for (String memberName : memberNames) {
        fields.add(new Field(memberName,
                createObjectInspector(memberTypes.get(position)), position));
        position++;
    }
}
 
开发者ID:sigmoidanalytics,项目名称:spork,代码行数:10,代码来源:OrcUtils.java

示例13: OrcLazyStructObjectInspector

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; //导入方法依赖的package包/类
// Builds one Field per struct member, each backed by a writable
// ObjectInspector for its type and tagged with its ordinal position.
public OrcLazyStructObjectInspector(StructTypeInfo info) {
  ArrayList<String> memberNames = info.getAllStructFieldNames();
  ArrayList<TypeInfo> memberTypes = info.getAllStructFieldTypeInfos();
  fields = new ArrayList<StructField>(memberNames.size());
  int position = 0;
  for (String memberName : memberNames) {
    fields.add(new Field(memberName,
        OrcLazyObjectInspectorUtils.createWritableObjectInspector(memberTypes.get(position)),
        position));
    position++;
  }
}
 
开发者ID:facebookarchive,项目名称:hive-dwrf,代码行数:10,代码来源:OrcLazyStructObjectInspector.java

示例14: OrcLazyRowObjectInspector

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; //导入方法依赖的package包/类
// Row-level inspector: the superclass is sized from the column count, then one
// Field per column is added with a lazily-created ObjectInspector.
public OrcLazyRowObjectInspector(StructTypeInfo info) {
  super(info.getAllStructFieldNames().size());
  ArrayList<String> columnNames = info.getAllStructFieldNames();
  ArrayList<TypeInfo> columnTypes = info.getAllStructFieldTypeInfos();
  for (int col = 0; col < columnNames.size(); col++) {
    Field field = new Field(columnNames.get(col),
        OrcLazyObjectInspectorUtils.createLazyObjectInspector(columnTypes.get(col)), col);
    fields.add(field);
  }
}
 
开发者ID:facebookarchive,项目名称:hive-dwrf,代码行数:10,代码来源:OrcLazyRowObjectInspector.java

示例15: OrcStructInspector

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; //导入方法依赖的package包/类
// Builds one Field per struct member, each with a writable ObjectInspector
// for its type and its ordinal position within the struct.
public OrcStructInspector(StructTypeInfo info) {
  ArrayList<String> memberNames = info.getAllStructFieldNames();
  ArrayList<TypeInfo> memberTypes = info.getAllStructFieldTypeInfos();
  fields = new ArrayList<StructField>(memberNames.size());
  for (int position = 0; position < memberNames.size(); position++) {
    Field field = new Field(memberNames.get(position),
        OrcLazyObjectInspectorUtils.createWritableObjectInspector(memberTypes.get(position)),
        position);
    fields.add(field);
  }
}
 
开发者ID:facebookarchive,项目名称:hive-dwrf,代码行数:10,代码来源:OrcStruct.java


注:本文中的org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo.getAllStructFieldTypeInfos方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。