

Java StructTypeInfo.getAllStructFieldNames Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo.getAllStructFieldNames. If you are wondering what exactly StructTypeInfo.getAllStructFieldNames does, how to call it, or where to find it used in practice, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo.


Below are 15 code examples of the StructTypeInfo.getAllStructFieldNames method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
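Before diving into the collected examples, here is a minimal, self-contained sketch of the method in isolation (the class name StructFieldNamesDemo and the struct<id:int,name:string> type string are illustrative only; Hive's serde2 library is assumed to be on the classpath): a Hive type string is parsed into a TypeInfo, cast to StructTypeInfo, and getAllStructFieldNames() then returns the field names in declaration order.

import java.util.List;

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class StructFieldNamesDemo {
  public static void main(String[] args) {
    // Parse a Hive type string; a struct type string yields a StructTypeInfo.
    TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString("struct<id:int,name:string>");
    StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;

    // Field names and field types are returned in the same (declaration) order.
    List<String> fieldNames = structTypeInfo.getAllStructFieldNames();
    List<TypeInfo> fieldTypes = structTypeInfo.getAllStructFieldTypeInfos();
    for (int i = 0; i < fieldNames.size(); i++) {
      System.out.println(fieldNames.get(i) + " : " + fieldTypes.get(i).getTypeName());
    }
  }
}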

Example 1: toMetacatType

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
@Override
public Type toMetacatType(final String type) {
    // Hack to fix presto "varchar" type coming in with no length which is required by Hive.
    final TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(
        "varchar".equals(type.toLowerCase()) ? serdeConstants.STRING_TYPE_NAME : type);
    ObjectInspector oi = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
    // The standard struct object inspector forces field names to lower case, however in Metacat we need to preserve
    // the original case of the struct fields so we wrap it with our wrapper to force the fieldNames to keep
    // their original case
    if (typeInfo.getCategory().equals(ObjectInspector.Category.STRUCT)) {
        final StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
        final StandardStructObjectInspector objectInspector = (StandardStructObjectInspector) oi;
        oi = new HiveTypeConverter.SameCaseStandardStructObjectInspector(
            structTypeInfo.getAllStructFieldNames(), objectInspector);
    }
    return getCanonicalType(oi);
}
 
Developer: Netflix, Project: metacat, Lines of code: 18, Source: HiveTypeConverter.java

Example 2: createStructObjectInspector

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
public static LazySimpleStructObjectInspector createStructObjectInspector(TypeInfo type,
    LazySerDeParameters serdeParams) {
  StructTypeInfo structTypeInfo = (StructTypeInfo) type;
  List<String> fieldNames = structTypeInfo.getAllStructFieldNames();
  List<TypeInfo> fieldTypeInfos = structTypeInfo.getAllStructFieldTypeInfos();
  List<ObjectInspector> fieldObjectInspectors = new ArrayList<ObjectInspector>(fieldTypeInfos.size());

  for (int i = 0; i < fieldTypeInfos.size(); i++) {
    fieldObjectInspectors.add(createObjectInspector(fieldTypeInfos.get(i), serdeParams));
  }

  return LazyObjectInspectorFactory.getLazySimpleStructObjectInspector(
      fieldNames, fieldObjectInspectors, null,
      serdeParams.getSeparators()[1],
      serdeParams, ObjectInspectorOptions.JAVA);

  // return ObjectInspectorFactory.getStandardStructObjectInspector(fieldNames, fieldObjectInspectors);
}
 
Developer: mini666, Project: hive-phoenix-handler, Lines of code: 19, Source: PhoenixObjectInspectorFactory.java

Example 3: deserializeStruct

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
private Object deserializeStruct(StructTypeInfo type, Object data)
        throws SerDeException {
    if (!(data instanceof Map)) {
        throw new SerDeException("Value not of type map");
    }
    //noinspection unchecked
    Map<String, Object> map = (Map<String, Object>) data;

    List<String> fieldNames = type.getAllStructFieldNames();
    List<TypeInfo> fieldTypes = type.getAllStructFieldTypeInfos();

    // When deserializing a struct the returned value is a list of values in the same order as the field names.

    List<Object> values = Lists.newArrayListWithCapacity(fieldNames.size());
    for (int i=0; i < fieldNames.size(); i++) {
        Object rawValue = getRawValueOrNullIfAbsent(fieldNames.get(i), map);
        Object value = deserialize(fieldTypes.get(i), rawValue);
        values.add(value);
    }

    return values;
}
 
Developer: bazaarvoice, Project: emodb, Lines of code: 23, Source: EmoSerDe.java

Example 4: ArrayWritableObjectInspector

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
public ArrayWritableObjectInspector(final StructTypeInfo rowTypeInfo) {

  typeInfo = rowTypeInfo;
  fieldNames = rowTypeInfo.getAllStructFieldNames();
  fieldInfos = rowTypeInfo.getAllStructFieldTypeInfos();
  fields = new ArrayList<StructField>(fieldNames.size());
  fieldsByName = new HashMap<String, StructFieldImpl>();

  for (int i = 0; i < fieldNames.size(); ++i) {
    final String name = fieldNames.get(i);
    final TypeInfo fieldInfo = fieldInfos.get(i);

    final StructFieldImpl field = new StructFieldImpl(name, getObjectInspector(fieldInfo), i);
    fields.add(field);
    fieldsByName.put(name, field);
  }
}
 
Developer: shunfei, Project: indexr, Lines of code: 18, Source: ArrayWritableObjectInspector.java

Example 5: newFields

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
static Fields newFields(StructTypeInfo structTypeInfo) {
  List<String> existingNames = structTypeInfo.getAllStructFieldNames();
  List<String> namesList = new ArrayList<>(existingNames.size());
  namesList.addAll(existingNames);
  String[] names = namesList.toArray(new String[namesList.size()]);

  List<TypeInfo> typeInfos = structTypeInfo.getAllStructFieldTypeInfos();
  Class<?>[] types = new Class[typeInfos.size()];
  for (int i = 0; i < types.length; i++) {
    Class<?> type = PRIMITIVES.get(typeInfos.get(i));
    if (type == null) {
      type = Object.class;
    }
    types[i] = type;
  }

  return new Fields(names, types);
}
 
Developer: HotelsDotCom, Project: corc, Lines of code: 19, Source: SchemaFactory.java

Example 6: ArrayWritableObjectInspector

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
public ArrayWritableObjectInspector(final StructTypeInfo rowTypeInfo) {

    typeInfo = rowTypeInfo;
    fieldNames = rowTypeInfo.getAllStructFieldNames();
    fieldInfos = rowTypeInfo.getAllStructFieldTypeInfos();
    fields = new ArrayList<StructField>(fieldNames.size());
    fieldsByName = new HashMap<String, StructFieldImpl>();

    for (int i = 0; i < fieldNames.size(); ++i) {
      final String name = fieldNames.get(i);
      final TypeInfo fieldInfo = fieldInfos.get(i);

      final StructFieldImpl field = new StructFieldImpl(name, getObjectInspector(fieldInfo), i);
      fields.add(field);
      fieldsByName.put(name, field);
    }
  }
 
Developer: apache, Project: parquet-mr, Lines of code: 18, Source: ArrayWritableObjectInspector.java

Example 7: parseStruct

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
/**
 * Parses a JSON object and its fields. The Hive metadata is used to
 * determine how to parse the object fields.
 *
 * @param field
 *            - The JSON object to parse
 * @param fieldTypeInfo
 *            - Metadata about the Hive column
 * @return - A list of the struct's field values, in field declaration order
 */
@SuppressWarnings("unchecked")
private Object parseStruct(final Object field,
		final StructTypeInfo fieldTypeInfo) {
	final Map<Object, Object> map = (Map<Object, Object>) field;
	final ArrayList<TypeInfo> structTypes = fieldTypeInfo
			.getAllStructFieldTypeInfos();
	final ArrayList<String> structNames = fieldTypeInfo
			.getAllStructFieldNames();
	final List<Object> structRow = new ArrayList<Object>(structTypes.size());
	for (int i = 0; i < structNames.size(); i++) {
		structRow.add(parseField(map.get(structNames.get(i)),
				structTypes.get(i)));
	}
	return structRow;
}
 
Developer: jaibeermalik, Project: searchanalytics-bigdata, Lines of code: 26, Source: JSONSerDe.java

Example 8: getRelDataTypeFromHiveType

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
private RelDataType getRelDataTypeFromHiveType(RelDataTypeFactory typeFactory, TypeInfo typeInfo) {
  switch(typeInfo.getCategory()) {
    case PRIMITIVE:
      return getRelDataTypeFromHivePrimitiveType(typeFactory, ((PrimitiveTypeInfo) typeInfo));

    case LIST: {
      ListTypeInfo listTypeInfo = (ListTypeInfo)typeInfo;
      RelDataType listElemTypeInfo = getRelDataTypeFromHiveType(typeFactory, listTypeInfo.getListElementTypeInfo());
      return typeFactory.createArrayType(listElemTypeInfo, -1);
    }

    case MAP: {
      MapTypeInfo mapTypeInfo = (MapTypeInfo)typeInfo;
      RelDataType keyType = getRelDataTypeFromHiveType(typeFactory, mapTypeInfo.getMapKeyTypeInfo());
      RelDataType valueType = getRelDataTypeFromHiveType(typeFactory, mapTypeInfo.getMapValueTypeInfo());
      return typeFactory.createMapType(keyType, valueType);
    }

    case STRUCT: {
      StructTypeInfo structTypeInfo = (StructTypeInfo)typeInfo;
      ArrayList<String> fieldNames = structTypeInfo.getAllStructFieldNames();
      ArrayList<TypeInfo> fieldHiveTypeInfoList = structTypeInfo.getAllStructFieldTypeInfos();
      List<RelDataType> fieldRelDataTypeList = Lists.newArrayList();
      for(TypeInfo fieldHiveType : fieldHiveTypeInfoList) {
        fieldRelDataTypeList.add(getRelDataTypeFromHiveType(typeFactory, fieldHiveType));
      }
      return typeFactory.createStructType(fieldRelDataTypeList, fieldNames);
    }

    case UNION:
      logger.warn("There is no UNION data type in SQL. Converting it to Sql type OTHER to avoid " +
          "breaking INFORMATION_SCHEMA queries");
      return typeFactory.createSqlType(SqlTypeName.OTHER);
  }

  throwUnsupportedHiveDataTypeError(typeInfo.getCategory().toString());
  return null;
}
 
Developer: skhalifa, Project: QDrill, Lines of code: 39, Source: DrillHiveTable.java

Example 9: OrcStructFormatter

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
public OrcStructFormatter( final StructTypeInfo typeInfo ){
  container = new ArrayList<Object>();
  childContainer = new ArrayList<KeyAndFormatter>();

  for( String fieldName : typeInfo.getAllStructFieldNames() ){
    TypeInfo childTypeInfo = typeInfo.getStructFieldTypeInfo( fieldName );
    childContainer.add( new KeyAndFormatter( fieldName , childTypeInfo ) );
  }
}
 
Developer: yahoojapan, Project: dataplatform-schema-lib, Lines of code: 10, Source: OrcStructFormatter.java

Example 10: getTypeSignature

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
@Nonnull
private static TypeSignature getTypeSignature(TypeInfo typeInfo)
{
    switch (typeInfo.getCategory()) {
        case PRIMITIVE:
            PrimitiveObjectInspector.PrimitiveCategory primitiveCategory = ((PrimitiveTypeInfo) typeInfo).getPrimitiveCategory();
            Type primitiveType = getPrimitiveType(primitiveCategory);
            if (primitiveType == null) {
                break;
            }
            return primitiveType.getTypeSignature();
        case MAP:
            MapTypeInfo mapTypeInfo = checkType(typeInfo, MapTypeInfo.class, "fieldInspector");
            TypeSignature keyType = getTypeSignature(mapTypeInfo.getMapKeyTypeInfo());
            TypeSignature valueType = getTypeSignature(mapTypeInfo.getMapValueTypeInfo());
            return new TypeSignature(
                    StandardTypes.MAP,
                    ImmutableList.of(TypeSignatureParameter.of(keyType), TypeSignatureParameter.of(valueType)));
        case LIST:
            ListTypeInfo listTypeInfo = checkType(typeInfo, ListTypeInfo.class, "fieldInspector");
            TypeSignature elementType = getTypeSignature(listTypeInfo.getListElementTypeInfo());
            return new TypeSignature(
                    StandardTypes.ARRAY,
                    ImmutableList.of(TypeSignatureParameter.of(elementType)));
        case STRUCT:
            StructTypeInfo structTypeInfo = checkType(typeInfo, StructTypeInfo.class, "fieldInspector");
            List<TypeSignature> fieldTypes = structTypeInfo.getAllStructFieldTypeInfos()
                    .stream()
                    .map(HiveType::getTypeSignature)
                    .collect(toList());
            return new TypeSignature(StandardTypes.ROW, fieldTypes, structTypeInfo.getAllStructFieldNames());
    }
    throw new PrestoException(NOT_SUPPORTED, format("Unsupported Hive type: %s", typeInfo));
}
 
Developer: y-lan, Project: presto, Lines of code: 35, Source: HiveType.java

Example 11: setReadColumns

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
/**
 * Sets which fields are to be read from the ORC file
 */
static void setReadColumns(Configuration conf, StructTypeInfo actualStructTypeInfo) {
  StructTypeInfo readStructTypeInfo = getTypeInfo(conf);
  LOG.info("Read StructTypeInfo: {}", readStructTypeInfo);

  List<Integer> ids = new ArrayList<>();
  List<String> names = new ArrayList<>();

  List<String> readNames = readStructTypeInfo.getAllStructFieldNames();
  List<String> actualNames = actualStructTypeInfo.getAllStructFieldNames();

  for (int i = 0; i < actualNames.size(); i++) {
    String actualName = actualNames.get(i);
    if (readNames.contains(actualName)) {
      // make sure they are the same type
      TypeInfo actualTypeInfo = actualStructTypeInfo.getStructFieldTypeInfo(actualName);
      TypeInfo readTypeInfo = readStructTypeInfo.getStructFieldTypeInfo(actualName);
      if (!actualTypeInfo.equals(readTypeInfo)) {
        throw new IllegalStateException("readTypeInfo [" + readTypeInfo + "] does not match actualTypeInfo ["
            + actualTypeInfo + "]");
      }
      // mark the column as to-be-read
      ids.add(i);
      names.add(actualName);
    }
  }
  if (ids.size() == 0) {
    throw new IllegalStateException("None of the selected columns were found in the ORC file.");
  }
  LOG.info("Set column projection on columns: {} ({})", ids, names);
  ColumnProjectionUtils.appendReadColumns(conf, ids, names);
}
 
Developer: HotelsDotCom, Project: corc, Lines of code: 35, Source: CorcInputFormat.java

Example 12: extractRowStruct

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
private StructTypeInfo extractRowStruct(StructTypeInfo typeInfo) {
  List<String> actualNames = typeInfo.getAllStructFieldNames();
  if (actualNames.size() < ATOMIC_ROW_COLUMN_ID + 1) {
    throw new IllegalArgumentException("Too few rows for a transactional table: " + actualNames);
  }
  String rowStructName = actualNames.get(ATOMIC_ROW_COLUMN_ID);
  if (!ATOMIC_ROW_COLUMN_NAME.equalsIgnoreCase(rowStructName)) {
    throw new IllegalArgumentException("Expected row column name '" + ATOMIC_ROW_COLUMN_NAME + "', found: "
        + rowStructName);
  }
  StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo.getStructFieldTypeInfo(rowStructName);
  LOG.debug("Row StructTypeInfo defined as: {}", structTypeInfo);
  return structTypeInfo;
}
 
Developer: HotelsDotCom, Project: corc, Lines of code: 15, Source: CorcInputFormat.java

Example 13: RecordServiceObjectInspector

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
public RecordServiceObjectInspector(StructTypeInfo rowTypeInfo) {
  List<String> fieldNames = rowTypeInfo.getAllStructFieldNames();
  fields_ = Lists.newArrayListWithExpectedSize(fieldNames.size());
  fieldsByName_ = Maps.newHashMap();

  for (int fieldIdx = 0; fieldIdx < fieldNames.size(); ++fieldIdx) {
    final String name = fieldNames.get(fieldIdx);
    final TypeInfo fieldInfo = rowTypeInfo.getAllStructFieldTypeInfos().get(fieldIdx);
    RecordServiceStructField fieldImpl = new RecordServiceStructField(name,
        getFieldObjectInspector(fieldInfo), fieldIdx);
    fields_.add(fieldImpl);
    fieldsByName_.put(name.toLowerCase(), fieldImpl);
  }
}
 
Developer: cloudera, Project: RecordServiceClient, Lines of code: 15, Source: RecordServiceObjectInspector.java

Example 14: parseStruct

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
/**
 * Parses a JSON object and its fields. The Hive metadata is used to
 * determine how to parse the object fields.
 *
 * @param field - The JSON object to parse
 * @param fieldTypeInfo - Metadata about the Hive column
 * @return - A list of the struct's field values, in field declaration order
 */
private Object parseStruct(Object field, StructTypeInfo fieldTypeInfo) {
    Map<Object,Object> map = (Map<Object,Object>)field;
    ArrayList<TypeInfo> structTypes = fieldTypeInfo.getAllStructFieldTypeInfos();
    ArrayList<String> structNames = fieldTypeInfo.getAllStructFieldNames();

    List<Object> structRow = new ArrayList<Object>(structTypes.size());
    for (int i = 0; i < structNames.size(); i++) {
        structRow.add(parseField(map.get(structNames.get(i)), structTypes.get(i)));
    }
    return structRow;
}
 
Developer: scaleoutsoftware, Project: hServer, Lines of code: 20, Source: JsonSerDe.java

Example 15: deserializeStruct

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // import the package/class this method depends on
private Object deserializeStruct(String columnName, StructTypeInfo columnType) {
	// No equivalent Java type for the backing structure, need to recurse
	// and build a list
	ArrayList<TypeInfo> innerFieldTypes = (ArrayList<TypeInfo>) columnType
			.getAllStructFieldTypeInfos();
	List<Object> innerObjectRow = new ArrayList<Object>(
			innerFieldTypes.size());
	List<String> innerColumnNames = columnType.getAllStructFieldNames();
	rowElements.add("");
	fieldNo++;
	return workerBase(innerObjectRow, innerFieldTypes.size(),
			innerColumnNames, innerFieldTypes);

}
 
Developer: rbheemana, Project: Cobol-to-Hive, Lines of code: 15, Source: CobolDeserializer.java


Note: The org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo.getAllStructFieldNames examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult each project's license before using or redistributing the code; do not republish without permission.