This article collects typical usage examples of the Java method org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo.getAllStructFieldNames. If you are wondering what StructTypeInfo.getAllStructFieldNames does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo.
The sections below present 14 code examples of StructTypeInfo.getAllStructFieldNames, sorted by popularity by default.
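Before the project examples, here is a minimal, self-contained sketch of what the method does: it returns the struct's field names in declaration order. The type string and class name below are illustrative only, not taken from any of the projects quoted later.

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class GetAllStructFieldNamesDemo {
    public static void main(String[] args) {
        // Parse a Hive type string into a TypeInfo tree.
        TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString("struct<id:int,name:string>");
        StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
        // Prints "id" then "name": the names come back in declaration order.
        for (String fieldName : structTypeInfo.getAllStructFieldNames()) {
            System.out.println(fieldName);
        }
    }
}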
Example 1: toMetacatType
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // the package/class this method depends on
@Override
public Type toMetacatType(final String type) {
    // Hack to fix the Presto "varchar" type coming in with no length, which Hive requires.
    final TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(
        "varchar".equals(type.toLowerCase()) ? serdeConstants.STRING_TYPE_NAME : type);
    ObjectInspector oi = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
    // The standard struct object inspector forces field names to lower case. In Metacat we need
    // to preserve the original case of the struct fields, so we wrap it with our own inspector
    // that keeps the field names in their original case.
    if (typeInfo.getCategory().equals(ObjectInspector.Category.STRUCT)) {
        final StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
        final StandardStructObjectInspector objectInspector = (StandardStructObjectInspector) oi;
        oi = new HiveTypeConverter.SameCaseStandardStructObjectInspector(
            structTypeInfo.getAllStructFieldNames(), objectInspector);
    }
    return getCanonicalType(oi);
}
Example 2: createStructObjectInspector
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // the package/class this method depends on
public static LazySimpleStructObjectInspector createStructObjectInspector(TypeInfo type,
        LazySerDeParameters serdeParams) {
    StructTypeInfo structTypeInfo = (StructTypeInfo) type;
    List<String> fieldNames = structTypeInfo.getAllStructFieldNames();
    List<TypeInfo> fieldTypeInfos = structTypeInfo.getAllStructFieldTypeInfos();
    List<ObjectInspector> fieldObjectInspectors = new ArrayList<ObjectInspector>(fieldTypeInfos.size());
    for (int i = 0; i < fieldTypeInfos.size(); i++) {
        fieldObjectInspectors.add(createObjectInspector(fieldTypeInfos.get(i), serdeParams));
    }
    return LazyObjectInspectorFactory.getLazySimpleStructObjectInspector(
        fieldNames, fieldObjectInspectors, null,
        serdeParams.getSeparators()[1],
        serdeParams, ObjectInspectorOptions.JAVA);
    // return ObjectInspectorFactory.getStandardStructObjectInspector(fieldNames, fieldObjectInspectors);
}
Example 3: deserializeStruct
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // the package/class this method depends on
private Object deserializeStruct(StructTypeInfo type, Object data)
        throws SerDeException {
    if (!(data instanceof Map)) {
        throw new SerDeException("Value not of type map");
    }
    //noinspection unchecked
    Map<String, Object> map = (Map<String, Object>) data;
    List<String> fieldNames = type.getAllStructFieldNames();
    List<TypeInfo> fieldTypes = type.getAllStructFieldTypeInfos();
    // When deserializing a struct, the returned value is a list of values in the same order as the field names.
    List<Object> values = Lists.newArrayListWithCapacity(fieldNames.size());
    for (int i = 0; i < fieldNames.size(); i++) {
        Object rawValue = getRawValueOrNullIfAbsent(fieldNames.get(i), map);
        Object value = deserialize(fieldTypes.get(i), rawValue);
        values.add(value);
    }
    return values;
}
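The helper getRawValueOrNullIfAbsent is referenced above but not shown. Below is a plausible minimal sketch; the plain, case-sensitive lookup is an assumption, as the real project may normalize field names differently.

// Hypothetical sketch of the helper used in Example 3; not part of the quoted source.
private Object getRawValueOrNullIfAbsent(String fieldName, Map<String, Object> map) {
    // A plain Map lookup already yields null for absent keys; the explicit check
    // just makes the "null if absent" contract visible.
    return map.containsKey(fieldName) ? map.get(fieldName) : null;
}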
Example 4: ArrayWritableObjectInspector
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // the package/class this method depends on
public ArrayWritableObjectInspector(final StructTypeInfo rowTypeInfo) {
    typeInfo = rowTypeInfo;
    fieldNames = rowTypeInfo.getAllStructFieldNames();
    fieldInfos = rowTypeInfo.getAllStructFieldTypeInfos();
    fields = new ArrayList<StructField>(fieldNames.size());
    fieldsByName = new HashMap<String, StructFieldImpl>();
    for (int i = 0; i < fieldNames.size(); ++i) {
        final String name = fieldNames.get(i);
        final TypeInfo fieldInfo = fieldInfos.get(i);
        final StructFieldImpl field = new StructFieldImpl(name, getObjectInspector(fieldInfo), i);
        fields.add(field);
        fieldsByName.put(name, field);
    }
}
Example 5: newFields
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // the package/class this method depends on
static Fields newFields(StructTypeInfo structTypeInfo) {
    List<String> existingNames = structTypeInfo.getAllStructFieldNames();
    List<String> namesList = new ArrayList<>(existingNames.size());
    namesList.addAll(existingNames);
    String[] names = namesList.toArray(new String[namesList.size()]);
    List<TypeInfo> typeInfos = structTypeInfo.getAllStructFieldTypeInfos();
    Class<?>[] types = new Class[typeInfos.size()];
    for (int i = 0; i < types.length; i++) {
        Class<?> type = PRIMITIVES.get(typeInfos.get(i));
        if (type == null) {
            type = Object.class;
        }
        types[i] = type;
    }
    return new Fields(names, types);
}
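The PRIMITIVES lookup used above is defined elsewhere in the source class and is not shown. The sketch below is one plausible shape, mapping Hive TypeInfo singletons to the Java classes a Cascading Fields instance can carry; the exact entries are an assumption.

// Hypothetical sketch of the PRIMITIVES map referenced in Example 5
// (TypeInfoFactory is org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory).
private static final Map<TypeInfo, Class<?>> PRIMITIVES = new HashMap<>();
static {
    PRIMITIVES.put(TypeInfoFactory.booleanTypeInfo, Boolean.class);
    PRIMITIVES.put(TypeInfoFactory.intTypeInfo, Integer.class);
    PRIMITIVES.put(TypeInfoFactory.longTypeInfo, Long.class);
    PRIMITIVES.put(TypeInfoFactory.doubleTypeInfo, Double.class);
    PRIMITIVES.put(TypeInfoFactory.stringTypeInfo, String.class);
}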
Example 6: parseStruct
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // the package/class this method depends on
/**
 * Parses a JSON object and its fields. The Hive metadata is used to
 * determine how to parse the object fields.
 *
 * @param field - The JSON object to parse
 * @param fieldTypeInfo - Metadata about the Hive column
 * @return - A list of the object's field values, in struct field order
 */
@SuppressWarnings("unchecked")
private Object parseStruct(final Object field,
        final StructTypeInfo fieldTypeInfo) {
    final Map<Object, Object> map = (Map<Object, Object>) field;
    final ArrayList<TypeInfo> structTypes = fieldTypeInfo.getAllStructFieldTypeInfos();
    final ArrayList<String> structNames = fieldTypeInfo.getAllStructFieldNames();
    final List<Object> structRow = new ArrayList<Object>(structTypes.size());
    for (int i = 0; i < structNames.size(); i++) {
        structRow.add(parseField(map.get(structNames.get(i)), structTypes.get(i)));
    }
    return structRow;
}
Example 7: getRelDataTypeFromHiveType
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // the package/class this method depends on
private RelDataType getRelDataTypeFromHiveType(RelDataTypeFactory typeFactory, TypeInfo typeInfo) {
    switch (typeInfo.getCategory()) {
        case PRIMITIVE:
            return getRelDataTypeFromHivePrimitiveType(typeFactory, ((PrimitiveTypeInfo) typeInfo));
        case LIST: {
            ListTypeInfo listTypeInfo = (ListTypeInfo) typeInfo;
            RelDataType listElemTypeInfo = getRelDataTypeFromHiveType(typeFactory, listTypeInfo.getListElementTypeInfo());
            return typeFactory.createArrayType(listElemTypeInfo, -1);
        }
        case MAP: {
            MapTypeInfo mapTypeInfo = (MapTypeInfo) typeInfo;
            RelDataType keyType = getRelDataTypeFromHiveType(typeFactory, mapTypeInfo.getMapKeyTypeInfo());
            RelDataType valueType = getRelDataTypeFromHiveType(typeFactory, mapTypeInfo.getMapValueTypeInfo());
            return typeFactory.createMapType(keyType, valueType);
        }
        case STRUCT: {
            StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
            ArrayList<String> fieldNames = structTypeInfo.getAllStructFieldNames();
            ArrayList<TypeInfo> fieldHiveTypeInfoList = structTypeInfo.getAllStructFieldTypeInfos();
            List<RelDataType> fieldRelDataTypeList = Lists.newArrayList();
            for (TypeInfo fieldHiveType : fieldHiveTypeInfoList) {
                fieldRelDataTypeList.add(getRelDataTypeFromHiveType(typeFactory, fieldHiveType));
            }
            return typeFactory.createStructType(fieldRelDataTypeList, fieldNames);
        }
        case UNION:
            logger.warn("There is no UNION data type in SQL. Converting it to SQL type OTHER to avoid " +
                "breaking INFORMATION_SCHEMA queries");
            return typeFactory.createSqlType(SqlTypeName.OTHER);
    }
    throwUnsupportedHiveDataTypeError(typeInfo.getCategory().toString());
    return null;
}
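To see what the recursion produces, here is a hedged illustration. It assumes a RelDataTypeFactory named typeFactory is in scope, and the rendered row type is an expectation, not output copied from the project's tests.

// Illustration only: a nested Hive type exercises the STRUCT, LIST and
// PRIMITIVE branches of the switch above.
TypeInfo nested = TypeInfoUtils.getTypeInfoFromTypeString("struct<id:int,tags:array<string>>");
RelDataType rowType = getRelDataTypeFromHiveType(typeFactory, nested);
// Expected to render roughly as: RecordType(INTEGER id, VARCHAR ARRAY tags)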
Example 8: OrcStructFormatter
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // the package/class this method depends on
public OrcStructFormatter( final StructTypeInfo typeInfo ){
    container = new ArrayList<Object>();
    childContainer = new ArrayList<KeyAndFormatter>();
    for( String fieldName : typeInfo.getAllStructFieldNames() ){
        TypeInfo childTypeInfo = typeInfo.getStructFieldTypeInfo( fieldName );
        childContainer.add( new KeyAndFormatter( fieldName , childTypeInfo ) );
    }
}
Example 9: getTypeSignature
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // the package/class this method depends on
@Nonnull
private static TypeSignature getTypeSignature(TypeInfo typeInfo)
{
    switch (typeInfo.getCategory()) {
        case PRIMITIVE:
            PrimitiveObjectInspector.PrimitiveCategory primitiveCategory = ((PrimitiveTypeInfo) typeInfo).getPrimitiveCategory();
            Type primitiveType = getPrimitiveType(primitiveCategory);
            if (primitiveType == null) {
                break;
            }
            return primitiveType.getTypeSignature();
        case MAP:
            MapTypeInfo mapTypeInfo = checkType(typeInfo, MapTypeInfo.class, "fieldInspector");
            TypeSignature keyType = getTypeSignature(mapTypeInfo.getMapKeyTypeInfo());
            TypeSignature valueType = getTypeSignature(mapTypeInfo.getMapValueTypeInfo());
            return new TypeSignature(
                StandardTypes.MAP,
                ImmutableList.of(TypeSignatureParameter.of(keyType), TypeSignatureParameter.of(valueType)));
        case LIST:
            ListTypeInfo listTypeInfo = checkType(typeInfo, ListTypeInfo.class, "fieldInspector");
            TypeSignature elementType = getTypeSignature(listTypeInfo.getListElementTypeInfo());
            return new TypeSignature(
                StandardTypes.ARRAY,
                ImmutableList.of(TypeSignatureParameter.of(elementType)));
        case STRUCT:
            StructTypeInfo structTypeInfo = checkType(typeInfo, StructTypeInfo.class, "fieldInspector");
            List<TypeSignature> fieldTypes = structTypeInfo.getAllStructFieldTypeInfos()
                .stream()
                .map(HiveType::getTypeSignature)
                .collect(toList());
            return new TypeSignature(StandardTypes.ROW, fieldTypes, structTypeInfo.getAllStructFieldNames());
    }
    throw new PrestoException(NOT_SUPPORTED, format("Unsupported Hive type: %s", typeInfo));
}
Example 10: setReadColumns
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // the package/class this method depends on
/**
 * Sets which fields are to be read from the ORC file.
 */
static void setReadColumns(Configuration conf, StructTypeInfo actualStructTypeInfo) {
    StructTypeInfo readStructTypeInfo = getTypeInfo(conf);
    LOG.info("Read StructTypeInfo: {}", readStructTypeInfo);
    List<Integer> ids = new ArrayList<>();
    List<String> names = new ArrayList<>();
    List<String> readNames = readStructTypeInfo.getAllStructFieldNames();
    List<String> actualNames = actualStructTypeInfo.getAllStructFieldNames();
    for (int i = 0; i < actualNames.size(); i++) {
        String actualName = actualNames.get(i);
        if (readNames.contains(actualName)) {
            // Make sure the read and actual types match.
            TypeInfo actualTypeInfo = actualStructTypeInfo.getStructFieldTypeInfo(actualName);
            TypeInfo readTypeInfo = readStructTypeInfo.getStructFieldTypeInfo(actualName);
            if (!actualTypeInfo.equals(readTypeInfo)) {
                throw new IllegalStateException("readTypeInfo [" + readTypeInfo + "] does not match actualTypeInfo ["
                    + actualTypeInfo + "]");
            }
            // Mark the column as to-be-read.
            ids.add(i);
            names.add(actualName);
        }
    }
    if (ids.isEmpty()) {
        throw new IllegalStateException("None of the selected columns were found in the ORC file.");
    }
    LOG.info("Set column projection on columns: {} ({})", ids, names);
    ColumnProjectionUtils.appendReadColumns(conf, ids, names);
}
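A standalone sketch of what the final ColumnProjectionUtils call does: it records the selected column ids and names in the Configuration under Hive's standard projection keys, which readers such as the ORC input format consult. The key strings are cited from memory and should be treated as an assumption.

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;

public class ProjectionDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Select columns 0 and 2, as Example 10 does for matched field names.
        ColumnProjectionUtils.appendReadColumns(conf, Arrays.asList(0, 2), Arrays.asList("id", "name"));
        System.out.println(conf.get("hive.io.file.readcolumn.ids"));   // expected: 0,2
        System.out.println(conf.get("hive.io.file.readcolumn.names")); // expected: id,name
    }
}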
Example 11: extractRowStruct
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // the package/class this method depends on
private StructTypeInfo extractRowStruct(StructTypeInfo typeInfo) {
    List<String> actualNames = typeInfo.getAllStructFieldNames();
    if (actualNames.size() < ATOMIC_ROW_COLUMN_ID + 1) {
        throw new IllegalArgumentException("Too few columns for a transactional table: " + actualNames);
    }
    String rowStructName = actualNames.get(ATOMIC_ROW_COLUMN_ID);
    if (!ATOMIC_ROW_COLUMN_NAME.equalsIgnoreCase(rowStructName)) {
        throw new IllegalArgumentException("Expected row column name '" + ATOMIC_ROW_COLUMN_NAME + "', found: "
            + rowStructName);
    }
    StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo.getStructFieldTypeInfo(rowStructName);
    LOG.debug("Row StructTypeInfo defined as: {}", structTypeInfo);
    return structTypeInfo;
}
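The two constants above are not shown in the example. Assuming this code reads Hive ACID files, whose event wrapper schema is (operation, originalTransaction, bucket, rowId, currentTransaction, row), a plausible definition is:

// Hypothetical values, assuming the standard Hive ACID event wrapper where the
// user's row struct is the sixth column.
private static final int ATOMIC_ROW_COLUMN_ID = 5;
private static final String ATOMIC_ROW_COLUMN_NAME = "row";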
Example 12: RecordServiceObjectInspector
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // the package/class this method depends on
public RecordServiceObjectInspector(StructTypeInfo rowTypeInfo) {
    List<String> fieldNames = rowTypeInfo.getAllStructFieldNames();
    fields_ = Lists.newArrayListWithExpectedSize(fieldNames.size());
    fieldsByName_ = Maps.newHashMap();
    for (int fieldIdx = 0; fieldIdx < fieldNames.size(); ++fieldIdx) {
        final String name = fieldNames.get(fieldIdx);
        final TypeInfo fieldInfo = rowTypeInfo.getAllStructFieldTypeInfos().get(fieldIdx);
        RecordServiceStructField fieldImpl = new RecordServiceStructField(name,
            getFieldObjectInspector(fieldInfo), fieldIdx);
        fields_.add(fieldImpl);
        fieldsByName_.put(name.toLowerCase(), fieldImpl);
    }
}
Example 13: parseStruct
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // the package/class this method depends on
/**
 * Parses a JSON object and its fields. The Hive metadata is used to
 * determine how to parse the object fields.
 *
 * @param field - The JSON object to parse
 * @param fieldTypeInfo - Metadata about the Hive column
 * @return - A list of the object's field values, in struct field order
 */
private Object parseStruct(Object field, StructTypeInfo fieldTypeInfo) {
    Map<Object, Object> map = (Map<Object, Object>) field;
    ArrayList<TypeInfo> structTypes = fieldTypeInfo.getAllStructFieldTypeInfos();
    ArrayList<String> structNames = fieldTypeInfo.getAllStructFieldNames();
    List<Object> structRow = new ArrayList<Object>(structTypes.size());
    for (int i = 0; i < structNames.size(); i++) {
        structRow.add(parseField(map.get(structNames.get(i)), structTypes.get(i)));
    }
    return structRow;
}
Example 14: deserializeStruct
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; // the package/class this method depends on
private Object deserializeStruct(String columnName, StructTypeInfo columnType) {
    // There is no equivalent Java type for the backing structure; recurse and build a list.
    ArrayList<TypeInfo> innerFieldTypes = (ArrayList<TypeInfo>) columnType
        .getAllStructFieldTypeInfos();
    List<Object> innerObjectRow = new ArrayList<Object>(innerFieldTypes.size());
    List<String> innerColumnNames = columnType.getAllStructFieldNames();
    rowElements.add("");
    fieldNo++;
    return workerBase(innerObjectRow, innerFieldTypes.size(),
        innerColumnNames, innerFieldTypes);
}