This article collects typical usage examples of the Java method org.apache.hadoop.hive.serde2.objectinspector.StructField.getFieldName. If you are wondering what StructField.getFieldName does, how to call it, or where it is used in practice, the curated examples below should help. For more context, see the enclosing class, org.apache.hadoop.hive.serde2.objectinspector.StructField.
Four code examples of StructField.getFieldName are shown below, sorted by popularity by default.
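Before the project examples, a minimal self-contained sketch of what getFieldName returns may be useful. It builds a struct inspector with Hive's standard ObjectInspector factories; the struct layout (id:int, name:string) is an arbitrary assumption for illustration:

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

public class GetFieldNameDemo {
    public static void main(String[] args) {
        // Build an inspector for struct<id:int, name:string> (illustrative layout).
        List<String> names = Arrays.asList("id", "name");
        List<ObjectInspector> inspectors = Arrays.asList(
                (ObjectInspector) PrimitiveObjectInspectorFactory.javaIntObjectInspector,
                PrimitiveObjectInspectorFactory.javaStringObjectInspector);
        StructObjectInspector structOI =
                ObjectInspectorFactory.getStandardStructObjectInspector(names, inspectors);

        // getFieldName returns the declared name of each struct member,
        // so this prints "id : int" and "name : string".
        for (StructField field : structOI.getAllStructFieldRefs()) {
            System.out.println(field.getFieldName() + " : "
                    + field.getFieldObjectInspector().getTypeName());
        }
    }
}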
Example 1: deparseStruct
import org.apache.hadoop.hive.serde2.objectinspector.StructField; // import the package/class this method depends on
/**
 * Deparses struct data into a serializable JSON object.
 *
 * @param obj - Hive struct data
 * @param structOI - ObjectInspector for the struct
 * @param isRow - Whether or not this struct represents a top-level row
 * @return - A deparsed struct
 */
private Object deparseStruct(Object obj,
                             StructObjectInspector structOI,
                             boolean isRow) {
    Map<Object, Object> struct = new HashMap<Object, Object>();
    List<? extends StructField> fields = structOI.getAllStructFieldRefs();
    for (int i = 0; i < fields.size(); i++) {
        StructField field = fields.get(i);
        // The top-level row object is treated slightly differently from other
        // structs, because the field names for the row do not correctly reflect
        // the Hive column names. For lower-level structs, we can get the field
        // name from the associated StructField object.
        String fieldName = isRow ? colNames.get(i) : field.getFieldName();
        ObjectInspector fieldOI = field.getFieldObjectInspector();
        Object fieldObj = structOI.getStructFieldData(obj, field);
        struct.put(fieldName, deparseObject(fieldObj, fieldOI));
    }
    return struct;
}
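The method above delegates to a deparseObject helper that is not shown on this page. A minimal sketch of the dispatch it presumably performs, assuming only STRUCT and PRIMITIVE handling (a real serde would unpack LIST, MAP and UNION the same way; PrimitiveObjectInspector comes from the same objectinspector package):

// Hypothetical sketch of the deparseObject helper referenced above.
private Object deparseObject(Object obj, ObjectInspector oi) {
    switch (oi.getCategory()) {
        case STRUCT:
            // Nested structs can rely on StructField.getFieldName directly.
            return deparseStruct(obj, (StructObjectInspector) oi, false);
        case PRIMITIVE:
            return ((PrimitiveObjectInspector) oi).getPrimitiveJavaObject(obj);
        default:
            // LIST, MAP and UNION would be recursively unpacked here.
            return obj;
    }
}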
Example 2: deparseStruct
import org.apache.hadoop.hive.serde2.objectinspector.StructField; // import the package/class this method depends on
/**
 * Deparses struct data into a serializable JSON object.
 *
 * @param obj - Hive struct data
 * @param structOI - ObjectInspector for the struct
 * @param isRow - Whether or not this struct represents a top-level row
 * @return - A deparsed struct
 */
private Object deparseStruct(final Object obj,
        final StructObjectInspector structOI, final boolean isRow) {
    final Map<Object, Object> struct = new HashMap<Object, Object>();
    final List<? extends StructField> fields = structOI.getAllStructFieldRefs();
    for (int i = 0; i < fields.size(); i++) {
        final StructField field = fields.get(i);
        // The top-level row object is treated slightly differently from other
        // structs, because the field names for the row do not correctly reflect
        // the Hive column names. For lower-level structs, we can get the field
        // name from the associated StructField object.
        final String fieldName = isRow ? colNames.get(i) : field.getFieldName();
        final ObjectInspector fieldOI = field.getFieldObjectInspector();
        final Object fieldObj = structOI.getStructFieldData(obj, field);
        struct.put(fieldName, deparseObject(fieldObj, fieldOI));
    }
    return struct;
}
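This variant is functionally identical to Example 1; only the formatting differs. In both, the isRow flag exists because, as the inline comment notes, the top-level row ObjectInspector may report internal field names rather than the declared Hive column names, so the serde falls back to its own colNames list for the row and reserves StructField.getFieldName for nested structs.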
Example 3: getAllKey
import org.apache.hadoop.hive.serde2.objectinspector.StructField; // import the package/class this method depends on
@Override
public String[] getAllKey() throws IOException {
    String[] keys = new String[fieldList.size()];
    int i = 0;
    // Collect the declared name of every struct field, in order.
    for (StructField field : fieldList) {
        keys[i] = field.getFieldName();
        i++;
    }
    return keys;
}
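fieldList is state of the enclosing class and is not shown in this example. A minimal sketch of how it might be populated, assuming it mirrors a StructObjectInspector's field references (the setter name is hypothetical):

// Hypothetical surrounding state for getAllKey above.
private List<? extends StructField> fieldList;

public void setStructObjectInspector(final StructObjectInspector structOI) {
    // Every key returned by getAllKey is a StructField.getFieldName value.
    this.fieldList = structOI.getAllStructFieldRefs();
}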
Example 4: toTableInfo
import org.apache.hadoop.hive.serde2.objectinspector.StructField; // import the package/class this method depends on
/**
 * Converts a Hive table to a Metacat TableInfo.
 *
 * @param name  qualified name of the table
 * @param table connector table
 * @return Metacat table info
 */
@Override
public TableInfo toTableInfo(final QualifiedName name, final Table table) {
    final List<FieldSchema> nonPartitionColumns =
        (table.getSd() != null) ? table.getSd().getCols() : Collections.emptyList();
    // If the storage descriptor carries no columns, derive the schema from the
    // table's struct fields instead; ignore all exceptions while doing so.
    try {
        if (nonPartitionColumns.isEmpty()) {
            for (StructField field : HiveTableUtil.getTableStructFields(table)) {
                final FieldSchema fieldSchema = new FieldSchema(field.getFieldName(),
                    field.getFieldObjectInspector().getTypeName(),
                    field.getFieldComment());
                nonPartitionColumns.add(fieldSchema);
            }
        }
    } catch (final Exception e) {
        log.error(e.getMessage(), e);
    }
    final List<FieldSchema> partitionColumns = table.getPartitionKeys();
    final Date creationDate = table.isSetCreateTime() ? epochSecondsToDate(table.getCreateTime()) : null;
    final List<FieldInfo> allFields =
        Lists.newArrayListWithCapacity(nonPartitionColumns.size() + partitionColumns.size());
    nonPartitionColumns.stream()
        .map(field -> hiveToMetacatField(field, false))
        .forEachOrdered(allFields::add);
    partitionColumns.stream()
        .map(field -> hiveToMetacatField(field, true))
        .forEachOrdered(allFields::add);
    final AuditInfo auditInfo = AuditInfo.builder().createdDate(creationDate).build();
    return TableInfo.builder()
        .serde(toStorageInfo(table.getSd(), table.getOwner())).fields(allFields)
        .metadata(table.getParameters()).name(name).auditInfo(auditInfo)
        .build();
}
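The epochSecondsToDate helper is also not shown in this example. Assuming it performs a plain seconds-to-milliseconds conversion (the actual implementation may differ), it could look like this:

// Assumed helper: converts a Hive create time in epoch seconds to a java.util.Date.
private static Date epochSecondsToDate(final long seconds) {
    return new Date(seconds * 1000L);
}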