

Java StructObjectInspector.getStructFieldData Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector.getStructFieldData. If you are wondering what StructObjectInspector.getStructFieldData does, how to use it, or want concrete examples, the curated code samples below may help. You can also explore further usage of the enclosing class, org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector.


Six code examples of StructObjectInspector.getStructFieldData are shown below, sorted by popularity by default.
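Before the collected examples, here is a minimal, self-contained sketch of the basic calling pattern: build a StructObjectInspector, resolve a StructField by name with getStructFieldRef, then hand the row object and that field reference to getStructFieldData. The class name GetStructFieldDataDemo and the (name, age) schema are illustrative assumptions, not taken from any of the projects below.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

public class GetStructFieldDataDemo {
    public static void main(String[] args) {
        // Struct schema (name: string, age: int), backed by plain Java objects.
        List<String> fieldNames = Arrays.asList("name", "age");
        List<ObjectInspector> fieldOIs = Arrays.asList(
                (ObjectInspector) PrimitiveObjectInspectorFactory.javaStringObjectInspector,
                PrimitiveObjectInspectorFactory.javaIntObjectInspector);
        StructObjectInspector soi =
                ObjectInspectorFactory.getStandardStructObjectInspector(fieldNames, fieldOIs);

        // A standard struct value is simply a List of field values.
        Object row = Arrays.asList("alice", 30);

        // Resolve the field reference once, then extract its data from any row.
        StructField ageRef = soi.getStructFieldRef("age");
        Object age = soi.getStructFieldData(row, ageRef);
        System.out.println("age = " + age); // prints: age = 30
    }
}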

Example 1: merge

import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; // import required by this method
@SuppressWarnings("unchecked")
void merge(@Nonnull final Object partial, @Nonnull final StructObjectInspector mergeOI,
        @Nonnull final StructField[] fields, @Nonnull final ListObjectInspector[] fieldOIs) {
    Preconditions.checkArgument(fields.length == fieldOIs.length);

    final int numFields = fieldOIs.length;
    if (identifiers == null) {
        this.identifiers = new Identifier[numFields];
    }
    Preconditions.checkArgument(fields.length == identifiers.length);

    for (int i = 0; i < numFields; i++) {
        Identifier<Writable> id = identifiers[i];
        if (id == null) {
            id = new Identifier<>(1);
            identifiers[i] = id;
        }
        final Object fieldData = mergeOI.getStructFieldData(partial, fields[i]);
        final ListObjectInspector fieldOI = fieldOIs[i];
        for (int j = 0, size = fieldOI.getListLength(fieldData); j < size; j++) {
            Object o = fieldOI.getListElement(fieldData, j);
            Preconditions.checkNotNull(o);
            id.valueOf((Writable) o);
        }
    }
}
 
Developer: apache | Project: incubator-hivemall | Lines: 27 | Source: OnehotEncodingUDAF.java

Example 2: serialize

import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; // import required by this method
@Override
public Writable serialize(Object obj, ObjectInspector objectInspector) throws SerDeException {
    if (!objectInspector.getCategory().equals(ObjectInspector.Category.STRUCT)) {
        throw new SerDeException("Cannot serialize " + objectInspector.getCategory() + ". Can only serialize a struct");
    }

    StructObjectInspector inspector = (StructObjectInspector) objectInspector;
    List<? extends StructField> fields = inspector.getAllStructFieldRefs();
    Writable[] arr = new Writable[fields.size()];
    for (int i = 0; i < fields.size(); i++) {
        StructField field = fields.get(i);
        Object subObj = inspector.getStructFieldData(obj, field);
        ObjectInspector subInspector = field.getFieldObjectInspector();
        arr[i] = createPrimitive(subObj, (PrimitiveObjectInspector) subInspector);
    }
    serdeSize = arr.length;
    return new ArrayWritable(Writable.class, arr);
}
 
Developer: shunfei | Project: indexr | Lines: 19 | Source: IndexRSerde.java
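The createPrimitive helper called inside the loop is not shown in the snippet above. As a hedged sketch only: such a conversion can lean on PrimitiveObjectInspector.getPrimitiveWritableObject, which exposes a field value in its Writable form. The class and method names below are hypothetical stand-ins; IndexRSerde's actual createPrimitive may well differ (for example, with per-type switches).

import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
import org.apache.hadoop.io.Writable;

// Hypothetical stand-in for the createPrimitive helper referenced above.
public class PrimitiveToWritable {
    static Writable toWritable(Object fieldData, PrimitiveObjectInspector oi) {
        if (fieldData == null) {
            return null; // null fields stay null in the output array
        }
        // The inspector knows how to expose its value in Writable form.
        return (Writable) oi.getPrimitiveWritableObject(fieldData);
    }
}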

Example 3: populateData

import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; // import required by this method
@Override
public int populateData() throws IOException, SerDeException {
  final RecordReader<Object, Object> reader = this.reader;
  final Converter partTblObjectInspectorConverter = this.partTblObjectInspectorConverter;
  final int numRowsPerBatch = (int) this.numRowsPerBatch;

  final StructField[] selectedStructFieldRefs = this.selectedStructFieldRefs;
  final SerDe partitionSerDe = this.partitionSerDe;
  final StructObjectInspector finalOI = this.finalOI;
  final ObjectInspector[] selectedColumnObjInspectors = this.selectedColumnObjInspectors;
  final HiveFieldConverter[] selectedColumnFieldConverters = this.selectedColumnFieldConverters;
  final ValueVector[] vectors = this.vectors;
  final Object key = this.key;
  final Object value = this.value;
  
  int recordCount = 0;
  // Fill the batch: read until numRowsPerBatch is reached or the reader runs out of rows.
  while (recordCount < numRowsPerBatch && reader.next(key, value)) {
    Object deSerializedValue = partitionSerDe.deserialize((Writable) value);
    if (partTblObjectInspectorConverter != null) {
      deSerializedValue = partTblObjectInspectorConverter.convert(deSerializedValue);
    }
    for (int i = 0; i < selectedStructFieldRefs.length; i++) {
      Object hiveValue = finalOI.getStructFieldData(deSerializedValue, selectedStructFieldRefs[i]);
      if (hiveValue != null) {
        selectedColumnFieldConverters[i].setSafeValue(selectedColumnObjInspectors[i], hiveValue, vectors[i], recordCount);
      }
    }        
    recordCount++;
  }

  return recordCount;
}
 
Developer: dremio | Project: dremio-oss | Lines: 33 | Source: HiveRecordReaders.java

Example 4: extractField

import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; // import required by this method
@Override
protected Object extractField(Object target) {
    List<String> flNames = fieldNames;

    for (int i = 0; i < flNames.size(); i++) {
        String fl = flNames.get(i);
        if (target instanceof HiveType) {
            HiveType type = (HiveType) target;
            ObjectInspector inspector = type.getObjectInspector();
            if (inspector instanceof StructObjectInspector) {
                StructObjectInspector soi = (StructObjectInspector) inspector;
                StructField field = soi.getStructFieldRef(fl);
                ObjectInspector foi = field.getFieldObjectInspector();
                Assert.isTrue(foi.getCategory() == ObjectInspector.Category.PRIMITIVE,
                        String.format("Field [%s] needs to be a primitive; found [%s]", fl, foi.getTypeName()));

                // expecting a Writable - simply do a toString at the end
                target = soi.getStructFieldData(type.getObject(), field);
            }
            else {
                return FieldExtractor.NOT_FOUND;
            }
        }
        else {
            return FieldExtractor.NOT_FOUND;
        }
    }

    if (target == null || target instanceof NullWritable) {
        return StringUtils.EMPTY;
    }
    return target.toString();
}
 
Developer: xushjie1987 | Project: es-hadoop-v2.2.0 | Lines: 34 | Source: HiveFieldExtractor.java
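Example 4 above walks a list of field names through nested inspectors, calling getStructFieldData once per struct level. The sketch below shows the same traversal in isolation against standard object inspectors; the NestedFieldWalkDemo class, the address.city path, and the sample data are hypothetical stand-ins, not part of es-hadoop.

import java.util.Arrays;

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

public class NestedFieldWalkDemo {
    public static void main(String[] args) {
        // Inner struct: (city: string); outer struct: (address: struct<city: string>).
        StructObjectInspector innerOI = ObjectInspectorFactory.getStandardStructObjectInspector(
                Arrays.asList("city"),
                Arrays.asList((ObjectInspector) PrimitiveObjectInspectorFactory.javaStringObjectInspector));
        StructObjectInspector outerOI = ObjectInspectorFactory.getStandardStructObjectInspector(
                Arrays.asList("address"),
                Arrays.asList((ObjectInspector) innerOI));

        // Row value for the outer struct: one field holding the inner struct's values.
        Object row = Arrays.asList((Object) Arrays.asList("tokyo"));

        // Descend one struct level per path segment, as extractField does above.
        Object data = row;
        ObjectInspector oi = outerOI;
        for (String name : "address.city".split("\\.")) {
            StructObjectInspector soi = (StructObjectInspector) oi;
            StructField field = soi.getStructFieldRef(name);
            data = soi.getStructFieldData(data, field);
            oi = field.getFieldObjectInspector();
        }
        System.out.println(data); // prints: tokyo
    }
}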

Example 5: serialize

import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; // import required by this method
@Override
public HiveKuduWritable serialize(Object row, ObjectInspector inspector)
    throws SerDeException {

    final StructObjectInspector structInspector = (StructObjectInspector) inspector;
    final List<? extends StructField> fields = structInspector.getAllStructFieldRefs();
    if (fields.size() != fieldCount) {
        throw new SerDeException(String.format(
                "Required %d columns, received %d.", fieldCount,
                fields.size()));
    }

    cachedWritable.clear();

    for (int i = 0; i < fieldCount; i++) {
        StructField structField = fields.get(i);
        if (structField != null) {
            Object field = structInspector.getStructFieldData(row,
                    structField);
            ObjectInspector fieldOI = structField.getFieldObjectInspector();

            Object javaObject = HiveKuduBridgeUtils.deparseObject(field,
                    fieldOI);
            // String concatenation handles a null javaObject safely (avoids an NPE from toString()).
            LOG.warn("Column value of " + i + " is " + javaObject);
            cachedWritable.set(i, javaObject);
        }
    }
    return cachedWritable;
}
 
Developer: BimalTandel | Project: HiveKudu-Handler | Lines: 30 | Source: HiveKuduSerDe.java
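A note on the design above: Hive invokes serialize once per row, so the SerDe reuses a single cachedWritable across calls, clearing it at the top of each invocation rather than allocating a new writable per row. Fields are matched to columns purely by position, which is why the field count is validated up front; the per-column LOG.warn, however, will be verbose on real workloads.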

Example 6: populateData

import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; // import required by this method
@Override
public int populateData() throws IOException, SerDeException {
  final SkipRecordsInspector skipRecordsInspector = this.skipRecordsInspector;
  final RecordReader<Object, Object> reader = this.reader;
  final Converter partTblObjectInspectorConverter = this.partTblObjectInspectorConverter;
  final Object key = this.key;

  final int numRowsPerBatch = (int) this.numRowsPerBatch;

  final StructField[] selectedStructFieldRefs = this.selectedStructFieldRefs;
  final SerDe partitionSerDe = this.partitionSerDe;
  final StructObjectInspector finalOI = this.finalOI;
  final ObjectInspector[] selectedColumnObjInspectors = this.selectedColumnObjInspectors;
  final HiveFieldConverter[] selectedColumnFieldConverters = this.selectedColumnFieldConverters;
  final ValueVector[] vectors = this.vectors;

  skipRecordsInspector.reset();
  Object value;

  int recordCount = 0;

  // getNextValue() hands out a reusable value holder; configured header rows are skipped below.
  while (recordCount < numRowsPerBatch && reader.next(key, value = skipRecordsInspector.getNextValue())) {
    if (skipRecordsInspector.doSkipHeader(recordCount++)) {
      continue;
    }
    Object bufferedValue = skipRecordsInspector.bufferAdd(value);
    if (bufferedValue != null) {
      Object deSerializedValue = partitionSerDe.deserialize((Writable) bufferedValue);
      if (partTblObjectInspectorConverter != null) {
        deSerializedValue = partTblObjectInspectorConverter.convert(deSerializedValue);
      }

      for (int i = 0; i < selectedStructFieldRefs.length; i++) {
        Object hiveValue = finalOI.getStructFieldData(deSerializedValue, selectedStructFieldRefs[i]);
        if (hiveValue != null) {
          selectedColumnFieldConverters[i].setSafeValue(selectedColumnObjInspectors[i], hiveValue, vectors[i], skipRecordsInspector.getActualCount());
        }
      }
      skipRecordsInspector.incrementActualCount();
    }
    skipRecordsInspector.incrementTempCount();
  }

  skipRecordsInspector.updateContinuance();
  return skipRecordsInspector.getActualCount();
}
 
Developer: dremio | Project: dremio-oss | Lines: 47 | Source: HiveTextReader.java


Note: the org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector.getStructFieldData method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and distribution and use must follow each project's License. Do not reproduce without permission.