当前位置: 首页>>代码示例>>Java>>正文


Java StorageDescriptor.getSerdeInfo方法代码示例

本文整理汇总了Java中org.apache.hadoop.hive.metastore.api.StorageDescriptor.getSerdeInfo方法的典型用法代码示例。如果您正苦于以下问题:Java StorageDescriptor.getSerdeInfo方法的具体用法?Java StorageDescriptor.getSerdeInfo怎么用?Java StorageDescriptor.getSerdeInfo使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hive.metastore.api.StorageDescriptor的用法示例。


在下文中一共展示了StorageDescriptor.getSerdeInfo方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: StorageDescriptorWrapper

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * Wraps a Hive {@link StorageDescriptor} for (de)serialization, copying its
 * scalar fields and wrapping the nested column, SerDe and sort-order structures.
 *
 * <p>Thrift may leave the optional {@code cols} and {@code sortCols} lists
 * unset; this constructor now tolerates null lists (producing empty wrapper
 * lists) instead of throwing a {@link NullPointerException}.
 *
 * @param sd the Thrift storage descriptor to wrap; must not be null
 */
public StorageDescriptorWrapper(StorageDescriptor sd) {
  this.sd = sd;
  this.cols = Lists.newArrayList();
  // Guard: optional Thrift list may be unset.
  if (sd.getCols() != null) {
    for (FieldSchema f : sd.getCols()) {
      this.cols.add(new FieldSchemaWrapper(f));
    }
  }
  this.location = sd.getLocation();
  this.inputFormat = sd.getInputFormat();
  this.outputFormat = sd.getOutputFormat();
  this.compressed = sd.isCompressed();
  this.numBuckets = sd.getNumBuckets();
  this.serDeInfo = new SerDeInfoWrapper(sd.getSerdeInfo());
//      this.bucketCols = sd.getBucketCols();
  this.sortCols = Lists.newArrayList();
  // Guard: optional Thrift list may be unset.
  if (sd.getSortCols() != null) {
    for (Order o : sd.getSortCols()) {
      this.sortCols.add(new OrderWrapper(o));
    }
  }
  this.parameters = sd.getParameters();
}
 
开发者ID:skhalifa,项目名称:QDrill,代码行数:20,代码来源:HiveTable.java

示例2: extractHiveStorageFormat

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * Resolves the {@link HiveStorageFormat} whose output format and serialization
 * library both match the given table's storage descriptor.
 *
 * @param table the Hive table to inspect
 * @return the matching storage format
 * @throws MetaException if the table has no storage descriptor, no SerDe info,
 *         or no known format matches
 */
private HiveStorageFormat extractHiveStorageFormat(final Table table) throws MetaException {
    final StorageDescriptor sd = table.getSd();
    if (sd == null) {
        throw new MetaException("Table is missing storage descriptor");
    }
    final SerDeInfo serde = sd.getSerdeInfo();
    if (serde == null) {
        throw new MetaException(
            "Table storage descriptor is missing SerDe info");
    }
    final String tableOutputFormat = sd.getOutputFormat();
    final String tableSerdeLib = serde.getSerializationLib();

    // Linear scan over the known formats; both fields must match.
    for (final HiveStorageFormat candidate : HiveStorageFormat.values()) {
        final boolean sameOutput = candidate.getOutputFormat().equals(tableOutputFormat);
        if (sameOutput && candidate.getSerde().equals(tableSerdeLib)) {
            return candidate;
        }
    }
    throw new MetaException(
        String.format("Output format %s with SerDe %s is not supported", tableOutputFormat, tableSerdeLib));
}
 
开发者ID:Netflix,项目名称:metacat,代码行数:22,代码来源:HiveConnectorTableService.java

示例3: copyTableSdToPartitionInfoSd

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * Back-fills missing storage fields on a partition from its parent table's
 * storage descriptor. Only blank or empty fields on the partition are
 * overwritten; values the partition already carries are kept.
 *
 * @param partitionInfo the partition whose serde info is completed in place
 * @param table the parent table supplying default storage values
 */
private void copyTableSdToPartitionInfoSd(final PartitionInfo partitionInfo, final Table table) {
    final StorageInfo partitionSerde = partitionInfo.getSerde();
    final StorageDescriptor tableStorage = table.getSd();

    if (StringUtils.isBlank(partitionSerde.getInputFormat())) {
        partitionSerde.setInputFormat(tableStorage.getInputFormat());
    }
    if (StringUtils.isBlank(partitionSerde.getOutputFormat())) {
        partitionSerde.setOutputFormat(tableStorage.getOutputFormat());
    }
    if (partitionSerde.getParameters() == null || partitionSerde.getParameters().isEmpty()) {
        partitionSerde.setParameters(tableStorage.getParameters());
    }

    // SerDe-level defaults only apply when the table actually has SerDe info.
    final SerDeInfo tableSerdeInfo = tableStorage.getSerdeInfo();
    if (tableSerdeInfo == null) {
        return;
    }
    if (StringUtils.isBlank(partitionSerde.getSerializationLib())) {
        partitionSerde.setSerializationLib(tableSerdeInfo.getSerializationLib());
    }
    if (partitionSerde.getSerdeInfoParameters() == null || partitionSerde.getSerdeInfoParameters().isEmpty()) {
        partitionSerde.setSerdeInfoParameters(tableSerdeInfo.getParameters());
    }
}
 
开发者ID:Netflix,项目名称:metacat,代码行数:24,代码来源:HiveConnectorFastPartitionService.java

示例4: toStorageInfo

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * Converts a Hive {@link StorageDescriptor} into a {@link StorageInfo}.
 * A null descriptor yields an empty StorageInfo; SerDe-related fields are
 * populated only when the descriptor carries SerDe info.
 *
 * @param sd the storage descriptor, possibly null
 * @param owner the owner to record on the resulting StorageInfo
 * @return the converted StorageInfo, never null
 */
private StorageInfo toStorageInfo(final StorageDescriptor sd, final String owner) {
    if (sd == null) {
        return new StorageInfo();
    }
    if (sd.getSerdeInfo() == null) {
        // No SerDe info: copy only descriptor-level fields.
        return StorageInfo.builder()
            .owner(owner)
            .uri(sd.getLocation())
            .inputFormat(sd.getInputFormat())
            .outputFormat(sd.getOutputFormat())
            .parameters(sd.getParameters())
            .build();
    }
    return StorageInfo.builder()
        .owner(owner)
        .uri(sd.getLocation())
        .inputFormat(sd.getInputFormat())
        .outputFormat(sd.getOutputFormat())
        .parameters(sd.getParameters())
        .serializationLib(sd.getSerdeInfo().getSerializationLib())
        .serdeInfoParameters(sd.getSerdeInfo().getParameters())
        .build();
}
 
开发者ID:Netflix,项目名称:metacat,代码行数:18,代码来源:HiveConnectorInfoConverter.java

示例5: toStorageDto

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * Converts a Hive {@link StorageDescriptor} into a {@link StorageDto}.
 * Returns an empty DTO when the descriptor is null; SerDe fields are copied
 * only when SerDe info is present.
 *
 * @param sd the storage descriptor, possibly null
 * @param owner the owner to record on the DTO
 * @return the populated (or empty) StorageDto, never null
 */
private StorageDto toStorageDto(@Nullable final StorageDescriptor sd, final String owner) {
    final StorageDto dto = new StorageDto();
    if (sd == null) {
        return dto;
    }
    dto.setOwner(owner);
    dto.setUri(sd.getLocation());
    dto.setInputFormat(sd.getInputFormat());
    dto.setOutputFormat(sd.getOutputFormat());
    dto.setParameters(sd.getParameters());
    final SerDeInfo serdeInfo = sd.getSerdeInfo();
    if (serdeInfo != null) {
        dto.setSerializationLib(serdeInfo.getSerializationLib());
        dto.setSerdeInfoParameters(serdeInfo.getParameters());
    }
    return dto;
}
 
开发者ID:Netflix,项目名称:metacat,代码行数:17,代码来源:HiveConvertersImpl.java

示例6: StorageDescriptorWrapper

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * Copies the fields of a Hive {@link StorageDescriptor} into this wrapper.
 * The optional column and sort-column lists are wrapped only when present;
 * when unset they are left unmaterialized.
 *
 * @param storageDescriptor the Thrift storage descriptor to wrap
 */
public StorageDescriptorWrapper(StorageDescriptor storageDescriptor) {
  sd = storageDescriptor;
  location = storageDescriptor.getLocation();
  inputFormat = storageDescriptor.getInputFormat();
  outputFormat = storageDescriptor.getOutputFormat();
  compressed = storageDescriptor.isCompressed();
  numBuckets = storageDescriptor.getNumBuckets();
  serDeInfo = new SerDeInfoWrapper(storageDescriptor.getSerdeInfo());
  parameters = storageDescriptor.getParameters();
  if (storageDescriptor.getSortCols() != null) {
    sortCols = Lists.newArrayList();
    for (Order sortOrder : storageDescriptor.getSortCols()) {
      sortCols.add(new OrderWrapper(sortOrder));
    }
  }
  if (storageDescriptor.getCols() != null) {
    this.columns = Lists.newArrayList();
    for (FieldSchema column : storageDescriptor.getCols()) {
      this.columns.add(new FieldSchemaWrapper(column));
    }
  }
}
 
开发者ID:axbaretto,项目名称:drill,代码行数:23,代码来源:HiveTableWrapper.java

示例7: extractHiveStorageFormat

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * Resolves the {@link HiveStorageFormat} whose output format and SerDe both
 * match the table's storage descriptor, failing with a typed PrestoException
 * when metadata is missing or the format is unknown.
 */
private static HiveStorageFormat extractHiveStorageFormat(Table table)
{
    StorageDescriptor sd = table.getSd();
    if (sd == null) {
        throw new PrestoException(HIVE_INVALID_METADATA, "Table is missing storage descriptor");
    }
    SerDeInfo serde = sd.getSerdeInfo();
    if (serde == null) {
        throw new PrestoException(HIVE_INVALID_METADATA, "Table storage descriptor is missing SerDe info");
    }
    String tableOutputFormat = sd.getOutputFormat();
    String tableSerdeLib = serde.getSerializationLib();

    // Both the output format and the serialization library must match.
    for (HiveStorageFormat candidate : HiveStorageFormat.values()) {
        boolean sameOutput = candidate.getOutputFormat().equals(tableOutputFormat);
        if (sameOutput && candidate.getSerDe().equals(tableSerdeLib)) {
            return candidate;
        }
    }
    throw new PrestoException(HIVE_UNSUPPORTED_FORMAT, format("Output format %s with SerDe %s is not supported", tableOutputFormat, tableSerdeLib));
}
 
开发者ID:y-lan,项目名称:presto,代码行数:21,代码来源:HiveMetadata.java

示例8: copyTableSdToPartitionSd

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * Replaces each partition's storage descriptor with a deep copy of the table's
 * descriptor, then re-applies any partition-specific values on top of that copy.
 *
 * <p>Precedence: the partition's own location always wins; input/output format,
 * parameters, and SerDe name/lib/parameters win only when non-empty on the
 * partition. The table's values fill everything else.
 *
 * @param hivePartitions partitions whose storage descriptors are replaced in place
 * @param table the table supplying the baseline storage descriptor
 */
private void copyTableSdToPartitionSd(final List<Partition> hivePartitions, final Table table) {
    //
    // Update the partition info based on that of the table.
    //
    for (Partition partition : hivePartitions) {
        final StorageDescriptor sd = partition.getSd();
        // Deep copy so per-partition mutations below never touch the table's sd.
        final StorageDescriptor tableSdCopy = table.getSd().deepCopy();
        // Ensure SerDe info exists on the copy so the merges below can't NPE.
        if (tableSdCopy.getSerdeInfo() == null) {
            final SerDeInfo serDeInfo = new SerDeInfo(null, null, new HashMap<>());
            tableSdCopy.setSerdeInfo(serDeInfo);
        }

        // Location is always partition-specific, even when null/empty.
        tableSdCopy.setLocation(sd.getLocation());
        if (!Strings.isNullOrEmpty(sd.getInputFormat())) {
            tableSdCopy.setInputFormat(sd.getInputFormat());
        }
        if (!Strings.isNullOrEmpty(sd.getOutputFormat())) {
            tableSdCopy.setOutputFormat(sd.getOutputFormat());
        }
        if (sd.getParameters() != null && !sd.getParameters().isEmpty()) {
            tableSdCopy.setParameters(sd.getParameters());
        }
        // SerDe fields are merged individually, each only when set on the partition.
        if (sd.getSerdeInfo() != null) {
            if (!Strings.isNullOrEmpty(sd.getSerdeInfo().getName())) {
                tableSdCopy.getSerdeInfo().setName(sd.getSerdeInfo().getName());
            }
            if (!Strings.isNullOrEmpty(sd.getSerdeInfo().getSerializationLib())) {
                tableSdCopy.getSerdeInfo().setSerializationLib(sd.getSerdeInfo().getSerializationLib());
            }
            if (sd.getSerdeInfo().getParameters() != null && !sd.getSerdeInfo().getParameters().isEmpty()) {
                tableSdCopy.getSerdeInfo().setParameters(sd.getSerdeInfo().getParameters());
            }
        }
        partition.setSd(tableSdCopy);
    }
}
 
开发者ID:Netflix,项目名称:metacat,代码行数:37,代码来源:HiveConnectorPartitionService.java

示例9: createColumnsetSchema

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * Builds an in-memory Hive {@link Table} whose columns and partition keys are
 * all string-typed, backed by {@link LazySimpleSerDe}.
 *
 * @param name table name
 * @param columns column names; each becomes a string column with comment 'default'
 * @param partCols partition key names; null is now tolerated and treated as
 *        "no partition keys" (previously a NullPointerException)
 * @param conf Hadoop configuration (currently unused; kept for API compatibility)
 * @return the constructed table, never null
 * @throws MetaException if {@code columns} is null
 */
public static Table createColumnsetSchema(String name, List<String> columns,
    List<String> partCols, Configuration conf) throws MetaException {

  if (columns == null) {
    throw new MetaException("columns not specified for table " + name);
  }

  Table tTable = new Table();
  tTable.setTableName(name);
  tTable.setSd(new StorageDescriptor());
  StorageDescriptor sd = tTable.getSd();
  sd.setSerdeInfo(new SerDeInfo());
  SerDeInfo serdeInfo = sd.getSerdeInfo();
  serdeInfo.setSerializationLib(LazySimpleSerDe.class.getName());
  serdeInfo.setParameters(new HashMap<>());
  serdeInfo.getParameters().put(
      org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, "1");

  List<FieldSchema> fields = new ArrayList<>();
  sd.setCols(fields);
  for (String col : columns) {
    FieldSchema field = new FieldSchema(col,
        org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME, "'default'");
    fields.add(field);
  }

  // Partition keys default to the string type.
  tTable.setPartitionKeys(new ArrayList<>());
  if (partCols != null) { // guard: callers may pass null for an unpartitioned table
    for (String partCol : partCols) {
      FieldSchema part = new FieldSchema();
      part.setName(partCol);
      part.setType(org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME);
      tTable.getPartitionKeys().add(part);
    }
  }
  // -1 means bucketing is not configured.
  sd.setNumBuckets(-1);
  return tTable;
}
 
开发者ID:facebookarchive,项目名称:swift-hive-metastore,代码行数:39,代码来源:MetaStoreUtils.java

示例10: metacatToHivePartition

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * {@inheritDoc}
 *
 * <p>Converts a metacat {@link PartitionDto} into a Hive {@link Partition}.
 * Partition values are parsed from the slash-separated, equals-delimited
 * partition name; storage and column metadata fall back to the optional
 * {@code tableDto} when the partition itself does not supply them.
 */
@Override
public Partition metacatToHivePartition(final PartitionDto partitionDto, @Nullable final TableDto tableDto) {
    final Partition result = new Partition();

    final QualifiedName name = partitionDto.getName();
    final List<String> values = Lists.newArrayListWithCapacity(16);
    String databaseName = "";
    String tableName = "";
    if (name != null) {
        if (name.getPartitionName() != null) {
            // Partition name looks like "k1=v1/k2=v2"; only the values are kept,
            // in order. Anything not of the form key=value is rejected.
            for (String partialPartName : SLASH_SPLITTER.split(partitionDto.getName().getPartitionName())) {
                final List<String> nameValues = ImmutableList.copyOf(EQUAL_SPLITTER.split(partialPartName));
                if (nameValues.size() != 2) {
                    throw new IllegalStateException("Unrecognized partition name: " + partitionDto.getName());
                }
                final String value = nameValues.get(1);
                values.add(value);
            }
        }

        if (name.getDatabaseName() != null) {
            databaseName = name.getDatabaseName();
        }

        if (name.getTableName() != null) {
            tableName = name.getTableName();
        }
    }
    result.setValues(values);
    result.setDbName(databaseName);
    result.setTableName(tableName);

    // Hive requires a parameters map; substitute an empty one when absent.
    Map<String, String> metadata = partitionDto.getMetadata();
    if (metadata == null) {
        metadata = Maps.newHashMap();
    }
    result.setParameters(metadata);

    result.setSd(fromStorageDto(partitionDto.getSerde()));
    final StorageDescriptor sd = result.getSd();
    if (tableDto != null) {
        // Inherit the serialization lib from the table only when the partition
        // has SerDe info but left the lib unset.
        if (sd.getSerdeInfo() != null && tableDto.getSerde() != null && Strings.isNullOrEmpty(
            sd.getSerdeInfo().getSerializationLib())) {
            sd.getSerdeInfo().setSerializationLib(tableDto.getSerde().getSerializationLib());
        }

        // Columns come from the table schema, excluding partition keys.
        final List<FieldDto> fields = tableDto.getFields();
        if (fields == null) {
            sd.setCols(Collections.emptyList());
        } else {
            sd.setCols(fields.stream()
                .filter(field -> !field.isPartition_key())
                .map(this::metacatToHiveField)
                .collect(Collectors.toList()));
        }
    }

    // Audit timestamps map to Hive's epoch-second create/access times.
    final AuditDto auditDto = partitionDto.getAudit();
    if (auditDto != null) {
        if (auditDto.getCreatedDate() != null) {
            result.setCreateTime(dateToEpochSeconds(auditDto.getCreatedDate()));
        }
        if (auditDto.getLastModifiedDate() != null) {
            result.setLastAccessTime(dateToEpochSeconds(auditDto.getLastModifiedDate()));
        }
    }

    return result;
}
 
开发者ID:Netflix,项目名称:metacat,代码行数:73,代码来源:HiveConvertersImpl.java

示例11: fillStorageDesc

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * Builds an Atlas {@link Referenceable} for a Hive storage descriptor,
 * including its SerDe struct, sort columns, bucket columns, formats and
 * parameters, and links it back to the owning table.
 *
 * @param storageDesc the Hive storage descriptor to translate
 * @param tableQualifiedName fully qualified name of the owning table
 * @param sdQualifiedName qualified name to assign to the storage descriptor entity
 * @param tableId Atlas id of the owning table
 * @return the populated storage-descriptor referenceable
 * @throws AtlasHookException declared for API compatibility
 */
public Referenceable fillStorageDesc(StorageDescriptor storageDesc, String tableQualifiedName,
    String sdQualifiedName, Id tableId) throws AtlasHookException {
    LOG.debug("Filling storage descriptor information for {}", storageDesc);

    final Referenceable storageRef = new Referenceable(HiveDataTypes.HIVE_STORAGEDESC.getName());
    storageRef.set(AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME, sdQualifiedName);

    final SerDeInfo serde = storageDesc.getSerdeInfo();
    LOG.debug("serdeInfo = {}", serde);

    // Translate the SerDe info into a nested struct.
    final Struct serdeStruct = new Struct(HiveDataTypes.HIVE_SERDE.getName());
    serdeStruct.set(AtlasClient.NAME, serde.getName());
    serdeStruct.set("serializationLib", serde.getSerializationLib());
    serdeStruct.set(PARAMETERS, serde.getParameters());
    storageRef.set("serdeInfo", serdeStruct);

    storageRef.set(STORAGE_NUM_BUCKETS, storageDesc.getNumBuckets());
    storageRef.set(STORAGE_IS_STORED_AS_SUB_DIRS, storageDesc.isStoredAsSubDirectories());

    // One struct per sort column; the attribute is set only when non-empty.
    final List<Struct> sortColumns = new ArrayList<>();
    for (Order sortColumn : storageDesc.getSortCols()) {
        final Struct orderStruct = new Struct(HiveDataTypes.HIVE_ORDER.getName());
        orderStruct.set("col", sortColumn.getCol());
        orderStruct.set("order", sortColumn.getOrder());
        sortColumns.add(orderStruct);
    }
    if (!sortColumns.isEmpty()) {
        storageRef.set("sortCols", sortColumns);
    }

    storageRef.set(LOCATION, storageDesc.getLocation());
    storageRef.set("inputFormat", storageDesc.getInputFormat());
    storageRef.set("outputFormat", storageDesc.getOutputFormat());
    storageRef.set("compressed", storageDesc.isCompressed());

    if (!storageDesc.getBucketCols().isEmpty()) {
        storageRef.set("bucketCols", storageDesc.getBucketCols());
    }

    storageRef.set(PARAMETERS, storageDesc.getParameters());
    storageRef.set("storedAsSubDirectories", storageDesc.isStoredAsSubDirectories());
    storageRef.set(TABLE, tableId);

    return storageRef;
}
 
开发者ID:apache,项目名称:incubator-atlas,代码行数:52,代码来源:HiveMetaStoreBridge.java


注:本文中的org.apache.hadoop.hive.metastore.api.StorageDescriptor.getSerdeInfo方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。