This article collects typical usage examples of the Java method org.apache.hadoop.hive.metastore.api.StorageDescriptor.getSerdeInfo. If you are wondering what StorageDescriptor.getSerdeInfo does, how to call it, or want to see it used in context, the curated code examples below should help. You can also read further about the enclosing class, org.apache.hadoop.hive.metastore.api.StorageDescriptor.
The following 11 code examples of StorageDescriptor.getSerdeInfo are shown, sorted by popularity by default.
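Before the examples, a minimal sketch of the method's basic contract may help: StorageDescriptor is a Thrift-generated metastore type, and getSerdeInfo() returns the descriptor's SerDeInfo, which is null until one has been set, so callers generally null-check it (as several examples below do). The class name and the serialization library chosen here are illustrative only.

import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;

// Minimal sketch (hypothetical class name): read-or-initialize a descriptor's SerDe info.
public class GetSerdeInfoSketch {
    public static void main(String[] args) {
        StorageDescriptor sd = new StorageDescriptor();
        // getSerdeInfo() returns null when no SerDeInfo has been set yet.
        SerDeInfo serde = sd.getSerdeInfo();
        if (serde == null) {
            serde = new SerDeInfo();
            serde.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
            sd.setSerdeInfo(serde);
        }
        System.out.println(serde.getSerializationLib());
    }
}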
Example 1: StorageDescriptorWrapper
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class the method depends on
public StorageDescriptorWrapper(StorageDescriptor sd) {
    this.sd = sd;
    this.cols = Lists.newArrayList();
    for (FieldSchema f : sd.getCols()) {
        this.cols.add(new FieldSchemaWrapper(f));
    }
    this.location = sd.getLocation();
    this.inputFormat = sd.getInputFormat();
    this.outputFormat = sd.getOutputFormat();
    this.compressed = sd.isCompressed();
    this.numBuckets = sd.getNumBuckets();
    this.serDeInfo = new SerDeInfoWrapper(sd.getSerdeInfo());
    // this.bucketCols = sd.getBucketCols();
    this.sortCols = Lists.newArrayList();
    for (Order o : sd.getSortCols()) {
        this.sortCols.add(new OrderWrapper(o));
    }
    this.parameters = sd.getParameters();
}
Example 2: extractHiveStorageFormat
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class the method depends on
private HiveStorageFormat extractHiveStorageFormat(final Table table) throws MetaException {
    final StorageDescriptor descriptor = table.getSd();
    if (descriptor == null) {
        throw new MetaException("Table is missing storage descriptor");
    }
    final SerDeInfo serdeInfo = descriptor.getSerdeInfo();
    if (serdeInfo == null) {
        throw new MetaException(
            "Table storage descriptor is missing SerDe info");
    }
    final String outputFormat = descriptor.getOutputFormat();
    final String serializationLib = serdeInfo.getSerializationLib();
    for (HiveStorageFormat format : HiveStorageFormat.values()) {
        if (format.getOutputFormat().equals(outputFormat) && format.getSerde().equals(serializationLib)) {
            return format;
        }
    }
    throw new MetaException(
        String.format("Output format %s with SerDe %s is not supported", outputFormat, serializationLib));
}
Example 3: copyTableSdToPartitionInfoSd
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class the method depends on
private void copyTableSdToPartitionInfoSd(final PartitionInfo partitionInfo, final Table table) {
    final StorageInfo sd = partitionInfo.getSerde();
    final StorageDescriptor tableSd = table.getSd();
    if (StringUtils.isBlank(sd.getInputFormat())) {
        sd.setInputFormat(tableSd.getInputFormat());
    }
    if (StringUtils.isBlank(sd.getOutputFormat())) {
        sd.setOutputFormat(tableSd.getOutputFormat());
    }
    if (sd.getParameters() == null || sd.getParameters().isEmpty()) {
        sd.setParameters(tableSd.getParameters());
    }
    final SerDeInfo tableSerde = tableSd.getSerdeInfo();
    if (tableSerde != null) {
        if (StringUtils.isBlank(sd.getSerializationLib())) {
            sd.setSerializationLib(tableSerde.getSerializationLib());
        }
        if (sd.getSerdeInfoParameters() == null || sd.getSerdeInfoParameters().isEmpty()) {
            sd.setSerdeInfoParameters(tableSerde.getParameters());
        }
    }
}
Example 4: toStorageInfo
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class the method depends on
private StorageInfo toStorageInfo(final StorageDescriptor sd, final String owner) {
    if (sd == null) {
        return new StorageInfo();
    }
    if (sd.getSerdeInfo() != null) {
        return StorageInfo.builder().owner(owner)
            .uri(sd.getLocation())
            .inputFormat(sd.getInputFormat())
            .outputFormat(sd.getOutputFormat())
            .parameters(sd.getParameters())
            .serializationLib(sd.getSerdeInfo().getSerializationLib())
            .serdeInfoParameters(sd.getSerdeInfo().getParameters())
            .build();
    }
    return StorageInfo.builder().owner(owner).uri(sd.getLocation()).inputFormat(sd.getInputFormat())
        .outputFormat(sd.getOutputFormat()).parameters(sd.getParameters()).build();
}
Example 5: toStorageDto
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class the method depends on
private StorageDto toStorageDto(@Nullable final StorageDescriptor sd, final String owner) {
    final StorageDto result = new StorageDto();
    if (sd != null) {
        result.setOwner(owner);
        result.setUri(sd.getLocation());
        result.setInputFormat(sd.getInputFormat());
        result.setOutputFormat(sd.getOutputFormat());
        result.setParameters(sd.getParameters());
        final SerDeInfo serde = sd.getSerdeInfo();
        if (serde != null) {
            result.setSerializationLib(serde.getSerializationLib());
            result.setSerdeInfoParameters(serde.getParameters());
        }
    }
    return result;
}
Example 6: StorageDescriptorWrapper
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class the method depends on
public StorageDescriptorWrapper(StorageDescriptor storageDescriptor) {
    sd = storageDescriptor;
    location = storageDescriptor.getLocation();
    inputFormat = storageDescriptor.getInputFormat();
    outputFormat = storageDescriptor.getOutputFormat();
    compressed = storageDescriptor.isCompressed();
    numBuckets = storageDescriptor.getNumBuckets();
    serDeInfo = new SerDeInfoWrapper(storageDescriptor.getSerdeInfo());
    if (sd.getSortCols() != null) {
        sortCols = Lists.newArrayList();
        for (Order order : sd.getSortCols()) {
            sortCols.add(new OrderWrapper(order));
        }
    }
    parameters = storageDescriptor.getParameters();
    if (sd.getCols() != null) {
        this.columns = Lists.newArrayList();
        for (FieldSchema fieldSchema : sd.getCols()) {
            this.columns.add(new FieldSchemaWrapper(fieldSchema));
        }
    }
}
Example 7: extractHiveStorageFormat
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class the method depends on
private static HiveStorageFormat extractHiveStorageFormat(Table table)
{
    StorageDescriptor descriptor = table.getSd();
    if (descriptor == null) {
        throw new PrestoException(HIVE_INVALID_METADATA, "Table is missing storage descriptor");
    }
    SerDeInfo serdeInfo = descriptor.getSerdeInfo();
    if (serdeInfo == null) {
        throw new PrestoException(HIVE_INVALID_METADATA, "Table storage descriptor is missing SerDe info");
    }
    String outputFormat = descriptor.getOutputFormat();
    String serializationLib = serdeInfo.getSerializationLib();
    for (HiveStorageFormat format : HiveStorageFormat.values()) {
        if (format.getOutputFormat().equals(outputFormat) && format.getSerDe().equals(serializationLib)) {
            return format;
        }
    }
    throw new PrestoException(HIVE_UNSUPPORTED_FORMAT, format("Output format %s with SerDe %s is not supported", outputFormat, serializationLib));
}
Example 8: copyTableSdToPartitionSd
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class the method depends on
private void copyTableSdToPartitionSd(final List<Partition> hivePartitions, final Table table) {
    //
    // Update the partition info based on that of the table.
    //
    for (Partition partition : hivePartitions) {
        final StorageDescriptor sd = partition.getSd();
        final StorageDescriptor tableSdCopy = table.getSd().deepCopy();
        if (tableSdCopy.getSerdeInfo() == null) {
            final SerDeInfo serDeInfo = new SerDeInfo(null, null, new HashMap<>());
            tableSdCopy.setSerdeInfo(serDeInfo);
        }
        tableSdCopy.setLocation(sd.getLocation());
        if (!Strings.isNullOrEmpty(sd.getInputFormat())) {
            tableSdCopy.setInputFormat(sd.getInputFormat());
        }
        if (!Strings.isNullOrEmpty(sd.getOutputFormat())) {
            tableSdCopy.setOutputFormat(sd.getOutputFormat());
        }
        if (sd.getParameters() != null && !sd.getParameters().isEmpty()) {
            tableSdCopy.setParameters(sd.getParameters());
        }
        if (sd.getSerdeInfo() != null) {
            if (!Strings.isNullOrEmpty(sd.getSerdeInfo().getName())) {
                tableSdCopy.getSerdeInfo().setName(sd.getSerdeInfo().getName());
            }
            if (!Strings.isNullOrEmpty(sd.getSerdeInfo().getSerializationLib())) {
                tableSdCopy.getSerdeInfo().setSerializationLib(sd.getSerdeInfo().getSerializationLib());
            }
            if (sd.getSerdeInfo().getParameters() != null && !sd.getSerdeInfo().getParameters().isEmpty()) {
                tableSdCopy.getSerdeInfo().setParameters(sd.getSerdeInfo().getParameters());
            }
        }
        partition.setSd(tableSdCopy);
    }
}
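Note the deepCopy() at the top of the loop: the table's StorageDescriptor seeds every partition, so each iteration clones it before applying partition-specific overrides; mutating a single shared descriptor would let one partition's values bleed into the next.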
Example 9: createColumnsetSchema
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class the method depends on
public static Table createColumnsetSchema(String name, List<String> columns,
        List<String> partCols, Configuration conf) throws MetaException {
    if (columns == null) {
        throw new MetaException("columns not specified for table " + name);
    }
    Table tTable = new Table();
    tTable.setTableName(name);
    tTable.setSd(new StorageDescriptor());
    StorageDescriptor sd = tTable.getSd();
    sd.setSerdeInfo(new SerDeInfo());
    SerDeInfo serdeInfo = sd.getSerdeInfo();
    serdeInfo.setSerializationLib(LazySimpleSerDe.class.getName());
    serdeInfo.setParameters(new HashMap<String, String>());
    serdeInfo.getParameters().put(
        org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, "1");
    List<FieldSchema> fields = new ArrayList<FieldSchema>();
    sd.setCols(fields);
    for (String col : columns) {
        FieldSchema field = new FieldSchema(col,
            org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME, "'default'");
        fields.add(field);
    }
    tTable.setPartitionKeys(new ArrayList<FieldSchema>());
    for (String partCol : partCols) {
        FieldSchema part = new FieldSchema();
        part.setName(partCol);
        part.setType(org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME); // default partition key type
        tTable.getPartitionKeys().add(part);
    }
    sd.setNumBuckets(-1);
    return tTable;
}
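For illustration, a hypothetical caller of Example 9's helper might look like the sketch below; the table, column, and partition names are made up, and the checked MetaException is declared rather than handled.

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.Table;

// Hypothetical caller: build a simple all-string table with one partition key.
private static void buildExampleTable(Configuration conf) throws MetaException {
    Table t = createColumnsetSchema("web_logs", Arrays.asList("ip", "url"),
            Arrays.asList("dt"), conf);
    // The helper always installs a SerDeInfo, so getSerdeInfo() is non-null here.
    String lib = t.getSd().getSerdeInfo().getSerializationLib();
    // lib is "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"
}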
Example 10: metacatToHivePartition
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class the method depends on
/**
 * {@inheritDoc}
 */
@Override
public Partition metacatToHivePartition(final PartitionDto partitionDto, @Nullable final TableDto tableDto) {
    final Partition result = new Partition();
    final QualifiedName name = partitionDto.getName();
    final List<String> values = Lists.newArrayListWithCapacity(16);
    String databaseName = "";
    String tableName = "";
    if (name != null) {
        if (name.getPartitionName() != null) {
            for (String partialPartName : SLASH_SPLITTER.split(partitionDto.getName().getPartitionName())) {
                final List<String> nameValues = ImmutableList.copyOf(EQUAL_SPLITTER.split(partialPartName));
                if (nameValues.size() != 2) {
                    throw new IllegalStateException("Unrecognized partition name: " + partitionDto.getName());
                }
                final String value = nameValues.get(1);
                values.add(value);
            }
        }
        if (name.getDatabaseName() != null) {
            databaseName = name.getDatabaseName();
        }
        if (name.getTableName() != null) {
            tableName = name.getTableName();
        }
    }
    result.setValues(values);
    result.setDbName(databaseName);
    result.setTableName(tableName);
    Map<String, String> metadata = partitionDto.getMetadata();
    if (metadata == null) {
        metadata = Maps.newHashMap();
    }
    result.setParameters(metadata);
    result.setSd(fromStorageDto(partitionDto.getSerde()));
    final StorageDescriptor sd = result.getSd();
    if (tableDto != null) {
        if (sd.getSerdeInfo() != null && tableDto.getSerde() != null && Strings.isNullOrEmpty(
                sd.getSerdeInfo().getSerializationLib())) {
            sd.getSerdeInfo().setSerializationLib(tableDto.getSerde().getSerializationLib());
        }
        final List<FieldDto> fields = tableDto.getFields();
        if (fields == null) {
            sd.setCols(Collections.emptyList());
        } else {
            sd.setCols(fields.stream()
                .filter(field -> !field.isPartition_key())
                .map(this::metacatToHiveField)
                .collect(Collectors.toList()));
        }
    }
    final AuditDto auditDto = partitionDto.getAudit();
    if (auditDto != null) {
        if (auditDto.getCreatedDate() != null) {
            result.setCreateTime(dateToEpochSeconds(auditDto.getCreatedDate()));
        }
        if (auditDto.getLastModifiedDate() != null) {
            result.setLastAccessTime(dateToEpochSeconds(auditDto.getLastModifiedDate()));
        }
    }
    return result;
}
Example 11: fillStorageDesc
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class the method depends on
public Referenceable fillStorageDesc(StorageDescriptor storageDesc, String tableQualifiedName,
        String sdQualifiedName, Id tableId) throws AtlasHookException {
    LOG.debug("Filling storage descriptor information for {}", storageDesc);
    Referenceable sdReferenceable = new Referenceable(HiveDataTypes.HIVE_STORAGEDESC.getName());
    sdReferenceable.set(AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME, sdQualifiedName);
    SerDeInfo serdeInfo = storageDesc.getSerdeInfo();
    LOG.debug("serdeInfo = {}", serdeInfo);
    // SkewedInfo skewedInfo = storageDesc.getSkewedInfo();
    String serdeInfoName = HiveDataTypes.HIVE_SERDE.getName();
    Struct serdeInfoStruct = new Struct(serdeInfoName);
    serdeInfoStruct.set(AtlasClient.NAME, serdeInfo.getName());
    serdeInfoStruct.set("serializationLib", serdeInfo.getSerializationLib());
    serdeInfoStruct.set(PARAMETERS, serdeInfo.getParameters());
    sdReferenceable.set("serdeInfo", serdeInfoStruct);
    sdReferenceable.set(STORAGE_NUM_BUCKETS, storageDesc.getNumBuckets());
    sdReferenceable.set(STORAGE_IS_STORED_AS_SUB_DIRS, storageDesc.isStoredAsSubDirectories());
    List<Struct> sortColsStruct = new ArrayList<>();
    for (Order sortcol : storageDesc.getSortCols()) {
        String hiveOrderName = HiveDataTypes.HIVE_ORDER.getName();
        Struct colStruct = new Struct(hiveOrderName);
        colStruct.set("col", sortcol.getCol());
        colStruct.set("order", sortcol.getOrder());
        sortColsStruct.add(colStruct);
    }
    if (sortColsStruct.size() > 0) {
        sdReferenceable.set("sortCols", sortColsStruct);
    }
    sdReferenceable.set(LOCATION, storageDesc.getLocation());
    sdReferenceable.set("inputFormat", storageDesc.getInputFormat());
    sdReferenceable.set("outputFormat", storageDesc.getOutputFormat());
    sdReferenceable.set("compressed", storageDesc.isCompressed());
    if (storageDesc.getBucketCols().size() > 0) {
        sdReferenceable.set("bucketCols", storageDesc.getBucketCols());
    }
    sdReferenceable.set(PARAMETERS, storageDesc.getParameters());
    sdReferenceable.set("storedAsSubDirectories", storageDesc.isStoredAsSubDirectories());
    sdReferenceable.set(TABLE, tableId);
    return sdReferenceable;
}