This article collects typical usage examples of the Java method org.apache.hadoop.hive.metastore.api.StorageDescriptor.setInputFormat. If you have been wondering what StorageDescriptor.setInputFormat does, how to call it, or where to find working code, the curated examples below should help. You can also explore further usage of the enclosing class, org.apache.hadoop.hive.metastore.api.StorageDescriptor.
The following 10 code examples of StorageDescriptor.setInputFormat are shown, ordered by popularity.
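Before the examples, here is a minimal, self-contained sketch of where setInputFormat fits when assembling a StorageDescriptor. This is an illustrative sketch only: the class name StorageDescriptorSketch, the location string, and the serde/format choices below are common defaults picked for demonstration, not values taken from the examples that follow.

import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;

import java.util.HashMap;

public class StorageDescriptorSketch {
    public static void main(String[] args) {
        StorageDescriptor sd = new StorageDescriptor();
        // setInputFormat expects the fully qualified class name of the Hadoop
        // InputFormat used to read the data; it is usually paired with a
        // matching output format and a serde.
        sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
        sd.setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
        sd.setLocation("/tmp/example_table"); // hypothetical data location
        sd.setSerdeInfo(new SerDeInfo("example",
                "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
                new HashMap<String, String>()));
        System.out.println(sd.getInputFormat()); // org.apache.hadoop.mapred.TextInputFormat
    }
}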
Example 1: createUnpartitionedTable
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class the method depends on
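// Test helper: creates an unpartitioned EXTERNAL table backed by CSV text files,
// then publishes long-column statistics for its "id" column.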
public static Table createUnpartitionedTable(
        HiveMetaStoreClient metaStoreClient,
        String database,
        String table,
        URI location)
        throws TException {
    Table hiveTable = new Table();
    hiveTable.setDbName(database);
    hiveTable.setTableName(table);
    hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
    hiveTable.putToParameters("EXTERNAL", "TRUE");
    StorageDescriptor sd = new StorageDescriptor();
    sd.setCols(DATA_COLUMNS);
    sd.setLocation(location.toString());
    sd.setParameters(new HashMap<String, String>());
    sd.setInputFormat(TextInputFormat.class.getName());
    sd.setOutputFormat(TextOutputFormat.class.getName());
    sd.setSerdeInfo(new SerDeInfo());
    sd.getSerdeInfo().setSerializationLib("org.apache.hadoop.hive.serde2.OpenCSVSerde");
    hiveTable.setSd(sd);
    metaStoreClient.createTable(hiveTable);
    ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, database, table);
    ColumnStatisticsData statsData = new ColumnStatisticsData(_Fields.LONG_STATS, new LongColumnStatsData(1L, 2L));
    ColumnStatisticsObj cso1 = new ColumnStatisticsObj("id", "bigint", statsData);
    List<ColumnStatisticsObj> statsObj = Collections.singletonList(cso1);
    metaStoreClient.updateTableColumnStatistics(new ColumnStatistics(statsDesc, statsObj));
    return hiveTable;
}
Example 2: createPartitionedTable
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class the method depends on
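// Same as Example 1, but also declares partition keys, yielding a partitioned EXTERNAL table.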
public static Table createPartitionedTable(
        HiveMetaStoreClient metaStoreClient,
        String database,
        String table,
        URI location)
        throws Exception {
    Table hiveTable = new Table();
    hiveTable.setDbName(database);
    hiveTable.setTableName(table);
    hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
    hiveTable.putToParameters("EXTERNAL", "TRUE");
    hiveTable.setPartitionKeys(PARTITION_COLUMNS);
    StorageDescriptor sd = new StorageDescriptor();
    sd.setCols(DATA_COLUMNS);
    sd.setLocation(location.toString());
    sd.setParameters(new HashMap<String, String>());
    sd.setInputFormat(TextInputFormat.class.getName());
    sd.setOutputFormat(TextOutputFormat.class.getName());
    sd.setSerdeInfo(new SerDeInfo());
    sd.getSerdeInfo().setSerializationLib("org.apache.hadoop.hive.serde2.OpenCSVSerde");
    hiveTable.setSd(sd);
    metaStoreClient.createTable(hiveTable);
    ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, database, table);
    ColumnStatisticsData statsData = new ColumnStatisticsData(_Fields.LONG_STATS, new LongColumnStatsData(1L, 2L));
    ColumnStatisticsObj cso1 = new ColumnStatisticsObj("id", "bigint", statsData);
    List<ColumnStatisticsObj> statsObj = Collections.singletonList(cso1);
    metaStoreClient.updateTableColumnStatistics(new ColumnStatistics(statsDesc, statsObj));
    return hiveTable;
}
Example 3: newTable
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class the method depends on
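// Builds an in-memory Table fixture from shared constants (COLS, INPUT_FORMAT, OUTPUT_FORMAT, ...),
// including privileges and storage metadata.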
public static Table newTable(String database, String tableName) {
    Table table = new Table();
    table.setDbName(database);
    table.setTableName(tableName);
    table.setTableType(TABLE_TYPE);
    table.setOwner(OWNER);
    table.setCreateTime(CREATE_TIME);
    table.setRetention(RETENTION);
    Map<String, List<PrivilegeGrantInfo>> userPrivileges = new HashMap<>();
    userPrivileges.put("read", ImmutableList.of(new PrivilegeGrantInfo()));
    PrincipalPrivilegeSet privileges = new PrincipalPrivilegeSet();
    privileges.setUserPrivileges(userPrivileges);
    table.setPrivileges(privileges);
    StorageDescriptor storageDescriptor = new StorageDescriptor();
    storageDescriptor.setCols(COLS);
    storageDescriptor.setInputFormat(INPUT_FORMAT);
    storageDescriptor.setOutputFormat(OUTPUT_FORMAT);
    storageDescriptor.setSerdeInfo(new SerDeInfo(SERDE_INFO_NAME, SERIALIZATION_LIB, new HashMap<String, String>()));
    storageDescriptor.setSkewedInfo(new SkewedInfo());
    storageDescriptor.setParameters(new HashMap<String, String>());
    storageDescriptor.setLocation(DATABASE + "/" + tableName + "/");
    table.setSd(storageDescriptor);
    Map<String, String> parameters = new HashMap<>();
    parameters.put("com.company.parameter", "abc");
    table.setParameters(parameters);
    return table;
}
Example 4: newPartition
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class the method depends on
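// The Partition counterpart of Example 3; the storage location is extended with the partition value.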
public static Partition newPartition(String database, String tableName, String partitionValue) {
    Partition partition = new Partition();
    partition.setDbName(database);
    partition.setTableName(tableName);
    partition.setCreateTime(CREATE_TIME);
    partition.setValues(ImmutableList.of(partitionValue));
    Map<String, List<PrivilegeGrantInfo>> userPrivileges = new HashMap<>();
    userPrivileges.put("read", ImmutableList.of(new PrivilegeGrantInfo()));
    PrincipalPrivilegeSet privileges = new PrincipalPrivilegeSet();
    privileges.setUserPrivileges(userPrivileges);
    partition.setPrivileges(privileges);
    StorageDescriptor storageDescriptor = new StorageDescriptor();
    storageDescriptor.setCols(COLS);
    storageDescriptor.setInputFormat(INPUT_FORMAT);
    storageDescriptor.setOutputFormat(OUTPUT_FORMAT);
    storageDescriptor.setSerdeInfo(new SerDeInfo(SERDE_INFO_NAME, SERIALIZATION_LIB, new HashMap<String, String>()));
    storageDescriptor.setSkewedInfo(new SkewedInfo());
    storageDescriptor.setParameters(new HashMap<String, String>());
    storageDescriptor.setLocation(DATABASE + "/" + tableName + "/" + partitionValue + "/");
    partition.setSd(storageDescriptor);
    Map<String, String> parameters = new HashMap<>();
    parameters.put("com.company.parameter", "abc");
    partition.setParameters(parameters);
    return partition;
}
Example 5: init
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class the method depends on
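// JUnit fixture: populates a Table with placeholder strings such as "input_format" before each test.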
@Before
public void init() {
    table = new Table();
    table.setDbName("database");
    table.setTableName("table");
    table.setTableType("type");
    Map<String, List<PrivilegeGrantInfo>> userPrivileges = new HashMap<>();
    userPrivileges.put("read", ImmutableList.of(new PrivilegeGrantInfo()));
    PrincipalPrivilegeSet privileges = new PrincipalPrivilegeSet();
    privileges.setUserPrivileges(userPrivileges);
    table.setPrivileges(privileges);
    StorageDescriptor storageDescriptor = new StorageDescriptor();
    storageDescriptor.setCols(Arrays.asList(new FieldSchema("a", "int", null)));
    storageDescriptor.setInputFormat("input_format");
    storageDescriptor.setOutputFormat("output_format");
    storageDescriptor.setSerdeInfo(new SerDeInfo("serde", "lib", new HashMap<String, String>()));
    storageDescriptor.setSkewedInfo(new SkewedInfo());
    storageDescriptor.setParameters(new HashMap<String, String>());
    storageDescriptor.setLocation("database/table/");
    table.setSd(storageDescriptor);
    Map<String, String> parameters = new HashMap<>();
    parameters.put("com.company.parameter", "abc");
    table.setParameters(parameters);
}
Example 6: init
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class the method depends on
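// JUnit fixture: the Partition counterpart of Example 5.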
@Before
public void init() {
    partition = new Partition();
    partition.setDbName("database");
    partition.setTableName("table");
    partition.setValues(ImmutableList.of("part"));
    Map<String, List<PrivilegeGrantInfo>> userPrivileges = new HashMap<>();
    userPrivileges.put("read", ImmutableList.of(new PrivilegeGrantInfo()));
    PrincipalPrivilegeSet privileges = new PrincipalPrivilegeSet();
    privileges.setUserPrivileges(userPrivileges);
    partition.setPrivileges(privileges);
    StorageDescriptor storageDescriptor = new StorageDescriptor();
    storageDescriptor.setCols(Arrays.asList(new FieldSchema("a", "int", null)));
    storageDescriptor.setInputFormat("input_format");
    storageDescriptor.setOutputFormat("output_format");
    storageDescriptor.setSerdeInfo(new SerDeInfo("serde", "lib", new HashMap<String, String>()));
    storageDescriptor.setSkewedInfo(new SkewedInfo());
    storageDescriptor.setParameters(new HashMap<String, String>());
    storageDescriptor.setLocation("database/table/part/");
    partition.setSd(storageDescriptor);
    Map<String, String> parameters = new HashMap<>();
    parameters.put("com.company.parameter", "abc");
    partition.setParameters(parameters);
}
Example 7: copyTableSdToPartitionSd
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class the method depends on
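// Copies the table's StorageDescriptor onto each partition, keeping any partition-specific
// location, formats, and serde settings that are already set.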
private void copyTableSdToPartitionSd(final List<Partition> hivePartitions, final Table table) {
    //
    // Update the partition info based on that of the table.
    //
    for (Partition partition : hivePartitions) {
        final StorageDescriptor sd = partition.getSd();
        final StorageDescriptor tableSdCopy = table.getSd().deepCopy();
        if (tableSdCopy.getSerdeInfo() == null) {
            final SerDeInfo serDeInfo = new SerDeInfo(null, null, new HashMap<>());
            tableSdCopy.setSerdeInfo(serDeInfo);
        }
        tableSdCopy.setLocation(sd.getLocation());
        if (!Strings.isNullOrEmpty(sd.getInputFormat())) {
            tableSdCopy.setInputFormat(sd.getInputFormat());
        }
        if (!Strings.isNullOrEmpty(sd.getOutputFormat())) {
            tableSdCopy.setOutputFormat(sd.getOutputFormat());
        }
        if (sd.getParameters() != null && !sd.getParameters().isEmpty()) {
            tableSdCopy.setParameters(sd.getParameters());
        }
        if (sd.getSerdeInfo() != null) {
            if (!Strings.isNullOrEmpty(sd.getSerdeInfo().getName())) {
                tableSdCopy.getSerdeInfo().setName(sd.getSerdeInfo().getName());
            }
            if (!Strings.isNullOrEmpty(sd.getSerdeInfo().getSerializationLib())) {
                tableSdCopy.getSerdeInfo().setSerializationLib(sd.getSerdeInfo().getSerializationLib());
            }
            if (sd.getSerdeInfo().getParameters() != null && !sd.getSerdeInfo().getParameters().isEmpty()) {
                tableSdCopy.getSerdeInfo().setParameters(sd.getSerdeInfo().getParameters());
            }
        }
        partition.setSd(tableSdCopy);
    }
}
Example 8: getStorageDescriptor
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class the method depends on
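// Translates a HiveRegistrationUnit into a StorageDescriptor, copying each optional
// storage property only when it is present.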
private static StorageDescriptor getStorageDescriptor(HiveRegistrationUnit unit) {
    State props = unit.getStorageProps();
    StorageDescriptor sd = new StorageDescriptor();
    sd.setParameters(getParameters(props));
    sd.setCols(getFieldSchemas(unit));
    if (unit.getLocation().isPresent()) {
        sd.setLocation(unit.getLocation().get());
    }
    if (unit.getInputFormat().isPresent()) {
        sd.setInputFormat(unit.getInputFormat().get());
    }
    if (unit.getOutputFormat().isPresent()) {
        sd.setOutputFormat(unit.getOutputFormat().get());
    }
    if (unit.getIsCompressed().isPresent()) {
        sd.setCompressed(unit.getIsCompressed().get());
    }
    if (unit.getNumBuckets().isPresent()) {
        sd.setNumBuckets(unit.getNumBuckets().get());
    }
    if (unit.getBucketColumns().isPresent()) {
        sd.setBucketCols(unit.getBucketColumns().get());
    }
    if (unit.getIsStoredAsSubDirs().isPresent()) {
        sd.setStoredAsSubDirectories(unit.getIsStoredAsSubDirs().get());
    }
    sd.setSerdeInfo(getSerDeInfo(unit));
    return sd;
}
Example 9: fromStorageDto
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class the method depends on
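// Converts a possibly-null StorageDto into a StorageDescriptor, substituting empty
// defaults so that every required Thrift field is non-null.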
private StorageDescriptor fromStorageDto(@Nullable final StorageDto storageDto) {
    // Set all required fields to a non-null value
    final StorageDescriptor result = new StorageDescriptor();
    String inputFormat = "";
    String location = "";
    String outputFormat = "";
    final String serdeName = "";
    String serializationLib = "";
    Map<String, String> sdParams = Maps.newHashMap();
    Map<String, String> serdeParams = Maps.newHashMap();
    if (storageDto != null) {
        if (storageDto.getInputFormat() != null) {
            inputFormat = storageDto.getInputFormat();
        }
        if (storageDto.getUri() != null) {
            location = storageDto.getUri();
        }
        if (storageDto.getOutputFormat() != null) {
            outputFormat = storageDto.getOutputFormat();
        }
        if (storageDto.getSerializationLib() != null) {
            serializationLib = storageDto.getSerializationLib();
        }
        if (storageDto.getParameters() != null) {
            sdParams = storageDto.getParameters();
        }
        if (storageDto.getSerdeInfoParameters() != null) {
            serdeParams = storageDto.getSerdeInfoParameters();
        }
    }
    result.setInputFormat(inputFormat);
    result.setLocation(location);
    result.setOutputFormat(outputFormat);
    result.setSerdeInfo(new SerDeInfo(serdeName, serializationLib, serdeParams));
    result.setCols(Collections.emptyList());
    result.setBucketCols(Collections.emptyList());
    result.setSortCols(Collections.emptyList());
    result.setParameters(sdParams);
    return result;
}
Example 10: createTable
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class the method depends on
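// Presto: assembles a managed Hive table, wiring the storage format's input/output
// formats and serde into the StorageDescriptor, and registers it with the metastore.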
private Table createTable(
        String schemaName,
        String tableName,
        String tableOwner,
        List<HiveColumnHandle> columnHandles,
        HiveStorageFormat hiveStorageFormat,
        List<String> partitionedBy,
        OptionalInt retentionDays,
        Path targetPath)
{
    Map<String, HiveColumnHandle> columnHandlesByName = Maps.uniqueIndex(columnHandles, HiveColumnHandle::getName);
    List<FieldSchema> partitionColumns = partitionedBy.stream()
            .map(columnHandlesByName::get)
            .map(column -> new FieldSchema(column.getName(), column.getHiveType().getHiveTypeName(), null))
            .collect(toList());
    Set<String> partitionColumnNames = ImmutableSet.copyOf(partitionedBy);
    boolean sampled = false;
    ImmutableList.Builder<FieldSchema> columns = ImmutableList.builder();
    for (HiveColumnHandle columnHandle : columnHandles) {
        String name = columnHandle.getName();
        String type = columnHandle.getHiveType().getHiveTypeName();
        if (name.equals(SAMPLE_WEIGHT_COLUMN_NAME)) {
            columns.add(new FieldSchema(name, type, "Presto sample weight column"));
            sampled = true;
        }
        else if (!partitionColumnNames.contains(name)) {
            verify(!columnHandle.isPartitionKey(), "Column handles are not consistent with partitioned by property");
            columns.add(new FieldSchema(name, type, null));
        }
        else {
            verify(columnHandle.isPartitionKey(), "Column handles are not consistent with partitioned by property");
        }
    }
    SerDeInfo serdeInfo = new SerDeInfo();
    serdeInfo.setName(tableName);
    serdeInfo.setSerializationLib(hiveStorageFormat.getSerDe());
    serdeInfo.setParameters(ImmutableMap.of());
    StorageDescriptor sd = new StorageDescriptor();
    sd.setLocation(targetPath.toString());
    sd.setCols(columns.build());
    sd.setSerdeInfo(serdeInfo);
    sd.setInputFormat(hiveStorageFormat.getInputFormat());
    sd.setOutputFormat(hiveStorageFormat.getOutputFormat());
    sd.setParameters(ImmutableMap.of());
    Table table = new Table();
    table.setDbName(schemaName);
    table.setTableName(tableName);
    table.setOwner(tableOwner);
    table.setTableType(TableType.MANAGED_TABLE.toString());
    String tableComment = "Created by Presto";
    if (sampled) {
        tableComment = "Sampled table created by Presto. Only query this table from Hive if you understand how Presto implements sampling.";
    }
    table.setParameters(ImmutableMap.of("comment", tableComment));
    table.setPartitionKeys(partitionColumns);
    table.setSd(sd);
    if (retentionDays.isPresent()) {
        table.setRetention(retentionDays.getAsInt());
    }
    PrivilegeGrantInfo allPrivileges = new PrivilegeGrantInfo("all", 0, tableOwner, PrincipalType.USER, true);
    table.setPrivileges(new PrincipalPrivilegeSet(
            ImmutableMap.of(tableOwner, ImmutableList.of(allPrivileges)),
            ImmutableMap.of(),
            ImmutableMap.of()));
    metastore.createTable(table);
    return table;
}