This article collects typical usage examples of the Java class org.apache.hadoop.hive.metastore.api.SerDeInfo. If you are unsure what SerDeInfo is for or how to use it, the curated examples below may help.
The SerDeInfo class belongs to the org.apache.hadoop.hive.metastore.api package. 15 code examples of the class are shown below, sorted by popularity.
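Before the numbered examples, here is a minimal sketch (not part of the original collection) of the pattern nearly all of them share: construct a SerDeInfo, point it at a serialization library, and attach it to a StorageDescriptor. The class and method names are the standard Hive Metastore API; the serde name and other values are illustrative assumptions.

import java.util.HashMap;

import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;

public class SerDeInfoSketch {
  // Build a StorageDescriptor whose SerDeInfo points at LazySimpleSerDe,
  // the plain-text serde used by several of the examples below.
  public static StorageDescriptor newTextStorageDescriptor() {
    SerDeInfo serdeInfo = new SerDeInfo();
    serdeInfo.setName("example_serde"); // illustrative name
    serdeInfo.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
    serdeInfo.setParameters(new HashMap<String, String>());
    StorageDescriptor sd = new StorageDescriptor();
    sd.setSerdeInfo(serdeInfo);
    return sd;
  }
}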
Example 1: createPartitionedTable
import org.apache.hadoop.hive.metastore.api.SerDeInfo; // import the required package/class
private Table createPartitionedTable(String databaseName, String tableName) throws Exception {
  Table table = new Table();
  table.setDbName(databaseName);
  table.setTableName(tableName);
  table.setPartitionKeys(Arrays.asList(new FieldSchema("partcol", "int", null)));
  table.setSd(new StorageDescriptor());
  table.getSd().setCols(Arrays.asList(new FieldSchema("id", "int", null), new FieldSchema("name", "string", null)));
  table.getSd().setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
  table.getSd().setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
  table.getSd().setSerdeInfo(new SerDeInfo());
  table.getSd().getSerdeInfo().setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
  HiveMetaStoreClient client = server.newClient();
  client.createTable(table);
  client.close();
  return table;
}
Example 2: createPartitionedTable
import org.apache.hadoop.hive.metastore.api.SerDeInfo; // import the required package/class
static Table createPartitionedTable(HiveMetaStoreClient metaStoreClient, String database, String table, File location)
    throws Exception {
  Table hiveTable = new Table();
  hiveTable.setDbName(database);
  hiveTable.setTableName(table);
  hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
  hiveTable.putToParameters("EXTERNAL", "TRUE");
  hiveTable.setPartitionKeys(PARTITION_COLUMNS);
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setLocation(location.toURI().toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());
  hiveTable.setSd(sd);
  metaStoreClient.createTable(hiveTable);
  return hiveTable;
}
Example 3: createView
import org.apache.hadoop.hive.metastore.api.SerDeInfo; // import the required package/class
private static Table createView(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String view,
    String table,
    List<FieldSchema> partitionCols)
    throws TException {
  Table hiveView = new Table();
  hiveView.setDbName(database);
  hiveView.setTableName(view);
  hiveView.setTableType(TableType.VIRTUAL_VIEW.name());
  hiveView.setViewOriginalText(hql(database, table));
  hiveView.setViewExpandedText(expandHql(database, table, DATA_COLUMNS, partitionCols));
  hiveView.setPartitionKeys(partitionCols);
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());
  hiveView.setSd(sd);
  metaStoreClient.createTable(hiveView);
  return hiveView;
}
Example 4: createUnpartitionedTable
import org.apache.hadoop.hive.metastore.api.SerDeInfo; // import the required package/class
static Table createUnpartitionedTable(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String table,
    File location)
    throws TException {
  Table hiveTable = new Table();
  hiveTable.setDbName(database);
  hiveTable.setTableName(table);
  hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
  hiveTable.putToParameters("EXTERNAL", "TRUE");
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setLocation(location.toURI().toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());
  hiveTable.setSd(sd);
  metaStoreClient.createTable(hiveTable);
  return hiveTable;
}
Example 5: extractHiveStorageFormat
import org.apache.hadoop.hive.metastore.api.SerDeInfo; // import the required package/class
private HiveStorageFormat extractHiveStorageFormat(final Table table) throws MetaException {
  final StorageDescriptor descriptor = table.getSd();
  if (descriptor == null) {
    throw new MetaException("Table is missing storage descriptor");
  }
  final SerDeInfo serdeInfo = descriptor.getSerdeInfo();
  if (serdeInfo == null) {
    throw new MetaException("Table storage descriptor is missing SerDe info");
  }
  final String outputFormat = descriptor.getOutputFormat();
  final String serializationLib = serdeInfo.getSerializationLib();
  for (HiveStorageFormat format : HiveStorageFormat.values()) {
    if (format.getOutputFormat().equals(outputFormat) && format.getSerde().equals(serializationLib)) {
      return format;
    }
  }
  throw new MetaException(
      String.format("Output format %s with SerDe %s is not supported", outputFormat, serializationLib));
}
Example 6: copyTableSdToPartitionInfoSd
import org.apache.hadoop.hive.metastore.api.SerDeInfo; // import the required package/class
private void copyTableSdToPartitionInfoSd(final PartitionInfo partitionInfo, final Table table) {
  final StorageInfo sd = partitionInfo.getSerde();
  final StorageDescriptor tableSd = table.getSd();
  // Fall back to the table-level storage descriptor for any field the partition leaves unset.
  if (StringUtils.isBlank(sd.getInputFormat())) {
    sd.setInputFormat(tableSd.getInputFormat());
  }
  if (StringUtils.isBlank(sd.getOutputFormat())) {
    sd.setOutputFormat(tableSd.getOutputFormat());
  }
  if (sd.getParameters() == null || sd.getParameters().isEmpty()) {
    sd.setParameters(tableSd.getParameters());
  }
  final SerDeInfo tableSerde = tableSd.getSerdeInfo();
  if (tableSerde != null) {
    if (StringUtils.isBlank(sd.getSerializationLib())) {
      sd.setSerializationLib(tableSerde.getSerializationLib());
    }
    if (sd.getSerdeInfoParameters() == null || sd.getSerdeInfoParameters().isEmpty()) {
      sd.setSerdeInfoParameters(tableSerde.getParameters());
    }
  }
}
Example 7: toStorageDto
import org.apache.hadoop.hive.metastore.api.SerDeInfo; // import the required package/class
private StorageDto toStorageDto(@Nullable final StorageDescriptor sd, final String owner) {
  final StorageDto result = new StorageDto();
  if (sd != null) {
    result.setOwner(owner);
    result.setUri(sd.getLocation());
    result.setInputFormat(sd.getInputFormat());
    result.setOutputFormat(sd.getOutputFormat());
    result.setParameters(sd.getParameters());
    final SerDeInfo serde = sd.getSerdeInfo();
    if (serde != null) {
      result.setSerializationLib(serde.getSerializationLib());
      result.setSerdeInfoParameters(serde.getParameters());
    }
  }
  return result;
}
Example 8: extractHiveStorageFormat
import org.apache.hadoop.hive.metastore.api.SerDeInfo; // import the required package/class
private static HiveStorageFormat extractHiveStorageFormat(Table table)
{
  StorageDescriptor descriptor = table.getSd();
  if (descriptor == null) {
    throw new PrestoException(HIVE_INVALID_METADATA, "Table is missing storage descriptor");
  }
  SerDeInfo serdeInfo = descriptor.getSerdeInfo();
  if (serdeInfo == null) {
    throw new PrestoException(HIVE_INVALID_METADATA, "Table storage descriptor is missing SerDe info");
  }
  String outputFormat = descriptor.getOutputFormat();
  String serializationLib = serdeInfo.getSerializationLib();
  for (HiveStorageFormat format : HiveStorageFormat.values()) {
    if (format.getOutputFormat().equals(outputFormat) && format.getSerDe().equals(serializationLib)) {
      return format;
    }
  }
  throw new PrestoException(HIVE_UNSUPPORTED_FORMAT,
      format("Output format %s with SerDe %s is not supported", outputFormat, serializationLib));
}
Example 9: addTestPartition
import org.apache.hadoop.hive.metastore.api.SerDeInfo; // import the required package/class
public Partition addTestPartition(Table tbl, List<String> values, int createTime) throws Exception {
  StorageDescriptor partitionSd = new StorageDescriptor();
  if (StringUtils.isNotBlank(tbl.getSd().getLocation())) {
    // Note: this appends the partition values' List.toString() to the table location.
    partitionSd.setLocation(tbl.getSd().getLocation() + values);
  } else {
    partitionSd.setLocation("/tmp/" + tbl.getTableName() + "/part1");
  }
  partitionSd.setSerdeInfo(
      new SerDeInfo("name", "serializationLib", ImmutableMap.of(HiveAvroSerDeManager.SCHEMA_URL, "/tmp/dummy")));
  partitionSd.setCols(tbl.getPartitionKeys());
  Partition partition =
      new Partition(values, tbl.getDbName(), tbl.getTableName(), 1, 1, partitionSd, new HashMap<String, String>());
  partition.setCreateTime(createTime);
  return this.getLocalMetastoreClient().add_partition(partition);
}
Example 10: makeMetastoreTableObject
import org.apache.hadoop.hive.metastore.api.SerDeInfo; // import the required package/class
public Table makeMetastoreTableObject(HiveMetaStoreClient client,
    String dbName, String tabName, List<FieldSchema> cols) throws Exception {
  Table tbl = new Table();
  tbl.setDbName(dbName);
  tbl.setTableName(tabName);
  StorageDescriptor sd = new StorageDescriptor();
  tbl.setSd(sd);
  tbl.setParameters(new HashMap<String, String>());
  sd.setCols(cols);
  sd.setCompressed(false);
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setName(tbl.getTableName());
  sd.getSerdeInfo().setParameters(new HashMap<String, String>());
  sd.getSerdeInfo().getParameters()
      .put(serdeConstants.SERIALIZATION_FORMAT, "1");
  sd.setSortCols(new ArrayList<Order>());
  return tbl;
}
Example 11: createUnpartitionedTable
import org.apache.hadoop.hive.metastore.api.SerDeInfo; // import the required package/class
private Table createUnpartitionedTable(String databaseName, String tableName) throws Exception {
  Table table = new Table();
  table.setDbName(databaseName);
  table.setTableName(tableName);
  table.setSd(new StorageDescriptor());
  table.getSd().setCols(Arrays.asList(new FieldSchema("id", "int", null), new FieldSchema("name", "string", null)));
  table.getSd().setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
  table.getSd().setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
  table.getSd().setSerdeInfo(new SerDeInfo());
  table.getSd().getSerdeInfo().setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
  HiveMetaStoreClient client = server.newClient();
  client.createTable(table);
  client.close();
  return table;
}
Example 12: createTable
import org.apache.hadoop.hive.metastore.api.SerDeInfo; // import the required package/class
private void createTable(File sourceTableUri) throws Exception {
  File partitionEurope = new File(sourceTableUri, "local_date=2000-01-01");
  File partitionUk = new File(partitionEurope, "local_hour=0");
  File dataFileUk = new File(partitionUk, PART_00000);
  FileUtils.writeStringToFile(dataFileUk, "1\tadam\tlondon\n2\tsusan\tglasgow\n");
  File partitionAsia = new File(sourceTableUri, "local_date=2000-01-02");
  File partitionChina = new File(partitionAsia, "local_hour=0");
  File dataFileChina = new File(partitionChina, PART_00000);
  String data = "1\tchun\tbeijing\n2\tshanghai\tmilan\n";
  FileUtils.writeStringToFile(dataFileChina, data);
  HiveMetaStoreClient sourceClient = sourceCatalog.client();
  Table source = new Table();
  source.setDbName(DATABASE);
  source.setTableName(TABLE);
  source.setTableType(TableType.EXTERNAL_TABLE.name());
  source.setParameters(new HashMap<String, String>());
  List<FieldSchema> partitionColumns = Arrays.asList(new FieldSchema("local_date", "string", ""),
      new FieldSchema("local_hour", "string", ""));
  source.setPartitionKeys(partitionColumns);
  List<FieldSchema> dataColumns = Arrays.asList(new FieldSchema("id", "bigint", ""),
      new FieldSchema("name", "string", ""), new FieldSchema("city", "tinyint", ""));
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(dataColumns);
  sd.setLocation(sourceTableUri.toURI().toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());
  source.setSd(sd);
  sourceClient.createTable(source);
  LOG.info(">>>> Partitions added: {}",
      sourceClient.add_partitions(Arrays.asList(newPartition(sd, Arrays.asList("2000-01-01", "0"), partitionUk),
          newPartition(sd, Arrays.asList("2000-01-02", "0"), partitionChina))));
}
Example 13: before
import org.apache.hadoop.hive.metastore.api.SerDeInfo; // import the required package/class
@Before
public void before() throws TException, IOException {
  Table table = new Table();
  table.setDbName(DATABASE);
  table.setTableName("source_" + TABLE);
  table.setTableType(TableType.EXTERNAL_TABLE.name());
  table.putToParameters("EXTERNAL", "TRUE");
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(Arrays.asList(new FieldSchema("col1", "string", null)));
  sd.setSerdeInfo(new SerDeInfo());
  table.setSd(sd);
  hive.client().createTable(table);
}
Example 14: newStorageDescriptor
import org.apache.hadoop.hive.metastore.api.SerDeInfo; // import the required package/class
public static StorageDescriptor newStorageDescriptor(File location, String... columns) {
  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> cols = new ArrayList<>(columns.length);
  for (String name : columns) {
    cols.add(newFieldSchema(name));
  }
  sd.setCols(cols);
  sd.setSerdeInfo(new SerDeInfo());
  sd.setLocation(location.toURI().toString());
  return sd;
}
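A hypothetical call site for this helper (newFieldSchema is a helper assumed to exist in the same class and is not shown above; the path and column names are illustrative):

StorageDescriptor sd = newStorageDescriptor(new File("/tmp/example_table"), "id", "name");
sd.getSerdeInfo().setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"); // illustrative serde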
Example 15: createUnpartitionedTable
import org.apache.hadoop.hive.metastore.api.SerDeInfo; // import the required package/class
public static Table createUnpartitionedTable(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String table,
    URI location)
    throws TException {
  Table hiveTable = new Table();
  hiveTable.setDbName(database);
  hiveTable.setTableName(table);
  hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
  hiveTable.putToParameters("EXTERNAL", "TRUE");
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setLocation(location.toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setInputFormat(TextInputFormat.class.getName());
  sd.setOutputFormat(TextOutputFormat.class.getName());
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setSerializationLib("org.apache.hadoop.hive.serde2.OpenCSVSerde");
  hiveTable.setSd(sd);
  metaStoreClient.createTable(hiveTable);
  // Attach simple long-column statistics for the "id" column of the new table.
  ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, database, table);
  ColumnStatisticsData statsData = new ColumnStatisticsData(_Fields.LONG_STATS, new LongColumnStatsData(1L, 2L));
  ColumnStatisticsObj cso1 = new ColumnStatisticsObj("id", "bigint", statsData);
  List<ColumnStatisticsObj> statsObj = Collections.singletonList(cso1);
  metaStoreClient.updateTableColumnStatistics(new ColumnStatistics(statsDesc, statsObj));
  return hiveTable;
}