本文整理汇总了Java中org.apache.hadoop.hive.metastore.api.StorageDescriptor.setCols方法的典型用法代码示例。如果您正苦于以下问题:Java StorageDescriptor.setCols方法的具体用法?Java StorageDescriptor.setCols怎么用?Java StorageDescriptor.setCols使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hive.metastore.api.StorageDescriptor
的用法示例。
在下文中一共展示了StorageDescriptor.setCols方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: createPartitionedTable
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * Registers an external, partitioned Hive table in the metastore.
 *
 * @param metaStoreClient client used to create the table
 * @param database        target database name
 * @param table           target table name
 * @param location        filesystem directory backing the external table
 * @return the {@link Table} object that was created
 * @throws Exception if the metastore rejects the table
 */
static Table createPartitionedTable(HiveMetaStoreClient metaStoreClient, String database, String table, File location)
    throws Exception {
    // Build the storage descriptor first; the table references it via setSd().
    StorageDescriptor storage = new StorageDescriptor();
    storage.setCols(DATA_COLUMNS);
    storage.setLocation(location.toURI().toString());
    storage.setParameters(new HashMap<String, String>());
    storage.setSerdeInfo(new SerDeInfo());

    Table created = new Table();
    created.setDbName(database);
    created.setTableName(table);
    created.setTableType(TableType.EXTERNAL_TABLE.name());
    // "EXTERNAL"="TRUE" is required in addition to the EXTERNAL_TABLE type.
    created.putToParameters("EXTERNAL", "TRUE");
    created.setPartitionKeys(PARTITION_COLUMNS);
    created.setSd(storage);

    metaStoreClient.createTable(created);
    return created;
}
示例2: createView
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * Registers a Hive virtual view over the given table in the metastore.
 *
 * @param metaStoreClient client used to create the view
 * @param database        database containing both the view and the base table
 * @param view            name of the view to create
 * @param table           name of the base table the view selects from
 * @param partitionCols   partition columns carried over onto the view
 * @return the {@link Table} object representing the view
 * @throws TException if the metastore rejects the view
 */
private static Table createView(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String view,
    String table,
    List<FieldSchema> partitionCols)
    throws TException {
    // Views have no physical location, but still carry a storage descriptor.
    StorageDescriptor storage = new StorageDescriptor();
    storage.setCols(DATA_COLUMNS);
    storage.setParameters(new HashMap<String, String>());
    storage.setSerdeInfo(new SerDeInfo());

    Table hiveView = new Table();
    hiveView.setDbName(database);
    hiveView.setTableName(view);
    hiveView.setTableType(TableType.VIRTUAL_VIEW.name());
    // A view is defined by its query text (original + fully-expanded forms).
    hiveView.setViewOriginalText(hql(database, table));
    hiveView.setViewExpandedText(expandHql(database, table, DATA_COLUMNS, partitionCols));
    hiveView.setPartitionKeys(partitionCols);
    hiveView.setSd(storage);

    metaStoreClient.createTable(hiveView);
    return hiveView;
}
示例3: createUnpartitionedTable
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * Registers an external, unpartitioned Hive table in the metastore.
 *
 * @param metaStoreClient client used to create the table
 * @param database        target database name
 * @param table           target table name
 * @param location        filesystem directory backing the external table
 * @return the {@link Table} object that was created
 * @throws TException if the metastore rejects the table
 */
static Table createUnpartitionedTable(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String table,
    File location)
    throws TException {
    StorageDescriptor storage = new StorageDescriptor();
    storage.setCols(DATA_COLUMNS);
    storage.setLocation(location.toURI().toString());
    storage.setParameters(new HashMap<String, String>());
    storage.setSerdeInfo(new SerDeInfo());

    Table created = new Table();
    created.setDbName(database);
    created.setTableName(table);
    created.setTableType(TableType.EXTERNAL_TABLE.name());
    // "EXTERNAL"="TRUE" is required in addition to the EXTERNAL_TABLE type.
    created.putToParameters("EXTERNAL", "TRUE");
    created.setSd(storage);

    metaStoreClient.createTable(created);
    return created;
}
示例4: testCheckTableSchemaMappingMissingColumn
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * Verifies that checkTableSchemaMapping() rejects a Hive schema that contains
 * a column ("col2") with no entry in the DynamoDB column mapping.
 */
@Test
public void testCheckTableSchemaMappingMissingColumn() throws MetaException {
    TableDescription description = getHashRangeTable();

    // Mapping deliberately omits col2 so validation must fail on it.
    Map<String, String> tableParams = Maps.newHashMap();
    tableParams.put(DynamoDBConstants.DYNAMODB_COLUMN_MAPPING, "col1:dynamo_col1$,hashKey:hashKey");

    List<FieldSchema> hiveColumns = Lists.newArrayList();
    hiveColumns.add(new FieldSchema("col1", "string", ""));
    hiveColumns.add(new FieldSchema("col2", "tinyint", ""));
    hiveColumns.add(new FieldSchema("col3", "map<string,string>", ""));
    // NOTE(review): "hashMap" (not "hashKey") is also absent from the mapping,
    // yet only col2 is expected in the error below — confirm the name is intentional.
    hiveColumns.add(new FieldSchema("hashMap", "string", ""));

    StorageDescriptor descriptor = new StorageDescriptor();
    descriptor.setCols(hiveColumns);

    Table hiveTable = new Table();
    hiveTable.setParameters(tableParams);
    hiveTable.setSd(descriptor);

    exceptionRule.expect(MetaException.class);
    exceptionRule.expectMessage("Could not find column mapping for column: col2");
    storageHandler.checkTableSchemaMapping(description, hiveTable);
}
示例5: testCheckTableSchemaMappingValid
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * Verifies that checkTableSchemaMapping() accepts a Hive schema whose every
 * column has a corresponding DynamoDB column mapping entry.
 */
@Test
public void testCheckTableSchemaMappingValid() throws MetaException {
    TableDescription description = getHashRangeTable();

    Map<String, String> tableParams = Maps.newHashMap();
    tableParams.put(DynamoDBConstants.DYNAMODB_COLUMN_MAPPING, "col1:dynamo_col1$," +
        "col2:dynamo_col2#,hashKey:hashKey");

    List<FieldSchema> hiveColumns = Lists.newArrayList();
    hiveColumns.add(new FieldSchema("col1", "string", ""));
    hiveColumns.add(new FieldSchema("col2", "bigint", ""));
    hiveColumns.add(new FieldSchema("hashKey", "string", ""));

    StorageDescriptor descriptor = new StorageDescriptor();
    descriptor.setCols(hiveColumns);

    Table hiveTable = new Table();
    hiveTable.setParameters(tableParams);
    hiveTable.setSd(descriptor);

    // No exception expected: the mapping covers every Hive column.
    storageHandler.checkTableSchemaMapping(description, hiveTable);
}
示例6: testCheckTableSchemaTypeInvalidType
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * Verifies that checkTableSchemaType() rejects a Hive column type (tinyint)
 * that has no supported DynamoDB equivalent.
 */
@Test
public void testCheckTableSchemaTypeInvalidType() throws MetaException {
    TableDescription description = getHashRangeTable();

    Map<String, String> tableParams = Maps.newHashMap();
    tableParams.put(DynamoDBConstants.DYNAMODB_COLUMN_MAPPING, "col1:dynamo_col1$," +
        "col2:dynamo_col2#,hashKey:hashKey");

    List<FieldSchema> hiveColumns = Lists.newArrayList();
    hiveColumns.add(new FieldSchema("col1", "string", ""));
    // tinyint is the unsupported type that should trigger the failure.
    hiveColumns.add(new FieldSchema("col2", "tinyint", ""));
    hiveColumns.add(new FieldSchema("hashKey", "string", ""));

    StorageDescriptor descriptor = new StorageDescriptor();
    descriptor.setCols(hiveColumns);

    Table hiveTable = new Table();
    hiveTable.setParameters(tableParams);
    hiveTable.setSd(descriptor);

    exceptionRule.expect(MetaException.class);
    exceptionRule.expectMessage("The hive type tinyint is not supported in DynamoDB");
    storageHandler.checkTableSchemaType(description, hiveTable);
}
示例7: testCheckTableSchemaTypeInvalidHashKeyType
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * Verifies that checkTableSchemaType() rejects a hash-key column whose Hive
 * type (map&lt;string,string&gt;) does not match the DynamoDB key type (S).
 */
@Test
public void testCheckTableSchemaTypeInvalidHashKeyType() throws MetaException {
    TableDescription description = getHashRangeTable();

    Map<String, String> tableParams = Maps.newHashMap();
    tableParams.put(DynamoDBConstants.DYNAMODB_COLUMN_MAPPING, "col1:dynamo_col1$," +
        "col2:dynamo_col2#,hashKey:hashKey");

    List<FieldSchema> hiveColumns = Lists.newArrayList();
    hiveColumns.add(new FieldSchema("col1", "string", ""));
    hiveColumns.add(new FieldSchema("col2", "bigint", ""));
    // The key column is declared as a map, which cannot back a string (S) key.
    hiveColumns.add(new FieldSchema("hashKey", "map<string,string>", ""));

    StorageDescriptor descriptor = new StorageDescriptor();
    descriptor.setCols(hiveColumns);

    Table hiveTable = new Table();
    hiveTable.setParameters(tableParams);
    hiveTable.setSd(descriptor);

    exceptionRule.expect(MetaException.class);
    exceptionRule.expectMessage("The key element hashKey does not match type. DynamoDB Type: S " +
        "Hive type: " + "map<string,string>");
    storageHandler.checkTableSchemaType(description, hiveTable);
}
示例8: testCheckTableSchemaTypeValid
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * Verifies that checkTableSchemaType() accepts a schema whose column types and
 * key types are all supported by DynamoDB.
 */
@Test
public void testCheckTableSchemaTypeValid() throws MetaException {
    TableDescription description = getHashRangeTable();

    Map<String, String> tableParams = Maps.newHashMap();
    tableParams.put(DynamoDBConstants.DYNAMODB_COLUMN_MAPPING, "col1:dynamo_col1$," +
        "col2:dynamo_col2#,hashKey:hashKey");

    List<FieldSchema> hiveColumns = Lists.newArrayList();
    hiveColumns.add(new FieldSchema("col1", "string", ""));
    hiveColumns.add(new FieldSchema("col2", "bigint", ""));
    hiveColumns.add(new FieldSchema("hashKey", "string", ""));

    StorageDescriptor descriptor = new StorageDescriptor();
    descriptor.setCols(hiveColumns);

    Table hiveTable = new Table();
    hiveTable.setParameters(tableParams);
    hiveTable.setSd(descriptor);

    // This check is expected to pass for the given input
    storageHandler.checkTableSchemaType(description, hiveTable);
}
示例9: addColumn
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * Appends a new column to an existing Hive table's schema and writes the
 * altered table back to the metastore.
 *
 * @throws PrestoException        if column addition is disabled for this catalog
 * @throws TableNotFoundException if the target table does not exist
 */
@Override
public void addColumn(ConnectorSession session, ConnectorTableHandle tableHandle, ColumnMetadata column)
{
    if (!allowAddColumn) {
        throw new PrestoException(PERMISSION_DENIED, "Adding Columns is disabled in this Hive catalog");
    }
    HiveTableHandle hiveHandle = checkType(tableHandle, HiveTableHandle.class, "tableHandle");
    Optional<Table> existing = metastore.getTable(hiveHandle.getSchemaName(), hiveHandle.getTableName());
    if (!existing.isPresent()) {
        throw new TableNotFoundException(hiveHandle.getSchemaTableName());
    }
    Table table = existing.get();
    StorageDescriptor descriptor = table.getSd();
    // Rebuild the column list with the new field appended at the end.
    ImmutableList.Builder<FieldSchema> updatedColumns = ImmutableList.builder();
    updatedColumns.addAll(descriptor.getCols());
    updatedColumns.add(new FieldSchema(column.getName(), toHiveType(column.getType()).getHiveTypeName(), column.getComment()));
    descriptor.setCols(updatedColumns.build());
    table.setSd(descriptor);
    metastore.alterTable(hiveHandle.getSchemaName(), hiveHandle.getTableName(), table);
}
示例10: addTestPartition
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * Adds a partition with the given values and creation time to an existing
 * test table via the local metastore client.
 *
 * @param tbl        table to attach the partition to
 * @param values     partition values, in partition-key order
 * @param createTime creation timestamp (seconds) to stamp on the partition
 * @return the partition as stored by the metastore
 * @throws Exception if the metastore call fails
 */
public Partition addTestPartition(Table tbl, List<String> values, int createTime) throws Exception {
    StorageDescriptor partitionSd = new StorageDescriptor();
    String tableLocation = tbl.getSd().getLocation();
    if (StringUtils.isNotBlank(tableLocation)) {
        // NOTE(review): appends the List's toString() (e.g. "[a, b]") to the
        // location — fine for test scaffolding, but confirm it is intentional.
        partitionSd.setLocation(tableLocation + values);
    } else {
        partitionSd.setLocation("/tmp/" + tbl.getTableName() + "/part1");
    }
    partitionSd.setSerdeInfo(
        new SerDeInfo("name", "serializationLib", ImmutableMap.of(HiveAvroSerDeManager.SCHEMA_URL, "/tmp/dummy")));
    partitionSd.setCols(tbl.getPartitionKeys());

    Partition partition =
        new Partition(values, tbl.getDbName(), tbl.getTableName(), 1, 1, partitionSd, new HashMap<String, String>());
    partition.setCreateTime(createTime);
    return this.getLocalMetastoreClient().add_partition(partition);
}
示例11: makeMetastoreTableObject
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * Builds (but does not create) a metastore {@link Table} object with the given
 * database, name, and columns, using a default serde configuration.
 *
 * @param client metastore client (unused here; kept for interface parity)
 * @param dbName database name to set on the table
 * @param tabName table name to set on the table
 * @param cols   column schemas for the storage descriptor
 * @return the assembled Table object
 * @throws Exception declared for caller convenience; nothing here throws
 */
public Table makeMetastoreTableObject(HiveMetaStoreClient client,
    String dbName, String tabName, List<FieldSchema> cols) throws Exception {
    Table tbl = new Table();
    tbl.setDbName(dbName);
    tbl.setTableName(tabName);
    tbl.setParameters(new HashMap<String, String>());

    StorageDescriptor descriptor = new StorageDescriptor();
    tbl.setSd(descriptor);
    descriptor.setCols(cols);
    descriptor.setCompressed(false);
    descriptor.setParameters(new HashMap<String, String>());
    descriptor.setSortCols(new ArrayList<Order>());

    // Default serde named after the table, serialization format "1".
    SerDeInfo serde = new SerDeInfo();
    descriptor.setSerdeInfo(serde);
    serde.setName(tbl.getTableName());
    serde.setParameters(new HashMap<String, String>());
    serde.getParameters()
        .put(serdeConstants.SERIALIZATION_FORMAT, "1");
    return tbl;
}
示例12: createTable
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * Creates a partitioned external source table on disk and in the metastore,
 * with two date/hour partitions each containing a small tab-separated file.
 *
 * @param sourceTableUri root directory for the table's data
 * @throws Exception if file writing or metastore calls fail
 */
private void createTable(File sourceTableUri) throws Exception {
    // Partition 1: local_date=2000-01-01/local_hour=0
    File partitionEurope = new File(sourceTableUri, "local_date=2000-01-01");
    File partitionUk = new File(partitionEurope, "local_hour=0");
    File dataFileUk = new File(partitionUk, PART_00000);
    FileUtils.writeStringToFile(dataFileUk, "1\tadam\tlondon\n2\tsusan\tglasgow\n");

    // Partition 2: local_date=2000-01-02/local_hour=0
    File partitionAsia = new File(sourceTableUri, "local_date=2000-01-02");
    File partitionChina = new File(partitionAsia, "local_hour=0");
    File dataFileChina = new File(partitionChina, PART_00000);
    String data = "1\tchun\tbeijing\n2\tshanghai\tmilan\n";
    FileUtils.writeStringToFile(dataFileChina, data);

    List<FieldSchema> partitionColumns = Arrays.asList(new FieldSchema("local_date", "string", ""),
        new FieldSchema("local_hour", "string", ""));
    List<FieldSchema> dataColumns = Arrays.asList(new FieldSchema("id", "bigint", ""),
        new FieldSchema("name", "string", ""), new FieldSchema("city", "tinyint", ""));

    StorageDescriptor descriptor = new StorageDescriptor();
    descriptor.setCols(dataColumns);
    descriptor.setLocation(sourceTableUri.toURI().toString());
    descriptor.setParameters(new HashMap<String, String>());
    descriptor.setSerdeInfo(new SerDeInfo());

    Table source = new Table();
    source.setDbName(DATABASE);
    source.setTableName(TABLE);
    source.setTableType(TableType.EXTERNAL_TABLE.name());
    source.setParameters(new HashMap<String, String>());
    source.setPartitionKeys(partitionColumns);
    source.setSd(descriptor);

    HiveMetaStoreClient sourceClient = sourceCatalog.client();
    sourceClient.createTable(source);
    LOG.info(">>>> Partitions added: {}",
        sourceClient.add_partitions(Arrays.asList(newPartition(descriptor, Arrays.asList("2000-01-01", "0"), partitionUk),
            newPartition(descriptor, Arrays.asList("2000-01-02", "0"), partitionChina))));
}
示例13: before
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * Test fixture: creates an external source table with a single string column
 * before each test runs.
 */
@Before
public void before() throws TException, IOException {
    StorageDescriptor descriptor = new StorageDescriptor();
    descriptor.setCols(Arrays.asList(new FieldSchema("col1", "string", null)));
    descriptor.setSerdeInfo(new SerDeInfo());

    Table sourceTable = new Table();
    sourceTable.setDbName(DATABASE);
    sourceTable.setTableName("source_" + TABLE);
    sourceTable.setTableType(TableType.EXTERNAL_TABLE.name());
    // "EXTERNAL"="TRUE" is required in addition to the EXTERNAL_TABLE type.
    sourceTable.putToParameters("EXTERNAL", "TRUE");
    sourceTable.setSd(descriptor);

    hive.client().createTable(sourceTable);
}
示例14: newPartition
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * Builds (without persisting) a Partition for DB_NAME/TABLE_NAME with the
 * given partition values, located under the table location, and a row-count
 * statistic of 1.
 *
 * @param values partition values, in partition-key order
 * @return the assembled Partition object
 */
private Partition newPartition(String... values) {
    StorageDescriptor descriptor = new StorageDescriptor();
    descriptor.setLocation(new Path(tableLocation, partitionName(values)).toUri().toString());
    descriptor.setCols(FIELDS);

    HashMap<String, String> stats = new HashMap<>();
    stats.put(StatsSetupConst.ROW_COUNT, "1");

    Partition partition = new Partition();
    partition.setDbName(DB_NAME);
    partition.setTableName(TABLE_NAME);
    partition.setSd(descriptor);
    partition.setParameters(stats);
    partition.setValues(Arrays.asList(values));
    return partition;
}
示例15: newStorageDescriptor
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; //导入方法依赖的package包/类
/**
 * Builds a StorageDescriptor for the given location with one field schema per
 * column name and a default serde.
 *
 * @param location filesystem directory the descriptor points at
 * @param columns  column names, each converted via newFieldSchema()
 * @return the assembled StorageDescriptor
 */
public static StorageDescriptor newStorageDescriptor(File location, String... columns) {
    // Pre-size to the exact column count.
    List<FieldSchema> fields = new ArrayList<>(columns.length);
    for (String column : columns) {
        fields.add(newFieldSchema(column));
    }
    StorageDescriptor descriptor = new StorageDescriptor();
    descriptor.setSerdeInfo(new SerDeInfo());
    descriptor.setLocation(location.toURI().toString());
    descriptor.setCols(fields);
    return descriptor;
}