This page collects typical usage examples of the Java class org.apache.hadoop.hive.ql.metadata.Table. If you are wondering what the Table class does, how to use it, or want to see it in real code, the curated class examples below may help.
The Table class belongs to the org.apache.hadoop.hive.ql.metadata package. Fifteen code examples of the Table class are shown below, sorted by popularity.
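Before the examples, here is a minimal sketch of the pattern most of them follow: build a Table, mark it external, point it at a storage location, and attach columns and partition keys. The helper name externalTable and its parameters are hypothetical, and the column/partition lists are assumed to be supplied by the caller; this is a sketch of typical usage, not code from any of the projects below.

import java.util.List;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.metadata.Table;

public class TableUsageSketch {
  // Hypothetical helper: builds an external Table whose data lives at the given location.
  public static Table externalTable(String database, String tableName, String location,
      List<FieldSchema> columns, List<FieldSchema> partitionKeys) {
    Table table = new Table(database, tableName);
    table.setTableType(TableType.EXTERNAL_TABLE);   // external: Hive does not own the files
    table.getParameters().put("EXTERNAL", "TRUE");
    table.setDataLocation(new Path(location));      // directory holding the data files
    table.setFields(columns);                        // regular columns
    table.setPartCols(partitionKeys);                // partition columns
    return table;
  }
}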
Example 1: constructAvroTable
import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
private Table constructAvroTable(String database, String tableName, Schema schema, Partitioner partitioner)
    throws HiveMetaStoreException {
  Table table = newTable(database, tableName);
  // Mark the table as external so Hive does not take ownership of the underlying files.
  table.setTableType(TableType.EXTERNAL_TABLE);
  table.getParameters().put("EXTERNAL", "TRUE");
  String tablePath = FileUtils.hiveDirectoryName(url, topicsDir, tableName);
  table.setDataLocation(new Path(tablePath));
  table.setSerializationLib(avroSerde);
  try {
    table.setInputFormatClass(avroInputFormat);
    table.setOutputFormatClass(avroOutputFormat);
  } catch (HiveException e) {
    throw new HiveMetaStoreException("Cannot find input/output format:", e);
  }
  // Convert the Connect schema to Hive columns.
  List<FieldSchema> columns = HiveSchemaConverter.convertSchema(schema);
  table.setFields(columns);
  table.setPartCols(partitioner.partitionFields());
  // Embed the Avro schema literal in the table properties for the Avro SerDe.
  table.getParameters().put(AVRO_SCHEMA_LITERAL, avroData.fromConnectSchema(schema).toString());
  return table;
}
Example 2: constructParquetTable
import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
private Table constructParquetTable(String database, String tableName, Schema schema, Partitioner partitioner)
    throws HiveMetaStoreException {
  Table table = newTable(database, tableName);
  table.setTableType(TableType.EXTERNAL_TABLE);
  table.getParameters().put("EXTERNAL", "TRUE");
  String tablePath = FileUtils.hiveDirectoryName(url, topicsDir, tableName);
  table.setDataLocation(new Path(tablePath));
  table.setSerializationLib(getHiveParquetSerde());
  try {
    table.setInputFormatClass(getHiveParquetInputFormat());
    table.setOutputFormatClass(getHiveParquetOutputFormat());
  } catch (HiveException e) {
    throw new HiveMetaStoreException("Cannot find input/output format:", e);
  }
  // Convert the copycat (Kafka Connect) schema to Hive columns.
  List<FieldSchema> columns = HiveSchemaConverter.convertSchema(schema);
  table.setFields(columns);
  table.setPartCols(partitioner.partitionFields());
  return table;
}
Example 3: constructAvroTable
import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
private Table constructAvroTable(String database, String tableName, Schema schema, Partitioner partitioner)
    throws HiveMetaStoreException {
  Table table = new Table(database, tableName);
  table.setTableType(TableType.EXTERNAL_TABLE);
  table.getParameters().put("EXTERNAL", "TRUE");
  String tablePath = FileUtils.hiveDirectoryName(url, topicsDir, tableName);
  table.setDataLocation(new Path(tablePath));
  table.setSerializationLib(avroSerde);
  try {
    table.setInputFormatClass(avroInputFormat);
    table.setOutputFormatClass(avroOutputFormat);
  } catch (HiveException e) {
    throw new HiveMetaStoreException("Cannot find input/output format:", e);
  }
  List<FieldSchema> columns = HiveSchemaConverter.convertSchema(schema);
  table.setFields(columns);
  table.setPartCols(partitioner.partitionFields());
  table.getParameters().put(AVRO_SCHEMA_LITERAL, avroData.fromConnectSchema(schema).toString());
  return table;
}
Example 4: constructParquetTable
import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
private Table constructParquetTable(String database, String tableName, Schema schema, Partitioner partitioner)
    throws HiveMetaStoreException {
  Table table = new Table(database, tableName);
  table.setTableType(TableType.EXTERNAL_TABLE);
  table.getParameters().put("EXTERNAL", "TRUE");
  String tablePath = FileUtils.hiveDirectoryName(url, topicsDir, tableName);
  table.setDataLocation(new Path(tablePath));
  table.setSerializationLib(getHiveParquetSerde());
  try {
    table.setInputFormatClass(getHiveParquetInputFormat());
    table.setOutputFormatClass(getHiveParquetOutputFormat());
  } catch (HiveException e) {
    throw new HiveMetaStoreException("Cannot find input/output format:", e);
  }
  // Convert the copycat (Kafka Connect) schema to Hive columns.
  List<FieldSchema> columns = HiveSchemaConverter.convertSchema(schema);
  table.setFields(columns);
  table.setPartCols(partitioner.partitionFields());
  return table;
}
Example 5: getEstimatedSizeBytes
import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
/**
 * Returns the size of the table in bytes; any filter/partition details passed
 * are not taken into account.
 */
@Override
public long getEstimatedSizeBytes(PipelineOptions pipelineOptions) throws Exception {
  Configuration conf = new Configuration();
  for (Entry<String, String> entry : spec.getConfigProperties().entrySet()) {
    conf.set(entry.getKey(), entry.getValue());
  }
  IMetaStoreClient client = null;
  try {
    HiveConf hiveConf = HCatUtil.getHiveConf(conf);
    client = HCatUtil.getHiveMetastoreClient(hiveConf);
    Table table = HCatUtil.getTable(client, spec.getDatabase(), spec.getTable());
    return StatsUtils.getFileSizeForTable(hiveConf, table);
  } finally {
    // IMetaStoreClient is not AutoCloseable, so close it manually.
    if (client != null) {
      client.close();
    }
  }
}
Example 6: insertThriftRenameTableLogEntry
import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
/**
 * Insert a Thrift audit log entry that represents renaming a table.
 *
 * @param oldTable the source table
 * @param newTable the table it was renamed to
 * @param hiveConf Hive configuration
 * @throws Exception if there's an error inserting into the audit log
 */
public static void insertThriftRenameTableLogEntry(
    org.apache.hadoop.hive.metastore.api.Table oldTable,
    org.apache.hadoop.hive.metastore.api.Table newTable,
    HiveConf hiveConf) throws Exception {
  final MetastoreAuditLogListener metastoreAuditLogListener =
      new MetastoreAuditLogListener(hiveConf);
  AlterTableEvent event = new AlterTableEvent(
      oldTable,
      newTable,
      true,
      null
  );
  metastoreAuditLogListener.onAlterTable(event);
}
Example 7: onCreateTable
import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
/**
 * Listener which fires when a table is created.
 *
 * <p>For auditing purposes the read/write differential is the non-existence
 * and existence of the created table respectively.</p>
 *
 * @param event The create table event
 */
@Override
public void onCreateTable(CreateTableEvent event) throws MetaException {
  try {
    Set<ReadEntity> readEntities = new HashSet<>();
    Set<WriteEntity> writeEntities = new HashSet<>();
    writeEntities.add(
        new WriteEntity(
            new Table(event.getTable()),
            WriteType.INSERT
        )
    );
    run(readEntities, writeEntities, HiveOperation.THRIFT_CREATE_TABLE);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
Example 8: onAlterTable
import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
/**
 * Listener which fires when a table is altered.
 *
 * <p>For auditing purposes the read/write differential is the old and new
 * table respectively.</p>
 *
 * @param event The alter table event
 */
@Override
public void onAlterTable(AlterTableEvent event) throws MetaException {
  try {
    Set<ReadEntity> readEntities = new HashSet<>();
    readEntities.add(new ReadEntity(new Table(event.getOldTable())));
    Set<WriteEntity> writeEntities = new HashSet<>();
    writeEntities.add(
        new WriteEntity(
            new Table(event.getNewTable()),
            WriteType.INSERT
        )
    );
    run(readEntities, writeEntities, HiveOperation.THRIFT_ALTER_TABLE);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
Example 9: onAddPartition
import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
/**
 * Listener which fires when a partition is added.
 *
 * <p>For auditing purposes the read/write differential is the non-existence
 * and existence of the added partition respectively.</p>
 *
 * @param event The add partition event
 */
@Override
public void onAddPartition(AddPartitionEvent event) throws MetaException {
  try {
    Table table = new Table(event.getTable());
    Set<ReadEntity> readEntities = new HashSet<>();
    Set<WriteEntity> writeEntities = new HashSet<>();
    for (org.apache.hadoop.hive.metastore.api.Partition partition :
        event.getPartitions()) {
      writeEntities.add(
          new WriteEntity(
              new Partition(table, partition),
              WriteType.INSERT
          )
      );
    }
    run(readEntities, writeEntities, HiveOperation.THRIFT_ADD_PARTITION);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
Example 10: onDropPartition
import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
/**
 * Listener which fires when a partition is dropped.
 *
 * <p>For auditing purposes the read/write differential is the existence and
 * non-existence of the dropped partition respectively.</p>
 *
 * @param event The drop partition event
 */
@Override
public void onDropPartition(DropPartitionEvent event) throws MetaException {
  try {
    Set<ReadEntity> readEntities = new HashSet<>();
    readEntities.add(
        new ReadEntity(
            new Partition(new Table(event.getTable()), event.getPartition())
        )
    );
    Set<WriteEntity> writeEntities = new HashSet<>();
    run(readEntities, writeEntities, HiveOperation.THRIFT_DROP_PARTITION);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
Example 11: getCreateTableString
import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
private String getCreateTableString(Table table, String location) {
  String colString = "";
  List<FieldSchema> colList = table.getAllCols();
  if (colList != null) {
    // Build a "(name type, name type, ...)" column list for the DDL.
    for (FieldSchema col : colList) {
      colString += col.getName() + " " + col.getType() + ",";
    }
    if (colList.size() > 0) {
      colString = colString.substring(0, colString.length() - 1);  // drop the trailing comma
      colString = "(" + colString + ")";
    }
  }
  String query = "create external table " + table.getTableName() + colString +
      " location '" + location + "'";
  return query;
}
Example 12: registerTable
import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
private Referenceable registerTable(Referenceable dbReference, Table table) throws AtlasHookException {
  try {
    String dbName = table.getDbName();
    String tableName = table.getTableName();
    LOG.info("Attempting to register table [{}]", tableName);
    Referenceable tableReference = getTableReference(table);
    LOG.info("Found result {}", tableReference);
    if (tableReference == null) {
      // Not yet known to Atlas: create and register a new entity.
      tableReference = createTableInstance(dbReference, table);
      tableReference = registerInstance(tableReference);
    } else {
      LOG.info("Table {}.{} is already registered with id {}. Updating entity.", dbName, tableName,
          tableReference.getId().id);
      tableReference = createOrUpdateTableInstance(dbReference, tableReference, table);
      updateInstance(tableReference);
    }
    return tableReference;
  } catch (Exception e) {
    throw new AtlasHookException("HiveMetaStoreBridge.registerTable() failed.", e);
  }
}
Example 13: replaceTableQFName
import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
private Referenceable replaceTableQFName(HiveEventContext event, Table oldTable, Table newTable,
    final Referenceable tableEntity, final String oldTableQFName, final String newTableQFName)
    throws HiveException {
  tableEntity.set(AtlasClient.NAME, oldTable.getTableName().toLowerCase());
  tableEntity.set(AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME, oldTableQFName);
  // Replace the table entity with one carrying the new name.
  final Referenceable newEntity = new Referenceable(HiveDataTypes.HIVE_TABLE.getName());
  newEntity.set(AtlasClient.NAME, newTable.getTableName().toLowerCase());
  newEntity.set(AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME, newTableQFName);
  ArrayList<String> aliasList = new ArrayList<>();
  aliasList.add(oldTable.getTableName().toLowerCase());
  newEntity.set(HiveMetaStoreBridge.TABLE_ALIAS_LIST, aliasList);
  event.addMessage(new HookNotification.EntityPartialUpdateRequest(event.getUser(),
      HiveDataTypes.HIVE_TABLE.getName(), AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME,
      oldTableQFName, newEntity));
  return newEntity;
}
Example 14: testImportThatUpdatesRegisteredTable
import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
@Test
public void testImportThatUpdatesRegisteredTable() throws Exception {
  setupDB(hiveClient, TEST_DB_NAME);
  List<Table> hiveTables = setupTables(hiveClient, TEST_DB_NAME, TEST_TABLE_NAME);
  returnExistingDatabase(TEST_DB_NAME, atlasClient, CLUSTER_NAME);
  // Return an existing table entity from Atlas.
  when(atlasClient.getEntity(HiveDataTypes.HIVE_TABLE.getName(),
      AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME,
      HiveMetaStoreBridge.getTableQualifiedName(CLUSTER_NAME, TEST_DB_NAME, TEST_TABLE_NAME)))
      .thenReturn(getEntityReference(HiveDataTypes.HIVE_TABLE.getName(), "82e06b34-9151-4023-aa9d-b82103a50e77"));
  when(atlasClient.getEntity("82e06b34-9151-4023-aa9d-b82103a50e77")).thenReturn(createTableReference());
  String processQualifiedName = HiveMetaStoreBridge.getTableProcessQualifiedName(CLUSTER_NAME, hiveTables.get(0));
  when(atlasClient.getEntity(HiveDataTypes.HIVE_PROCESS.getName(),
      AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME, processQualifiedName))
      .thenReturn(getEntityReference(HiveDataTypes.HIVE_PROCESS.getName(), "82e06b34-9151-4023-aa9d-b82103a50e77"));
  HiveMetaStoreBridge bridge = new HiveMetaStoreBridge(CLUSTER_NAME, hiveClient, atlasClient);
  bridge.importHiveMetadata(true);
  // Verify that update is called on the table entity.
  verify(atlasClient).updateEntity(eq("82e06b34-9151-4023-aa9d-b82103a50e77"),
      (Referenceable) argThat(new MatchesReferenceableProperty(HiveMetaStoreBridge.TABLE_TYPE_ATTR,
          TableType.EXTERNAL_TABLE.name())));
}
Example 15: testImportContinuesWhenTableRegistrationFails
import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
@Test
public void testImportContinuesWhenTableRegistrationFails() throws Exception {
  setupDB(hiveClient, TEST_DB_NAME);
  final String table2Name = TEST_TABLE_NAME + "_1";
  List<Table> hiveTables = setupTables(hiveClient, TEST_DB_NAME, TEST_TABLE_NAME, table2Name);
  returnExistingDatabase(TEST_DB_NAME, atlasClient, CLUSTER_NAME);
  // The first table fails to load from the metastore; the import should still process the second one.
  when(hiveClient.getTable(TEST_DB_NAME, TEST_TABLE_NAME))
      .thenThrow(new RuntimeException("Timeout while reading data from hive metastore"));
  when(atlasClient.getEntity(HiveDataTypes.HIVE_TABLE.getName(), AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME,
      HiveMetaStoreBridge.getTableQualifiedName(CLUSTER_NAME, TEST_DB_NAME, table2Name)))
      .thenReturn(getEntityReference(HiveDataTypes.HIVE_TABLE.getName(), "82e06b34-9151-4023-aa9d-b82103a50e77"));
  when(atlasClient.getEntity("82e06b34-9151-4023-aa9d-b82103a50e77")).thenReturn(createTableReference());
  String processQualifiedName = HiveMetaStoreBridge.getTableProcessQualifiedName(CLUSTER_NAME, hiveTables.get(1));
  when(atlasClient.getEntity(HiveDataTypes.HIVE_PROCESS.getName(), AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME,
      processQualifiedName)).thenReturn(getEntityReference(HiveDataTypes.HIVE_PROCESS.getName(), "82e06b34-9151-4023-aa9d-b82103a50e77"));
  HiveMetaStoreBridge bridge = new HiveMetaStoreBridge(CLUSTER_NAME, hiveClient, atlasClient);
  try {
    bridge.importHiveMetadata(false);
  } catch (Exception e) {
    Assert.fail("Table registration failed with exception", e);
  }
}