This article collects typical usage examples of the Java class org.apache.hadoop.hive.metastore.MetaStoreUtils. If you are wondering what MetaStoreUtils does and how to use it, the curated examples below should help.
The MetaStoreUtils class belongs to the org.apache.hadoop.hive.metastore package. The examples that follow show its typical uses, ordered roughly by popularity.
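For orientation, here is a minimal sketch of the two MetaStoreUtils checks that recur throughout the examples below, isView and isExternalTable; the classify helper is illustrative and not taken from any of the projects quoted here.

import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.api.Table;

public class MetaStoreUtilsSketch {
  // Classifies a metastore Table before deciding whether dropping it may delete data.
  static String classify(Table table) {
    if (MetaStoreUtils.isView(table)) {
      return "view";      // only HQL text, no data to move
    }
    if (MetaStoreUtils.isExternalTable(table)) {
      return "external";  // the metastore does not own the underlying data
    }
    return "managed";     // dropping the table may also delete its data
  }
}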
Example 1: transform
import org.apache.hadoop.hive.metastore.MetaStoreUtils; // import the required class
@Override
public Table transform(Table table) {
if (!MetaStoreUtils.isView(table)) {
return table;
}
LOG.info("Translating HQL of view {}.{}", table.getDbName(), table.getTableName());
String tableQualifiedName = Warehouse.getQualifiedName(table);
String hql = hqlTranslator.translate(tableQualifiedName, table.getViewOriginalText());
String expandedHql = hqlTranslator.translate(tableQualifiedName, table.getViewExpandedText());
Table transformedView = new Table(table);
transformedView.setViewOriginalText(hql);
transformedView.setViewExpandedText(expandedHql);
if (!replicaHiveConf.getBoolean(SKIP_TABLE_EXIST_CHECKS, false)) {
LOG.info("Validating that tables used by the view {}.{} exist in the replica catalog", table.getDbName(),
table.getTableName());
validateReferencedTables(transformedView);
}
return transformedView;
}
Example 2: getLocationManager
import org.apache.hadoop.hive.metastore.MetaStoreUtils; // import the required class
public SourceLocationManager getLocationManager(
Table table,
List<Partition> partitions,
String eventId,
Map<String, Object> copierOptions)
throws IOException {
if (MetaStoreUtils.isView(table)) {
return new ViewLocationManager();
}
HdfsSnapshotLocationManager hdfsSnapshotLocationManager = new HdfsSnapshotLocationManager(getHiveConf(), eventId,
table, partitions, snapshotsDisabled, sourceTableLocation, sourceCatalogListener);
boolean ignoreMissingFolder = MapUtils.getBooleanValue(copierOptions,
CopierOptions.IGNORE_MISSING_PARTITION_FOLDER_ERRORS, false);
if (ignoreMissingFolder) {
return new FilterMissingPartitionsLocationManager(hdfsSnapshotLocationManager, getHiveConf());
}
return hdfsSnapshotLocationManager;
}
Example 3: partition_name_has_valid_characters
import org.apache.hadoop.hive.metastore.MetaStoreUtils; // import the required class
/**
* {@inheritDoc}
*/
@Override
public boolean partition_name_has_valid_characters(final List<String> partVals, final boolean throwException)
throws TException {
return requestWrapper("partition_name_has_valid_characters", new Object[]{partVals, throwException},
() -> {
Pattern pattern = null;
final String partitionPattern = config.getHivePartitionWhitelistPattern();
if (!Strings.isNullOrEmpty(partitionPattern)) {
pattern = PATTERNS.getUnchecked(partitionPattern);
}
if (throwException) {
MetaStoreUtils.validatePartitionNameCharacters(partVals, pattern);
return true;
} else {
return MetaStoreUtils.partitionNameHasValidCharacters(partVals, pattern);
}
});
}
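For reference, a caller-side sketch of the two validation modes used above; the whitelist pattern is a made-up example, while the two MetaStoreUtils calls are exactly those from the snippet:

import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;

List<String> partVals = Arrays.asList("2019-01-01", "us_east");
Pattern whitelist = Pattern.compile("[\\w.-]+"); // hypothetical whitelist pattern

// Quiet mode: just reports whether every value matches.
boolean ok = MetaStoreUtils.partitionNameHasValidCharacters(partVals, whitelist);

// Strict mode: throws MetaException naming the offending value.
MetaStoreUtils.validatePartitionNameCharacters(partVals, whitelist);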
Example 4: preCreateTable
import org.apache.hadoop.hive.metastore.MetaStoreUtils; // import the required class
@Override
public void preCreateTable(Table table) throws MetaException {
DynamoDBClient client = createDynamoDBClient(table);
try {
boolean isExternal = MetaStoreUtils.isExternalTable(table);
if (!isExternal) {
throw new MetaException("Only EXTERNAL tables are supported for DynamoDB.");
}
String tableName = HiveDynamoDBUtil.getDynamoDBTableName(table.getParameters()
.get(DynamoDBConstants.TABLE_NAME), table.getTableName());
TableDescription tableDescription = client.describeTable(tableName);
checkTableStatus(tableDescription);
checkTableSchemaMapping(tableDescription, table);
checkTableSchemaType(tableDescription, table);
} finally {
client.close();
}
}
Example 5: getPartitionMetadata
import org.apache.hadoop.hive.metastore.MetaStoreUtils; // import the required class
/**
* Wrapper around {@link MetaStoreUtils#getPartitionMetadata(Partition, Table)} that also adds the table's
* parameters to the properties it returns.
*
* @param partition the source of partition level parameters
* @param table the source of table level parameters
* @return properties
*/
public static Properties getPartitionMetadata(final HivePartition partition, final HiveTableWithColumnCache table) {
final Properties properties;
restoreColumns(table, partition);
properties = MetaStoreUtils.getPartitionMetadata(partition, table);
// SerDe expects properties from the Table, but the call above doesn't add Table properties.
// Include the Table properties in the final list so as not to break SerDes that depend on
// them. For example, AvroSerDe gets the schema from the properties (passed as the second argument).
for (Map.Entry<String, String> entry : table.getParameters().entrySet()) {
if (entry.getKey() != null && entry.getValue() != null) {
properties.put(entry.getKey(), entry.getValue());
}
}
return properties;
}
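To see why the merge matters, consider a hypothetical Avro-backed table: AvroSerDe reads its schema from a table-level parameter, which the plain partition-level call would not carry over. The variable names here are placeholders:

// Hypothetical Avro-backed table: the schema literal is a *table* parameter.
Properties props = getPartitionMetadata(hivePartition, hiveTable);
// Non-null only because the loop above copied the table's parameters;
// MetaStoreUtils.getPartitionMetadata alone would omit it for partitions.
String avroSchema = props.getProperty("avro.schema.literal");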
Example 6: commitDropTable
import org.apache.hadoop.hive.metastore.MetaStoreUtils; // import the required class
@Override
public void commitDropTable(Table tbl, boolean deleteData)
throws MetaException {
KuduClient client = getKuduClient(tbl.getParameters().get(HiveKuduConstants.MASTER_ADDRESS_NAME));
String tablename = getKuduTableName(tbl);
boolean isExternal = MetaStoreUtils.isExternalTable(tbl);
try {
if (deleteData && !isExternal) {
client.deleteTable(tablename);
}
} catch (Exception ioe) {
throw new MetaException("Error dropping table:" +tablename);
} finally {
try {
client.shutdown();
} catch (Exception e) {
e.printStackTrace();
}
}
}
Example 7: rollbackCreateTable
import org.apache.hadoop.hive.metastore.MetaStoreUtils; // import the required class
@Override
public void rollbackCreateTable(Table tbl) throws MetaException {
KuduClient client = getKuduClient(tbl.getParameters().get(HiveKuduConstants.MASTER_ADDRESS_NAME));
String tablename = getKuduTableName(tbl);
boolean isExternal = MetaStoreUtils.isExternalTable(tbl);
try {
if (client.tableExists(tablename) && !isExternal) {
client.deleteTable(tablename);
}
} catch (Exception ioe) {
throw new MetaException("Error dropping table while rollback of create table:" +tablename);
} finally {
try {
client.shutdown();
} catch (Exception e) {
e.printStackTrace();
}
}
}
Example 8: start
import org.apache.hadoop.hive.metastore.MetaStoreUtils; // import the required class
public void start(Map<String, String> confOverlay) throws Exception {
if (isMetastoreRemote) {
int metaStorePort = MetaStoreUtils.findFreePort();
getHiveConf().setVar(ConfVars.METASTOREURIS, "thrift://localhost:" + metaStorePort);
MetaStoreUtils.startMetaStore(metaStorePort,
ShimLoader.getHadoopThriftAuthBridge(), getHiveConf());
}
hiveServer2 = new HiveServer2();
// Set confOverlay parameters
for (Map.Entry<String, String> entry : confOverlay.entrySet()) {
setConfProperty(entry.getKey(), entry.getValue());
}
hiveServer2.init(getHiveConf());
hiveServer2.start();
waitForStartup();
setStarted(true);
}
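A hedged sketch of how such a helper is typically driven from a test; the server variable, the stop() teardown, and the config key are assumptions about the surrounding test harness, not part of the snippet above:

// Hypothetical test harness usage.
Map<String, String> confOverlay = new HashMap<>();
confOverlay.put("hive.exec.scratchdir", "/tmp/hs2-test"); // illustrative override
server.start(confOverlay); // boots a remote metastore (if configured) plus HiveServer2
// ... issue JDBC queries against the thrift://localhost:<port> URI ...
server.stop(); // assumed teardown counterpart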
Example 9: HCatTableInfo
import org.apache.hadoop.hive.metastore.MetaStoreUtils; // import the required class
/**
* Initializes a new HCatTableInfo instance to be used with
* {@link org.apache.hive.hcatalog.mapreduce.HCatInputFormat} for reading data from
* a table. When working with Hadoop security, pass the Kerberos principal name of the
* server; otherwise null. The principal name should be of the form:
* <servicename>/_HOST@<realm>, e.g. "hcat/_HOST@<realm>".
* The special string _HOST will be replaced automatically with the correct host name
* @param databaseName the db name
* @param tableName the table name
* @param dataColumns schema of columns which contain data
* @param partitionColumns schema of partition columns
* @param storerInfo information about storage descriptor
* @param table hive metastore table class
*/
HCatTableInfo(
String databaseName,
String tableName,
HCatSchema dataColumns,
HCatSchema partitionColumns,
StorerInfo storerInfo,
Table table) {
this.databaseName = (databaseName == null) ?
MetaStoreUtils.DEFAULT_DATABASE_NAME : databaseName;
this.tableName = tableName;
this.dataColumns = dataColumns;
this.table = table;
this.storerInfo = storerInfo;
this.partitionColumns = partitionColumns;
}
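The _HOST replacement mentioned in the javadoc is conventionally done with Hadoop's SecurityUtil; a minimal sketch, with a placeholder realm:

import java.io.IOException;
import org.apache.hadoop.security.SecurityUtil;

// Expands _HOST to the canonical name of the local host.
// "hcat/_HOST@EXAMPLE.COM" is a placeholder principal, not from the snippet above.
String principal = SecurityUtil.getServerPrincipal("hcat/_HOST@EXAMPLE.COM", "0.0.0.0");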
Example 10: getFieldSchemas
import org.apache.hadoop.hive.metastore.MetaStoreUtils; // import the required class
/**
* First tries getting the {@code FieldSchema}s from the {@code HiveRegistrationUnit}'s columns, if set.
* Else, gets the {@code FieldSchema}s from the deserializer.
*/
private static List<FieldSchema> getFieldSchemas(HiveRegistrationUnit unit) {
List<Column> columns = unit.getColumns();
List<FieldSchema> fieldSchemas = new ArrayList<>();
if (columns != null && !columns.isEmpty()) {
fieldSchemas = getFieldSchemas(columns);
} else {
Deserializer deserializer = getDeserializer(unit);
if (deserializer != null) {
try {
fieldSchemas = MetaStoreUtils.getFieldsFromDeserializer(unit.getTableName(), deserializer);
} catch (SerDeException | MetaException e) {
LOG.warn("Encountered exception while getting fields from deserializer.", e);
}
}
}
return fieldSchemas;
}
Example 11: getThriftSchema
import org.apache.hadoop.hive.metastore.MetaStoreUtils; // import the required class
/**
* Get a Schema with fields represented with Thrift DDL types
*/
public Schema getThriftSchema() throws Exception {
Schema schema;
try {
schema = getSchema();
if (schema != null) {
List<FieldSchema> lst = schema.getFieldSchemas();
// Go over the schema and convert type to thrift type
if (lst != null) {
for (FieldSchema f : lst) {
f.setType(MetaStoreUtils.typeToThriftType(f.getType()));
}
}
}
} catch (Exception e) {
LOG.error("Failed to build Thrift schema", e);
throw e;
}
LOG.info("Returning Thrift schema: " + schema);
return schema;
}
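For intuition, typeToThriftType maps Hive DDL type names onto Thrift DDL names; the concrete mappings below are recalled from MetaStoreUtils' internal type map and should be treated as illustrative:

// Illustrative conversions (assumed mappings, not verified against every Hive version):
MetaStoreUtils.typeToThriftType("int");     // -> "i32"
MetaStoreUtils.typeToThriftType("bigint");  // -> "i64"
MetaStoreUtils.typeToThriftType("boolean"); // -> "bool"
MetaStoreUtils.typeToThriftType("string");  // -> "string"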
Example 12: commitDropTable
import org.apache.hadoop.hive.metastore.MetaStoreUtils; // import the required class
@Override
public void commitDropTable(Table table, boolean deleteData) throws MetaException {
//TODO: Should this be implemented to drop the table and its data from cassandra
boolean isExternal = MetaStoreUtils.isExternalTable(table);
if (deleteData && !isExternal) {
CqlManager manager = new CqlManager(table);
try {
//open connection to cassandra
manager.openConnection();
//drop the table
manager.dropTable();
} finally {
manager.closeConnection();
}
}
}
Example 13: commitDropTable
import org.apache.hadoop.hive.metastore.MetaStoreUtils; // import the required class
@Override
public void commitDropTable(Table table, boolean deleteData) throws MetaException {
//TODO: Should this be implemented to drop the table and its data from cassandra
boolean isExternal = MetaStoreUtils.isExternalTable(table);
if (deleteData && !isExternal) {
CassandraManager manager = new CassandraManager(table);
try {
//open connection to cassandra
manager.openConnection();
//drop the table
manager.dropTable();
} finally {
manager.closeConnection();
}
}
}
Example 14: validate
import org.apache.hadoop.hive.metastore.MetaStoreUtils; // import the required class
private void validate(TableReplication tableReplication, Source source, Replica replica) {
source.getDatabase(tableReplication.getSourceTable().getDatabaseName());
replica.getDatabase(tableReplication.getReplicaDatabaseName());
TableAndStatistics sourceTableAndStatistics = source.getTableAndStatistics(tableReplication);
if (tableReplication.getReplicationMode() != ReplicationMode.METADATA_MIRROR
&& MetaStoreUtils.isView(sourceTableAndStatistics.getTable())) {
throw new CircusTrainException(String.format("Cannot replicate view %s. Only %s is supported for views",
tableReplication.getSourceTable().getQualifiedName(), ReplicationMode.METADATA_MIRROR.name()));
}
}