This article collects typical usage examples of the Java class org.apache.hadoop.hive.metastore.api.Table. If you are wondering what the Table class is for and how to use it, the curated examples below may help.
The Table class belongs to the org.apache.hadoop.hive.metastore.api package. Fifteen code examples are shown below, ordered by popularity.
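Most of the examples share one basic pattern: build a Table, attach a StorageDescriptor describing its columns and storage formats, and register it through HiveMetaStoreClient. The following minimal sketch shows that pattern in isolation; it is not taken from the examples below, "my_db" and "my_table" are placeholder names, and it assumes a metastore reachable through the given HiveConf:

import java.util.Arrays;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;

Table table = new Table();
table.setDbName("my_db"); // placeholder database name
table.setTableName("my_table"); // placeholder table name
StorageDescriptor sd = new StorageDescriptor();
sd.setCols(Arrays.asList(new FieldSchema("id", "int", null)));
sd.setSerdeInfo(new SerDeInfo());
sd.getSerdeInfo().setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
table.setSd(sd);
// Register the table with the metastore configured by HiveConf.
HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
client.createTable(table);
client.close();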
Example 1: createPartitionedTable
import org.apache.hadoop.hive.metastore.api.Table; // import the required package/class

private Table createPartitionedTable(String databaseName, String tableName) throws Exception {
  Table table = new Table();
  table.setDbName(databaseName);
  table.setTableName(tableName);
  table.setPartitionKeys(Arrays.asList(new FieldSchema("partcol", "int", null)));
  table.setSd(new StorageDescriptor());
  table.getSd().setCols(
      Arrays.asList(new FieldSchema("id", "int", null), new FieldSchema("name", "string", null)));
  table.getSd().setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
  table.getSd().setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
  table.getSd().setSerdeInfo(new SerDeInfo());
  table.getSd().getSerdeInfo().setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
  HiveMetaStoreClient client = server.newClient();
  client.createTable(table);
  client.close();
  return table;
}
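Note that the partition column partcol is declared only through setPartitionKeys and is deliberately absent from the StorageDescriptor's column list: the Hive metastore keeps partition columns and data columns as disjoint sets.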
Example 2: HiveTextRecordReader
import org.apache.hadoop.hive.metastore.api.Table; // import the required package/class

public HiveTextRecordReader(Table table, Partition partition, InputSplit inputSplit,
    List<SchemaPath> projectedColumns, FragmentContext context) throws ExecutionSetupException {
  super(table, partition, inputSplit, projectedColumns, context, null);
  // Use the table's field.delim serde property if set, otherwise fall back to Hive's default.
  String d = table.getSd().getSerdeInfo().getParameters().get("field.delim");
  if (d != null) {
    delimiter = d.getBytes()[0];
  } else {
    delimiter = (byte) 1;
  }
  assert delimiter > 0;
  // Record the indices of the projected columns within the table's column list.
  List<Integer> ids = Lists.newArrayList();
  for (int i = 0; i < tableColumns.size(); i++) {
    if (selectedColumnNames.contains(tableColumns.get(i))) {
      ids.add(i);
    }
  }
  columnIds = ids;
  numCols = tableColumns.size();
}
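The fallback value (byte) 1 is Hive's default field delimiter, Ctrl-A (\u0001), which LazySimpleSerDe uses when no field.delim serde property has been set on the table.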
Example 3: createPartitionedTable
import org.apache.hadoop.hive.metastore.api.Table; // import the required package/class

void createPartitionedTable(URI sourceTableUri) throws Exception {
  Table hiveTable = TestUtils.createPartitionedTable(metaStoreClient, DATABASE, SOURCE_PARTITIONED_TABLE,
      sourceTableUri);

  URI partitionEurope = URI.create(sourceTableUri + "/continent=Europe");
  URI partitionUk = URI.create(partitionEurope + "/country=UK");
  File dataFileUk = new File(partitionUk.getPath(), PART_00000);
  FileUtils.writeStringToFile(dataFileUk, "1\tadam\tlondon\n2\tsusan\tglasgow\n");

  URI partitionAsia = URI.create(sourceTableUri + "/continent=Asia");
  URI partitionChina = URI.create(partitionAsia + "/country=China");
  File dataFileChina = new File(partitionChina.getPath(), PART_00000);
  FileUtils.writeStringToFile(dataFileChina, "1\tchun\tbeijing\n2\tshanghai\tmilan\n");

  LOG.info(">>>> Partitions added: {}",
      metaStoreClient.add_partitions(
          Arrays.asList(newTablePartition(hiveTable, Arrays.asList("Europe", "UK"), partitionUk),
              newTablePartition(hiveTable, Arrays.asList("Asia", "China"), partitionChina))));
}
Example 4: tablesMatchEachOther
import org.apache.hadoop.hive.metastore.api.Table; // import the required package/class

@Test
public void tablesMatchEachOther() throws Exception {
  Table sourceTable = catalog.client().getTable(DATABASE, SOURCE_TABLE);
  Table replicaTable = catalog.client().getTable(DATABASE, REPLICA_TABLE);
  HiveDifferences
      .builder(diffListener)
      .comparatorRegistry(comparatorRegistry)
      .source(configuration, sourceTable, new PartitionIterator(catalog.client(), sourceTable, PARTITION_BATCH_SIZE))
      .replica(Optional.of(replicaTable),
          Optional.of(new BufferedPartitionFetcher(catalog.client(), replicaTable, PARTITION_BATCH_SIZE)))
      .checksumFunction(checksumFunction)
      .build()
      .run();
  // Identical tables should trigger no difference callbacks.
  verify(diffListener, never()).onChangedTable(anyList());
  verify(diffListener, never()).onNewPartition(anyString(), any(Partition.class));
  verify(diffListener, never()).onChangedPartition(anyString(), any(Partition.class), anyList());
  verify(diffListener, never()).onDataChanged(anyString(), any(Partition.class));
}
Example 5: updateMetadata
import org.apache.hadoop.hive.metastore.api.Table; // import the required package/class

public void updateMetadata(
    String eventId,
    TableAndStatistics sourceTable,
    String replicaDatabaseName,
    String replicaTableName,
    ReplicaLocationManager locationManager) {
  try (CloseableMetaStoreClient client = getMetaStoreClientSupplier().get()) {
    Optional<Table> oldReplicaTable = updateTableMetadata(client, eventId, sourceTable, replicaDatabaseName,
        replicaTableName, locationManager.getTableLocation(), replicationMode);
    // If a previous replica had a data location, schedule it for clean-up.
    if (oldReplicaTable.isPresent() && LocationUtils.hasLocation(oldReplicaTable.get())) {
      Path oldLocation = locationAsPath(oldReplicaTable.get());
      String oldEventId = oldReplicaTable.get().getParameters().get(REPLICATION_EVENT.parameterName());
      locationManager.addCleanUpLocation(oldEventId, oldLocation);
    }
  }
}
Example 6: HiveTable
import org.apache.hadoop.hive.metastore.api.Table; // import the required package/class

public HiveTable(Table table) {
  if (table == null) {
    return;
  }
  this.table = table;
  this.tableName = table.getTableName();
  this.dbName = table.getDbName();
  this.owner = table.getOwner();
  this.createTime = table.getCreateTime();
  this.lastAccessTime = table.getLastAccessTime();
  this.retention = table.getRetention();
  this.sd = new StorageDescriptorWrapper(table.getSd());
  this.partitionKeys = Lists.newArrayList();
  for (FieldSchema f : table.getPartitionKeys()) {
    this.partitionKeys.add(new FieldSchemaWrapper(f));
    partitionNameTypeMap.put(f.getName(), f.getType());
  }
  this.parameters = table.getParameters();
  this.viewOriginalText = table.getViewOriginalText();
  this.viewExpandedText = table.getViewExpandedText();
  this.tableType = table.getTableType();
}
Example 7: replicate
import org.apache.hadoop.hive.metastore.api.Table; // import the required package/class

@Override
public void replicate() throws CircusTrainException {
  try {
    replica.validateReplicaTable(replicaDatabaseName, replicaTableName);
    TableAndStatistics sourceTableAndStatistics = source.getTableAndStatistics(database, table);
    Table sourceTable = sourceTableAndStatistics.getTable();
    SourceLocationManager sourceLocationManager = source.getLocationManager(sourceTable, eventId);
    ReplicaLocationManager replicaLocationManager = new MetadataMirrorReplicaLocationManager(sourceLocationManager,
        TableType.UNPARTITIONED);
    sourceLocationManager.cleanUpLocations();
    // Mirror only the metadata; no data files are copied.
    replica.updateMetadata(eventId, sourceTableAndStatistics, replicaDatabaseName, replicaTableName,
        replicaLocationManager);
    LOG.info("Metadata mirrored for table {}.{} (no data copied).", database, table);
  } catch (Throwable t) {
    throw new CircusTrainException("Unable to replicate", t);
  }
}
Example 8: HdfsSnapshotLocationManager
import org.apache.hadoop.hive.metastore.api.Table; // import the required package/class

HdfsSnapshotLocationManager(
    HiveConf sourceHiveConf,
    String eventId,
    Table sourceTable,
    List<Partition> sourcePartitions,
    boolean snapshotsDisabled,
    String tableBasePath,
    FileSystemFactory fileSystemFactory,
    SourceCatalogListener sourceCatalogListener) throws IOException {
  this.sourceHiveConf = sourceHiveConf;
  this.eventId = eventId;
  this.sourceTable = sourceTable;
  this.snapshotsDisabled = snapshotsDisabled;
  this.sourceCatalogListener = sourceCatalogListener;
  this.fileSystemFactory = fileSystemFactory;
  // Prefer an explicitly configured base path; fall back to the table's own location.
  String sourceDataLocation;
  if (StringUtils.isNotBlank(tableBasePath)) {
    sourceDataLocation = tableBasePath;
  } else {
    sourceDataLocation = sourceTable.getSd().getLocation();
  }
  sourceDataPath = new Path(sourceDataLocation);
  copyBasePath = createSnapshot();
  String copyBaseLocation = copyBasePath.toString();
  subPaths = calculateSubPaths(sourcePartitions, sourceDataLocation, copyBaseLocation);
}
Example 9: getLocationManager
import org.apache.hadoop.hive.metastore.api.Table; // import the required package/class

public SourceLocationManager getLocationManager(
    Table table,
    List<Partition> partitions,
    String eventId,
    Map<String, Object> copierOptions) throws IOException {
  // Views have no backing data location, so they get a dedicated manager.
  if (MetaStoreUtils.isView(table)) {
    return new ViewLocationManager();
  }
  HdfsSnapshotLocationManager hdfsSnapshotLocationManager = new HdfsSnapshotLocationManager(getHiveConf(), eventId,
      table, partitions, snapshotsDisabled, sourceTableLocation, sourceCatalogListener);
  boolean ignoreMissingFolder = MapUtils.getBooleanValue(copierOptions,
      CopierOptions.IGNORE_MISSING_PARTITION_FOLDER_ERRORS, false);
  if (ignoreMissingFolder) {
    return new FilterMissingPartitionsLocationManager(hdfsSnapshotLocationManager, getHiveConf());
  }
  return hdfsSnapshotLocationManager;
}
Example 10: transformOverride
import org.apache.hadoop.hive.metastore.api.Table; // import the required package/class

@Test
public void transformOverride() throws Exception {
  when(avroSerDeConfig.getBaseUrl()).thenReturn("schema");
  // A per-replication override replaces the globally configured Avro schema base URL.
  Map<String, Object> avroOverrideOptions = new HashMap<>();
  avroOverrideOptions.put(AvroSerDeConfig.TABLE_REPLICATION_OVERRIDE_BASE_URL, "schemaOverride");
  Map<String, Object> transformOptions = new HashMap<>();
  transformOptions.put(AvroSerDeConfig.TABLE_REPLICATION_OVERRIDE_AVRO_SERDE_OPTIONS, avroOverrideOptions);
  when(tableReplicationEvent.getTransformOptions()).thenReturn(transformOptions);
  EventReplicaTable eventReplicaTable = new EventReplicaTable("db", "table", "location");
  when(tableReplicationEvent.getReplicaTable()).thenReturn(eventReplicaTable);

  transformation.tableReplicationStart(tableReplicationEvent, "eventId");
  HiveObjectUtils.updateSerDeUrl(table, AVRO_SCHEMA_URL_PARAMETER, "avroSourceUrl");
  when(schemaCopier.copy("avroSourceUrl", "schemaOverride/eventId/")).thenReturn(destinationPath);

  Table result = transformation.transform(table);
  assertThat(result.getParameters().get(AVRO_SCHEMA_URL_PARAMETER), is(destinationPathString));
}
Example 11: newTable
import org.apache.hadoop.hive.metastore.api.Table; // import the required package/class

private Table newTable() {
  Table table = new Table();
  table.setDbName(DB_NAME);
  table.setTableName(TABLE_NAME);
  table.setTableType(TableType.EXTERNAL_TABLE.name());

  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation(tableLocation);
  table.setSd(sd);

  HashMap<String, String> parameters = new HashMap<>();
  parameters.put(StatsSetupConst.ROW_COUNT, "1");
  table.setParameters(parameters);

  table.setPartitionKeys(PARTITIONS);
  return table;
}
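Note that, depending on the Hive version, setting the table type to EXTERNAL_TABLE through the API may not be sufficient on its own: Hive also consults the table parameter "EXTERNAL"="TRUE" when deciding whether to treat a table as external.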
Example 12: newTableWithNameMappings
import org.apache.hadoop.hive.metastore.api.Table; // import the required package/class

@Test
public void newTableWithNameMappings() {
  TableAndStatistics replicaAndStats = factory.newReplicaTable(EVENT_ID, sourceTableAndStats, MAPPED_DB_NAME,
      MAPPED_TABLE_NAME, REPLICA_DATA_DESTINATION, FULL);
  Table replica = replicaAndStats.getTable();
  assertThat(replica.getDbName(), is(MAPPED_DB_NAME));
  assertThat(replica.getTableName(), is(MAPPED_TABLE_NAME));
  assertThat(replica.getSd().getInputFormat(), is(INPUT_FORMAT));
  assertThat(replica.getSd().getOutputFormat(), is(OUTPUT_FORMAT));
  assertThat(replica.getSd().getLocation(), is(REPLICA_DATA_DESTINATION.toUri().toString()));
  // Provenance parameters record where the replica came from.
  assertThat(replica.getParameters().get("com.hotels.bdp.circustrain.source.table"), is(DB_NAME + "." + TABLE_NAME));
  assertThat(replica.getParameters().get("com.hotels.bdp.circustrain.source.metastore.uris"),
      is(SOURCE_META_STORE_URIS));
  assertThat(replica.getParameters().get("com.hotels.bdp.circustrain.source.location"), is(TABLE_LOCATION));
  assertThat(replica.getParameters().get("com.hotels.bdp.circustrain.replication.event"), is(EVENT_ID));
  assertThat(replica.getParameters().get("com.hotels.bdp.circustrain.last.replicated"), is(not(nullValue())));
  assertThat(replica.getParameters().get("DO_NOT_UPDATE_STATS"), is("true"));
  assertThat(replica.getParameters().get("STATS_GENERATED_VIA_STATS_TASK"), is("true"));
  assertThat(replica.getParameters().get("STATS_GENERATED"), is("true"));
}
Example 13: getPartitions
import org.apache.hadoop.hive.metastore.api.Table; // import the required package/class

private static List<PartitionValue> getPartitions(Table table, Partition partition) {
  if (partition == null) {
    return Collections.emptyList();
  }
  final List<String> partitionValues = partition.getValues();
  final List<PartitionValue> output = Lists.newArrayList();
  final List<FieldSchema> partitionKeys = table.getPartitionKeys();
  for (int i = 0; i < partitionKeys.size(); i++) {
    PartitionValue value = getPartitionValue(partitionKeys.get(i), partitionValues.get(i));
    if (value != null) {
      output.add(value);
    }
  }
  return output;
}
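This works because partition.getValues() is positional: the values line up one-to-one with the table's partitionKeys, so both lists can safely be indexed with the same i.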
Example 14: createView
import org.apache.hadoop.hive.metastore.api.Table; // import the required package/class

private static Table createView(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String view,
    String table,
    List<FieldSchema> partitionCols) throws TException {
  Table hiveView = new Table();
  hiveView.setDbName(database);
  hiveView.setTableName(view);
  hiveView.setTableType(TableType.VIRTUAL_VIEW.name());
  hiveView.setViewOriginalText(hql(database, table));
  hiveView.setViewExpandedText(expandHql(database, table, DATA_COLUMNS, partitionCols));
  hiveView.setPartitionKeys(partitionCols);
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());
  hiveView.setSd(sd);
  metaStoreClient.createTable(hiveView);
  return hiveView;
}
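Because the table type is VIRTUAL_VIEW, no location, input format, or output format is set on the StorageDescriptor: a view carries only a logical schema together with the original and expanded HQL text.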
Example 15: sourcePartitionHasChanged
import org.apache.hadoop.hive.metastore.api.Table; // import the required package/class

@Test
public void sourcePartitionHasChanged() throws Exception {
  // Add an extra column to one source partition so it no longer matches the replica.
  Partition sourcePartition1 = catalog.client().getPartition(DATABASE, SOURCE_TABLE, "part=1");
  sourcePartition1.getSd().getCols().add(BAZ_COL);
  catalog.client().alter_partition(DATABASE, SOURCE_TABLE, sourcePartition1);

  Table sourceTable = catalog.client().getTable(DATABASE, SOURCE_TABLE);
  Table replicaTable = catalog.client().getTable(DATABASE, REPLICA_TABLE);
  HiveDifferences
      .builder(diffListener)
      .comparatorRegistry(comparatorRegistry)
      .source(configuration, sourceTable, new PartitionIterator(catalog.client(), sourceTable, PARTITION_BATCH_SIZE))
      .replica(Optional.of(replicaTable),
          Optional.of(new BufferedPartitionFetcher(catalog.client(), replicaTable, PARTITION_BATCH_SIZE)))
      .checksumFunction(checksumFunction)
      .build()
      .run();
  verify(diffListener, never()).onChangedTable(anyList());
  verify(diffListener, never()).onNewPartition(anyString(), any(Partition.class));
  // Only the altered partition should be reported as changed.
  verify(diffListener, times(1)).onChangedPartition("part=1",
      catalog.client().getPartition(DATABASE, SOURCE_TABLE, "part=1"),
      Arrays.<Diff<Object, Object>> asList(new BaseDiff<Object, Object>(
          "Collection partition.sd.cols of class java.util.ArrayList has different size: left.size()=3 and right.size()=2",
          Arrays.asList(FOO_COL, BAR_COL, BAZ_COL), Arrays.asList(FOO_COL, BAR_COL))));
  verify(diffListener, never()).onDataChanged(anyString(), any(Partition.class));
}