This article collects typical usage examples of the Java method org.apache.hadoop.hive.metastore.api.Table.putToParameters. If you are wondering what Table.putToParameters does, how to use it, or where to find real-world examples, the curated code samples below should help. You can also explore the enclosing class, org.apache.hadoop.hive.metastore.api.Table, for further usage examples.
The 15 code examples below demonstrate Table.putToParameters, sorted by popularity by default.
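Before the examples, a quick note on the method itself: putToParameters is the Thrift-generated convenience method that adds a single key/value entry to the table's parameters map, creating the map first if it is still null. A minimal, self-contained sketch (the database and table names here are hypothetical, for illustration only):

import org.apache.hadoop.hive.metastore.api.Table;

public class PutToParametersSketch {
  public static void main(String[] args) {
    Table table = new Table();
    table.setDbName("my_db");       // hypothetical name
    table.setTableName("my_table"); // hypothetical name
    // putToParameters lazily initializes the parameters map and adds one entry
    table.putToParameters("EXTERNAL", "TRUE");
    System.out.println(table.getParameters()); // prints {EXTERNAL=TRUE}
  }
}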
Example 1: createPartitionedTable

import org.apache.hadoop.hive.metastore.api.Table; // import for the package/class this method depends on

static Table createPartitionedTable(HiveMetaStoreClient metaStoreClient, String database, String table, File location)
    throws Exception {
  Table hiveTable = new Table();
  hiveTable.setDbName(database);
  hiveTable.setTableName(table);
  // Mark the table as external so dropping it will not delete the underlying data
  hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
  hiveTable.putToParameters("EXTERNAL", "TRUE");
  hiveTable.setPartitionKeys(PARTITION_COLUMNS);
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setLocation(location.toURI().toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());
  hiveTable.setSd(sd);
  metaStoreClient.createTable(hiveTable);
  return hiveTable;
}
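PARTITION_COLUMNS and DATA_COLUMNS above are fixtures defined elsewhere in the test class. A plausible definition, inferred from the continent/country partition values and the tab-separated id/name/city rows used in the other examples (the real fixtures may differ):

// Hypothetical fixtures -- inferred from the data used in these examples.
static final List<FieldSchema> DATA_COLUMNS = Arrays.asList(
    new FieldSchema("id", "bigint", ""),
    new FieldSchema("name", "string", ""),
    new FieldSchema("city", "string", ""));
static final List<FieldSchema> PARTITION_COLUMNS = Arrays.asList(
    new FieldSchema("continent", "string", ""),
    new FieldSchema("country", "string", ""));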
Example 2: createManagedPartitionedTable

import org.apache.hadoop.hive.metastore.api.Table; // import for the package/class this method depends on

void createManagedPartitionedTable(URI sourceTableUri) throws Exception {
  TestUtils.createPartitionedTable(metaStoreClient, DATABASE, SOURCE_MANAGED_PARTITIONED_TABLE, sourceTableUri);
  // Flip the freshly created external table to a managed table
  Table table = metaStoreClient.getTable(DATABASE, SOURCE_MANAGED_PARTITIONED_TABLE);
  table.setTableType(TableType.MANAGED_TABLE.name());
  table.putToParameters("EXTERNAL", "FALSE");
  metaStoreClient.alter_table(table.getDbName(), table.getTableName(), table);

  URI partitionEurope = URI.create(sourceTableUri + "/continent=Europe");
  URI partitionUk = URI.create(partitionEurope + "/country=UK");
  File dataFileUk = new File(partitionUk.getPath(), PART_00000);
  FileUtils.writeStringToFile(dataFileUk, "1\tadam\tlondon\n2\tsusan\tglasgow\n");

  URI partitionAsia = URI.create(sourceTableUri + "/continent=Asia");
  URI partitionChina = URI.create(partitionAsia + "/country=China");
  File dataFileChina = new File(partitionChina.getPath(), PART_00000);
  FileUtils.writeStringToFile(dataFileChina, "1\tchun\tbeijing\n2\tshanghai\tmilan\n");

  LOG.info(">>>> Partitions added: {}",
      metaStoreClient.add_partitions(
          Arrays.asList(
              newTablePartition(table, Arrays.asList("Europe", "UK"), partitionUk),
              newTablePartition(table, Arrays.asList("Asia", "China"), partitionChina))));
}
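The newTablePartition helper is not shown in this snippet. Assuming it does no more than its name suggests, a minimal sketch would clone the table's storage descriptor and point it at the partition directory:

// Hypothetical helper -- the real implementation may differ.
static Partition newTablePartition(Table hiveTable, List<String> values, URI location) {
  Partition partition = new Partition();
  partition.setDbName(hiveTable.getDbName());
  partition.setTableName(hiveTable.getTableName());
  partition.setValues(values);
  partition.setSd(new StorageDescriptor(hiveTable.getSd())); // Thrift copy constructor
  partition.getSd().setLocation(location.toString());
  return partition;
}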
Example 3: apply

import org.apache.hadoop.hive.metastore.api.Table; // import for the package/class this method depends on

Table apply(Table table, String avroSchemaDestination, String eventId) throws Exception {
  if (avroSchemaDestination == null) {
    return table;
  }
  avroSchemaDestination = addTrailingSlash(avroSchemaDestination);
  avroSchemaDestination += eventId;
  // Copy the Avro schema file to the destination, then repoint the table at the copy
  String avroSchemaSource = table.getParameters().get(AVRO_SCHEMA_URL_PARAMETER);
  copy(avroSchemaSource, avroSchemaDestination);
  table.putToParameters(AVRO_SCHEMA_URL_PARAMETER,
      avroSchemaDestination + "/" + getAvroSchemaFileName(avroSchemaSource));
  LOG.info("Avro SerDe transformation has been applied to table '{}'", table.getTableName());
  return table;
}
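The addTrailingSlash, copy, and getAvroSchemaFileName helpers are private details of the transformation class. Plausible sketches of the two string helpers, assuming they do only what their names suggest (copy is omitted, as it likely wraps filesystem-specific logic):

// Hypothetical helpers -- shown only to make the example self-contained.
private static String addTrailingSlash(String path) {
  return path.endsWith("/") ? path : path + "/";
}

private static String getAvroSchemaFileName(String avroSchemaUrl) {
  // Last path segment of the schema URL, e.g. ".../schema.avsc" -> "schema.avsc"
  return avroSchemaUrl.substring(avroSchemaUrl.lastIndexOf('/') + 1);
}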
Example 4: createUnpartitionedTable

import org.apache.hadoop.hive.metastore.api.Table; // import for the package/class this method depends on

static Table createUnpartitionedTable(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String table,
    File location)
    throws TException {
  Table hiveTable = new Table();
  hiveTable.setDbName(database);
  hiveTable.setTableName(table);
  hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
  hiveTable.putToParameters("EXTERNAL", "TRUE");
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setLocation(location.toURI().toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());
  hiveTable.setSd(sd);
  metaStoreClient.createTable(hiveTable);
  return hiveTable;
}
Example 5: setReplicaTableType

import org.apache.hadoop.hive.metastore.api.Table; // import for the package/class this method depends on

private void setReplicaTableType(Table source, Table replica) {
  if (TableType.VIRTUAL_VIEW.name().equals(source.getTableType())) {
    replica.setTableType(TableType.VIRTUAL_VIEW.name());
    return;
  }
  // We set the table to external no matter what: we don't want to delete data
  // accidentally when dropping a mirrored table.
  replica.setTableType(TableType.EXTERNAL_TABLE.name());
  replica.putToParameters(EXTERNAL, "TRUE");
}
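The EXTERNAL parameter written here is exactly what a check like the isExternalTable assertion in Example 9 would inspect. A plausible sketch of such a check (the real helper may differ):

// Hypothetical check, pairing the table type with the EXTERNAL parameter set above.
static boolean isExternalTable(Table table) {
  return TableType.EXTERNAL_TABLE.name().equalsIgnoreCase(table.getTableType())
      && "TRUE".equalsIgnoreCase(table.getParameters().get("EXTERNAL"));
}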
Example 6: before

import org.apache.hadoop.hive.metastore.api.Table; // import for the package/class this method depends on

@Before
public void before() throws TException, IOException {
  Table table = new Table();
  table.setDbName(DATABASE);
  table.setTableName("source_" + TABLE);
  table.setTableType(TableType.EXTERNAL_TABLE.name());
  table.putToParameters("EXTERNAL", "TRUE");
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(Arrays.asList(new FieldSchema("col1", "string", null)));
  sd.setSerdeInfo(new SerDeInfo());
  table.setSd(sd);
  hive.client().createTable(table);
}
Example 7: createUnpartitionedTable

import org.apache.hadoop.hive.metastore.api.Table; // import for the package/class this method depends on

public static Table createUnpartitionedTable(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String table,
    URI location)
    throws TException {
  Table hiveTable = new Table();
  hiveTable.setDbName(database);
  hiveTable.setTableName(table);
  hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
  hiveTable.putToParameters("EXTERNAL", "TRUE");
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setLocation(location.toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setInputFormat(TextInputFormat.class.getName());
  sd.setOutputFormat(TextOutputFormat.class.getName());
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setSerializationLib("org.apache.hadoop.hive.serde2.OpenCSVSerde");
  hiveTable.setSd(sd);
  metaStoreClient.createTable(hiveTable);
  // Attach column statistics so replication tests can verify they are carried over
  ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, database, table);
  ColumnStatisticsData statsData = new ColumnStatisticsData(_Fields.LONG_STATS, new LongColumnStatsData(1L, 2L));
  ColumnStatisticsObj cso1 = new ColumnStatisticsObj("id", "bigint", statsData);
  List<ColumnStatisticsObj> statsObj = Collections.singletonList(cso1);
  metaStoreClient.updateTableColumnStatistics(new ColumnStatistics(statsDesc, statsObj));
  return hiveTable;
}
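Note that the two-argument LongColumnStatsData constructor populates the required numNulls and numDVs fields. To verify the statistics landed, a test could read them back through the standard client API; a sketch, reusing the names above:

// Sketch: reading back the statistics written above.
List<ColumnStatisticsObj> stats =
    metaStoreClient.getTableColumnStatistics(database, table, Collections.singletonList("id"));
LongColumnStatsData longStats = stats.get(0).getStatsData().getLongStats();
// longStats.getNumNulls() == 1 and longStats.getNumDVs() == 2, per the constructor call above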
Example 8: createPartitionedTable

import org.apache.hadoop.hive.metastore.api.Table; // import for the package/class this method depends on

public static Table createPartitionedTable(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String table,
    URI location)
    throws Exception {
  Table hiveTable = new Table();
  hiveTable.setDbName(database);
  hiveTable.setTableName(table);
  hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
  hiveTable.putToParameters("EXTERNAL", "TRUE");
  hiveTable.setPartitionKeys(PARTITION_COLUMNS);
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setLocation(location.toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setInputFormat(TextInputFormat.class.getName());
  sd.setOutputFormat(TextOutputFormat.class.getName());
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setSerializationLib("org.apache.hadoop.hive.serde2.OpenCSVSerde");
  hiveTable.setSd(sd);
  metaStoreClient.createTable(hiveTable);
  // Attach column statistics so replication tests can verify they are carried over
  ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, database, table);
  ColumnStatisticsData statsData = new ColumnStatisticsData(_Fields.LONG_STATS, new LongColumnStatsData(1L, 2L));
  ColumnStatisticsObj cso1 = new ColumnStatisticsObj("id", "bigint", statsData);
  List<ColumnStatisticsObj> statsObj = Collections.singletonList(cso1);
  metaStoreClient.updateTableColumnStatistics(new ColumnStatistics(statsDesc, statsObj));
  return hiveTable;
}
Example 9: unpartitionedTableMetadataMirror

import org.apache.hadoop.hive.metastore.api.Table; // import for the package/class this method depends on

@Test
public void unpartitionedTableMetadataMirror() throws Exception {
  helper.createManagedUnpartitionedTable(toUri(sourceWarehouseUri, DATABASE, SOURCE_MANAGED_UNPARTITIONED_TABLE));
  LOG.info(">>>> Table {} ", sourceCatalog.client().getTable(DATABASE, SOURCE_MANAGED_UNPARTITIONED_TABLE));

  // Adjust the source table, mimicking the change we want mirrored to the replica
  Table sourceTable = sourceCatalog.client().getTable(DATABASE, SOURCE_MANAGED_UNPARTITIONED_TABLE);
  sourceTable.putToParameters("paramToUpdate", "updated");
  sourceCatalog.client().alter_table(sourceTable.getDbName(), sourceTable.getTableName(), sourceTable);

  exit.expectSystemExitWithStatus(0);
  File config = dataFolder.getFile("unpartitioned-single-table-mirror.yml");
  CircusTrainRunner runner = CircusTrainRunner
      .builder(DATABASE, sourceWarehouseUri, replicaWarehouseUri, housekeepingDbLocation)
      .sourceMetaStore(sourceCatalog.getThriftConnectionUri(), sourceCatalog.connectionURL(),
          sourceCatalog.driverClassName())
      .replicaMetaStore(replicaCatalog.getThriftConnectionUri())
      .build();
  exit.checkAssertionAfterwards(new Assertion() {
    @Override
    public void checkAssertion() throws Exception {
      Table hiveTable = replicaCatalog.client().getTable(DATABASE, TARGET_UNPARTITIONED_MANAGED_TABLE);
      assertThat(hiveTable.getDbName(), is(DATABASE));
      assertThat(hiveTable.getTableName(), is(TARGET_UNPARTITIONED_MANAGED_TABLE));
      // A mirrored table should be set to EXTERNAL
      assertThat(isExternalTable(hiveTable), is(true));
      assertThat(hiveTable.getParameters().get("paramToUpdate"), is("updated"));
      assertThat(hiveTable.getSd().getCols(), is(DATA_COLUMNS));
      File sameAsSourceLocation = new File(sourceWarehouseUri, DATABASE + "/" + SOURCE_MANAGED_UNPARTITIONED_TABLE);
      assertThat(hiveTable.getSd().getLocation() + "/", is(sameAsSourceLocation.toURI().toString()));
    }
  });
  runner.run(config.getAbsolutePath());
}
Example 10: unpartitionedTableReplicateAvroSchema

import org.apache.hadoop.hive.metastore.api.Table; // import for the package/class this method depends on

@Test
public void unpartitionedTableReplicateAvroSchema() throws Exception {
  helper.createManagedUnpartitionedTable(toUri(sourceWarehouseUri, DATABASE, SOURCE_MANAGED_UNPARTITIONED_TABLE));
  LOG.info(">>>> Table {} ", sourceCatalog.client().getTable(DATABASE, SOURCE_MANAGED_UNPARTITIONED_TABLE));

  java.nio.file.Path sourceAvroSchemaPath = Paths.get(sourceWarehouseUri.toString() + "/avro-schema-file.test");
  Files.createDirectories(sourceAvroSchemaPath);
  String avroSchemaBaseUrl = sourceAvroSchemaPath.toString();

  // Point the source table at the Avro schema location
  Table sourceTable = sourceCatalog.client().getTable(DATABASE, SOURCE_MANAGED_UNPARTITIONED_TABLE);
  sourceTable.putToParameters("avro.schema.url", avroSchemaBaseUrl);
  sourceCatalog.client().alter_table(sourceTable.getDbName(), sourceTable.getTableName(), sourceTable);

  exit.expectSystemExitWithStatus(0);
  File config = dataFolder.getFile("unpartitioned-single-table-avro-schema.yml");
  CircusTrainRunner runner = CircusTrainRunner
      .builder(DATABASE, sourceWarehouseUri, replicaWarehouseUri, housekeepingDbLocation)
      .sourceMetaStore(sourceCatalog.getThriftConnectionUri(), sourceCatalog.connectionURL(),
          sourceCatalog.driverClassName())
      .replicaMetaStore(replicaCatalog.getThriftConnectionUri())
      .build();
  exit.checkAssertionAfterwards(new Assertion() {
    @Override
    public void checkAssertion() throws Exception {
      Table replicaHiveTable = replicaCatalog.client().getTable(DATABASE, TARGET_UNPARTITIONED_MANAGED_TABLE);
      String expectedReplicaSchemaUrl = replicaWarehouseUri.toURI().toString() + "ct_database/";
      String transformedAvroUrl = replicaHiveTable.getParameters().get("avro.schema.url");
      assertThat(transformedAvroUrl, startsWith(expectedReplicaSchemaUrl));
    }
  });
  runner.run(config.getAbsolutePath());
}
Example 11: unpartitionedTableReplicateAvroSchemaOverride

import org.apache.hadoop.hive.metastore.api.Table; // import for the package/class this method depends on

@Test
public void unpartitionedTableReplicateAvroSchemaOverride() throws Exception {
  helper.createManagedUnpartitionedTable(toUri(sourceWarehouseUri, DATABASE, SOURCE_MANAGED_UNPARTITIONED_TABLE));
  LOG.info(">>>> Table {} ", sourceCatalog.client().getTable(DATABASE, SOURCE_MANAGED_UNPARTITIONED_TABLE));

  java.nio.file.Path sourceAvroSchemaPath = Paths.get(sourceWarehouseUri.toString() + "/avro-schema-file.test");
  Files.createDirectories(sourceAvroSchemaPath);
  String avroSchemaBaseUrl = sourceAvroSchemaPath.toString();

  Table sourceTable = sourceCatalog.client().getTable(DATABASE, SOURCE_MANAGED_UNPARTITIONED_TABLE);
  sourceTable.putToParameters("avro.schema.url", avroSchemaBaseUrl);
  sourceCatalog.client().alter_table(sourceTable.getDbName(), sourceTable.getTableName(), sourceTable);

  exit.expectSystemExitWithStatus(0);
  File config = dataFolder.getFile("unpartitioned-single-table-avro-schema-override.yml");
  CircusTrainRunner runner = CircusTrainRunner
      .builder(DATABASE, sourceWarehouseUri, replicaWarehouseUri, housekeepingDbLocation)
      .sourceMetaStore(sourceCatalog.getThriftConnectionUri(), sourceCatalog.connectionURL(),
          sourceCatalog.driverClassName())
      .replicaMetaStore(replicaCatalog.getThriftConnectionUri())
      .build();
  exit.checkAssertionAfterwards(new Assertion() {
    @Override
    public void checkAssertion() throws Exception {
      Table replicaHiveTable = replicaCatalog.client().getTable(DATABASE, TARGET_UNPARTITIONED_MANAGED_TABLE);
      // The override configuration redirects the schema copy to a different destination
      String expectedReplicaSchemaUrl = replicaWarehouseUri.toURI().toString() + "ct_database-override/";
      String transformedAvroUrl = replicaHiveTable.getParameters().get("avro.schema.url");
      assertThat(transformedAvroUrl, startsWith(expectedReplicaSchemaUrl));
    }
  });
  runner.run(config.getAbsolutePath());
}
Example 12: unpartitionedTableMetadataUpdateAvroSchema

import org.apache.hadoop.hive.metastore.api.Table; // import for the package/class this method depends on

@Test
public void unpartitionedTableMetadataUpdateAvroSchema() throws Exception {
  helper.createManagedUnpartitionedTable(toUri(sourceWarehouseUri, DATABASE, SOURCE_MANAGED_UNPARTITIONED_TABLE));
  LOG.info(">>>> Table {} ", sourceCatalog.client().getTable(DATABASE, SOURCE_MANAGED_UNPARTITIONED_TABLE));

  String avroParameter = "avro.schema.url";
  java.nio.file.Path sourceAvroSchemaUploadPath = Paths.get(sourceWarehouseUri.toString() + "/avro-schema-file.test");
  Files.createDirectories(sourceAvroSchemaUploadPath);
  String avroSchemaBaseUrl = sourceAvroSchemaUploadPath.toString();

  // Pre-create the replica table with a replication event so this run is a metadata update
  URI replicaLocation = toUri(replicaWarehouseUri, DATABASE, SOURCE_MANAGED_UNPARTITIONED_TABLE);
  TestUtils.createUnpartitionedTable(replicaCatalog.client(), DATABASE, TARGET_UNPARTITIONED_MANAGED_TABLE,
      replicaLocation);
  Table table = replicaCatalog.client().getTable(DATABASE, TARGET_UNPARTITIONED_MANAGED_TABLE);
  table.putToParameters(REPLICATION_EVENT.parameterName(), "dummyEventID");
  replicaCatalog.client().alter_table(table.getDbName(), table.getTableName(), table);

  Table sourceTable = sourceCatalog.client().getTable(DATABASE, SOURCE_MANAGED_UNPARTITIONED_TABLE);
  sourceTable.putToParameters(avroParameter, avroSchemaBaseUrl);
  sourceCatalog.client().alter_table(sourceTable.getDbName(), sourceTable.getTableName(), sourceTable);

  exit.expectSystemExitWithStatus(0);
  File config = dataFolder.getFile("unpartitioned-single-table-avro-schema-metadata-update.yml");
  CircusTrainRunner runner = CircusTrainRunner
      .builder(DATABASE, sourceWarehouseUri, replicaWarehouseUri, housekeepingDbLocation)
      .sourceMetaStore(sourceCatalog.getThriftConnectionUri(), sourceCatalog.connectionURL(),
          sourceCatalog.driverClassName())
      .replicaMetaStore(replicaCatalog.getThriftConnectionUri())
      .build();
  exit.checkAssertionAfterwards(new Assertion() {
    @Override
    public void checkAssertion() throws Exception {
      Table replicaHiveTable = replicaCatalog.client().getTable(DATABASE, TARGET_UNPARTITIONED_MANAGED_TABLE);
      String expectedReplicaSchemaUrl = replicaWarehouseUri.toURI().toString() + "ct_database/";
      String transformedAvroUrl = replicaHiveTable.getParameters().get("avro.schema.url");
      assertThat(transformedAvroUrl, startsWith(expectedReplicaSchemaUrl));
    }
  });
  runner.run(config.getAbsolutePath());
}
Example 13: multipleTransformationsApplied

import org.apache.hadoop.hive.metastore.api.Table; // import for the package/class this method depends on

@Test
public void multipleTransformationsApplied() throws Exception {
  helper.createManagedUnpartitionedTable(toUri(sourceWarehouseUri, DATABASE, SOURCE_MANAGED_UNPARTITIONED_TABLE));
  LOG.info(">>>> Table {} ", sourceCatalog.client().getTable(DATABASE, SOURCE_MANAGED_UNPARTITIONED_TABLE));

  java.nio.file.Path sourceAvroSchemaPath = Paths.get(sourceWarehouseUri.toString() + "/avro-schema-file.test");
  Files.createDirectories(sourceAvroSchemaPath);
  String avroSchemaBaseUrl = sourceAvroSchemaPath.toString();

  // Set both the Avro schema parameter and the test-transformation trigger
  Table sourceTable = sourceCatalog.client().getTable(DATABASE, SOURCE_MANAGED_UNPARTITIONED_TABLE);
  sourceTable.putToParameters("avro.schema.url", avroSchemaBaseUrl);
  sourceTable.putToParameters("circus.train.test.transformation", "enabled");
  sourceCatalog.client().alter_table(sourceTable.getDbName(), sourceTable.getTableName(), sourceTable);

  exit.expectSystemExitWithStatus(0);
  File config = dataFolder.getFile("unpartitioned-single-table-avro-schema.yml");
  CircusTrainRunner runner = CircusTrainRunner
      .builder(DATABASE, sourceWarehouseUri, replicaWarehouseUri, housekeepingDbLocation)
      .sourceMetaStore(sourceCatalog.getThriftConnectionUri(), sourceCatalog.connectionURL(),
          sourceCatalog.driverClassName())
      .replicaMetaStore(replicaCatalog.getThriftConnectionUri())
      .build();
  exit.checkAssertionAfterwards(new Assertion() {
    @Override
    public void checkAssertion() throws Exception {
      Table replicaHiveTable = replicaCatalog.client().getTable(DATABASE, TARGET_UNPARTITIONED_MANAGED_TABLE);
      String expectedReplicaSchemaUrl = replicaWarehouseUri.toURI().toString() + "ct_database/";
      String transformedAvroUrl = replicaHiveTable.getParameters().get("avro.schema.url");
      // Both transformations should have been applied to the replica
      assertThat(transformedAvroUrl, startsWith(expectedReplicaSchemaUrl));
      assertThat(replicaHiveTable.getParameters().get("table.transformed"), is("true"));
    }
  });
  runner.run(config.getAbsolutePath());
}
Example 14: createManagedUnpartitionedTable

import org.apache.hadoop.hive.metastore.api.Table; // import for the package/class this method depends on

void createManagedUnpartitionedTable(URI sourceTableUri) throws Exception {
  File dataFile = new File(sourceTableUri.getPath(), PART_00000);
  FileUtils.writeStringToFile(dataFile, "1\tadam\tlondon\n2\tsusan\tmilan\n");
  TestUtils.createUnpartitionedTable(metaStoreClient, DATABASE, SOURCE_MANAGED_UNPARTITIONED_TABLE, sourceTableUri);
  // Flip the freshly created external table to a managed table
  Table table = metaStoreClient.getTable(DATABASE, SOURCE_MANAGED_UNPARTITIONED_TABLE);
  table.setTableType(TableType.MANAGED_TABLE.name());
  table.putToParameters("EXTERNAL", "FALSE");
  metaStoreClient.alter_table(table.getDbName(), table.getTableName(), table);
}
Example 15: transform

import org.apache.hadoop.hive.metastore.api.Table; // import for the package/class this method depends on

@Override
public Table transform(Table table) {
  // Only transform tables that carry the test-transformation trigger parameter
  if (table.getParameters().get("circus.train.test.transformation") != null) {
    table.putToParameters("table.transformed", "true");
  }
  return table;
}
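A quick usage sketch of this transformation, where transformation stands for an instance of the class that declares transform:

Table table = new Table();
table.putToParameters("circus.train.test.transformation", "enabled"); // the trigger parameter
Table transformed = transformation.transform(table);
// transformed.getParameters().get("table.transformed") is now "true"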