This article collects typical usage examples of the Java method org.apache.hadoop.hive.metastore.api.Partition.setTableName. If you are unsure what Partition.setTableName does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hive.metastore.api.Partition.
The following sections present 15 code examples of Partition.setTableName, ordered by popularity.
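Before the examples, a minimal sketch of the method in context may help. Nothing here beyond the Hive metastore API itself is taken from the examples below: the class name PartitionNamingSketch, the helper name partitionFor, and the literal values are illustrative assumptions.

import java.util.Arrays;

import org.apache.hadoop.hive.metastore.api.Partition;

public class PartitionNamingSketch {

  // The metastore resolves a partition to its owning table by the (dbName, tableName)
  // pair, so the value passed to setTableName must match Table.getTableName() exactly.
  public static Partition partitionFor(String databaseName, String tableName, String... values) {
    Partition partition = new Partition();
    partition.setDbName(databaseName);
    partition.setTableName(tableName);
    partition.setValues(Arrays.asList(values)); // one value per partition key, in key order
    return partition;
  }
}

A caller would typically attach a StorageDescriptor and then register the result via a metastore client's add_partition, as several of the examples below do.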
Example 1: dropPartition
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on

@Test
public void dropPartition() throws Exception {
  String tableName = "my_table";
  HiveMetaStoreClient client = server.newClient();
  try {
    Table table = createPartitionedTable(DATABASE, tableName);

    Partition partition = new Partition();
    partition.setDbName(DATABASE);
    partition.setTableName(tableName);
    partition.setValues(Arrays.asList("1"));
    partition.setSd(new StorageDescriptor(table.getSd()));
    partition.getSd().setLocation(
        String.format("file:%s/%s/%s/partcol=1", server.temporaryFolder.getRoot(), DATABASE, tableName));
    client.add_partition(partition);

    try (Connection connection = DriverManager.getConnection(server.connectionURL());
        Statement statement = connection.createStatement()) {
      String dropPartitionHql = String.format("ALTER TABLE %s.%s DROP PARTITION (partcol=1)", DATABASE, tableName);
      statement.execute(dropPartitionHql);
    }

    List<Partition> partitions = client.listPartitions(DATABASE, tableName, (short) -1);
    assertThat(partitions.size(), is(0));
  } finally {
    client.close();
  }
}
Example 2: newPartition
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on

private Partition newPartition(StorageDescriptor tableStorageDescriptor, List<String> values, File location) {
  Partition partition = new Partition();
  partition.setDbName(DATABASE);
  partition.setTableName(TABLE);
  partition.setValues(values);
  partition.setSd(new StorageDescriptor(tableStorageDescriptor));
  partition.getSd().setLocation(location.toURI().toString());
  return partition;
}
Example 3: newReplicaPartition
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on

Partition newReplicaPartition(
    String eventId,
    Table sourceTable,
    Partition sourcePartition,
    String replicaDatabaseName,
    String replicaTableName,
    Path replicaPartitionLocation,
    ReplicationMode replicationMode) {
  Partition replica = partitionTransformation.transform(new Partition(sourcePartition));
  replica.setDbName(replicaDatabaseName);
  replica.setTableName(replicaTableName);
  if (replica.getSd() != null) {
    replica.getSd().setLocation(toStringOrNull(replicaPartitionLocation));
  }
  String sourcePartitionLocation = sourcePartition.getSd() == null ? ""
      : toStringOrEmpty(sourcePartition.getSd().getLocation());
  // Statistics-specific parameters
  replica.putToParameters(STATS_GENERATED_VIA_STATS_TASK, Boolean.TRUE.toString());
  replica.putToParameters(STATS_GENERATED, Boolean.TRUE.toString());
  replica.putToParameters(DO_NOT_UPDATE_STATS, Boolean.TRUE.toString());
  // Replication-specific parameters
  replica.putToParameters(LAST_REPLICATED.parameterName(), DateTime.now(DateTimeZone.UTC).toString());
  replica.putToParameters(REPLICATION_EVENT.parameterName(), eventId);
  replica.putToParameters(SOURCE_LOCATION.parameterName(), sourcePartitionLocation);
  replica.putToParameters(SOURCE_TABLE.parameterName(), Warehouse.getQualifiedName(sourceTable));
  replica.putToParameters(SOURCE_METASTORE.parameterName(), sourceMetaStoreUris);
  replica.putToParameters(REPLICATION_MODE.parameterName(), replicationMode.name());
  return replica;
}
Example 4: newPartition
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on

private Partition newPartition(String... values) {
  Partition partition = new Partition();
  partition.setDbName(DB_NAME);
  partition.setTableName(TABLE_NAME);
  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation(new Path(tableLocation, partitionName(values)).toUri().toString());
  sd.setCols(FIELDS);
  partition.setSd(sd);
  HashMap<String, String> parameters = new HashMap<>();
  parameters.put(StatsSetupConst.ROW_COUNT, "1");
  partition.setParameters(parameters);
  partition.setValues(Arrays.asList(values));
  return partition;
}
Example 5: newReplicaPartitionStatisticsWithTransformation
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on

@Test
public void newReplicaPartitionStatisticsWithTransformation() throws MetaException {
  sourceTable.setPartitionKeys(
      Arrays.asList(new FieldSchema("one", "string", null), new FieldSchema("two", "string", null)));
  Partition replicaPartition = new Partition(sourcePartition);
  replicaPartition.setDbName(MAPPED_DB_NAME);
  replicaPartition.setTableName(MAPPED_TABLE_NAME);
  replicaPartition.setValues(Arrays.asList("A", "B"));

  ColumnStatisticsObj columnStatisticsObj1 = new ColumnStatisticsObj();
  ColumnStatisticsObj columnStatisticsObj2 = new ColumnStatisticsObj();
  List<ColumnStatisticsObj> columnStatisticsObjs = Arrays.asList(columnStatisticsObj1, columnStatisticsObj2);
  ColumnStatisticsDesc columnStatisticsDesc = new ColumnStatisticsDesc(false, DB_NAME, TABLE_NAME);
  columnStatisticsDesc
      .setPartName(Warehouse.makePartName(sourceTable.getPartitionKeys(), replicaPartition.getValues()));
  ColumnStatistics sourcePartitionStatistics = new ColumnStatistics(columnStatisticsDesc, columnStatisticsObjs);

  ReplicaTableFactory factory = new ReplicaTableFactory(SOURCE_META_STORE_URIS, TableTransformation.IDENTITY,
      PartitionTransformation.IDENTITY, COLUMN_STATISTICS_TRANSFORMATION);

  ColumnStatistics replicaPartitionStatistics = factory.newReplicaPartitionStatistics(sourceTable, replicaPartition,
      sourcePartitionStatistics);

  assertThat(replicaPartitionStatistics.getStatsDesc().getDbName(), is("new_db"));
  assertThat(replicaPartitionStatistics.getStatsDesc().getTableName(), is("new_table"));
  assertThat(replicaPartitionStatistics.getStatsDesc().getPartName(), is("part=newPart"));
  assertThat(replicaPartitionStatistics.getStatsObj().size(), is(2));
  assertThat(replicaPartitionStatistics.getStatsObj().get(0), is(columnStatisticsObj1));
  assertThat(replicaPartitionStatistics.getStatsObj().get(1), is(columnStatisticsObj2));
}
Example 6: newPartition
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on

public static Partition newPartition(Table table, String... partitionValues) {
  Partition partition = new Partition();
  partition.setTableName(table.getTableName());
  partition.setDbName(table.getDbName());
  partition.setValues(Arrays.asList(partitionValues));
  partition.setSd(table.getSd()); // note: shares the table's StorageDescriptor instance rather than copying it
  partition.setParameters(new HashMap<String, String>());
  return partition;
}
Example 7: newTablePartition
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on

public static Partition newTablePartition(Table hiveTable, List<String> values, URI location) {
  Partition partition = new Partition();
  partition.setDbName(hiveTable.getDbName());
  partition.setTableName(hiveTable.getTableName());
  partition.setValues(values);
  partition.setSd(new StorageDescriptor(hiveTable.getSd()));
  partition.getSd().setLocation(location.toString());
  return partition;
}
Example 8: newViewPartition
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on

public static Partition newViewPartition(Table hiveView, List<String> values) {
  Partition partition = new Partition();
  partition.setDbName(hiveView.getDbName());
  partition.setTableName(hiveView.getTableName());
  partition.setValues(values);
  partition.setSd(new StorageDescriptor(hiveView.getSd()));
  partition.getSd().setLocation(null); // views have no physical location
  return partition;
}
Example 9: newPartition
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on

private Partition newPartition(
    String database,
    String table,
    StorageDescriptor tableStorageDescriptor,
    List<String> values,
    File location,
    String sourceTable,
    String sourceLocation,
    boolean addChecksum) {
  Partition partition = new Partition();
  partition.setDbName(database);
  partition.setTableName(table);
  partition.setValues(values);
  partition.setSd(new StorageDescriptor(tableStorageDescriptor));
  partition.getSd().setLocation(location.toURI().toString());
  partition.setParameters(new HashMap<String, String>());
  if (sourceTable != null) {
    partition.getParameters().put(CircusTrainTableParameter.SOURCE_TABLE.parameterName(), sourceTable);
  }
  if (sourceLocation != null) {
    partition.getParameters().put(CircusTrainTableParameter.SOURCE_LOCATION.parameterName(), sourceLocation);
  }
  if (addChecksum) {
    partition.getParameters().put(CircusTrainTableParameter.PARTITION_CHECKSUM.parameterName(), location.getName());
  }
  return partition;
}
Example 10: newPartition
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on

private static Partition newPartition(String databaseName, String tableName, String location) {
  Partition partition = new Partition();
  partition.setDbName(databaseName);
  partition.setTableName(tableName);
  partition.setParameters(new HashMap<String, String>());
  partition.setValues(Arrays.asList("01"));
  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation(location);
  partition.setSd(sd);
  return partition;
}
Example 11: newPartition
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on

public static Partition newPartition(String database, String tableName, String partitionValue) {
  Partition partition = new Partition();
  partition.setDbName(database);
  partition.setTableName(tableName);
  partition.setCreateTime(CREATE_TIME);
  partition.setValues(ImmutableList.of(partitionValue));

  Map<String, List<PrivilegeGrantInfo>> userPrivileges = new HashMap<>();
  userPrivileges.put("read", ImmutableList.of(new PrivilegeGrantInfo()));
  PrincipalPrivilegeSet privileges = new PrincipalPrivilegeSet();
  privileges.setUserPrivileges(userPrivileges);
  partition.setPrivileges(privileges);

  StorageDescriptor storageDescriptor = new StorageDescriptor();
  storageDescriptor.setCols(COLS);
  storageDescriptor.setInputFormat(INPUT_FORMAT);
  storageDescriptor.setOutputFormat(OUTPUT_FORMAT);
  storageDescriptor.setSerdeInfo(new SerDeInfo(SERDE_INFO_NAME, SERIALIZATION_LIB, new HashMap<String, String>()));
  storageDescriptor.setSkewedInfo(new SkewedInfo());
  storageDescriptor.setParameters(new HashMap<String, String>());
  storageDescriptor.setLocation(DATABASE + "/" + tableName + "/" + partitionValue + "/");
  partition.setSd(storageDescriptor);

  Map<String, String> parameters = new HashMap<>();
  parameters.put("com.company.parameter", "abc");
  partition.setParameters(parameters);
  return partition;
}
Example 12: init
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on

@Before
public void init() {
  partition = new Partition();
  partition.setDbName("database");
  partition.setTableName("table");
  partition.setValues(ImmutableList.of("part"));

  Map<String, List<PrivilegeGrantInfo>> userPrivileges = new HashMap<>();
  userPrivileges.put("read", ImmutableList.of(new PrivilegeGrantInfo()));
  PrincipalPrivilegeSet privileges = new PrincipalPrivilegeSet();
  privileges.setUserPrivileges(userPrivileges);
  partition.setPrivileges(privileges);

  StorageDescriptor storageDescriptor = new StorageDescriptor();
  storageDescriptor.setCols(Arrays.asList(new FieldSchema("a", "int", null)));
  storageDescriptor.setInputFormat("input_format");
  storageDescriptor.setOutputFormat("output_format");
  storageDescriptor.setSerdeInfo(new SerDeInfo("serde", "lib", new HashMap<String, String>()));
  storageDescriptor.setSkewedInfo(new SkewedInfo());
  storageDescriptor.setParameters(new HashMap<String, String>());
  storageDescriptor.setLocation("database/table/part/");
  partition.setSd(storageDescriptor);

  Map<String, String> parameters = new HashMap<>();
  parameters.put("com.company.parameter", "abc");
  partition.setParameters(parameters);
}
Example 13: newPartition
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on

static Partition newPartition(Table hiveTable, List<String> values, File location) {
  Partition partition = new Partition();
  partition.setDbName(hiveTable.getDbName());
  partition.setTableName(hiveTable.getTableName());
  partition.setValues(values);
  partition.setSd(new StorageDescriptor(hiveTable.getSd()));
  partition.getSd().setLocation(location.toURI().toString());
  return partition;
}
Example 14: newPartition
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on

private Partition newPartition(
    String table,
    StorageDescriptor tableStorageDescriptor,
    List<String> values,
    File location) {
  Partition partition = new Partition();
  partition.setDbName(DATABASE);
  partition.setTableName(table);
  partition.setValues(values);
  partition.setSd(new StorageDescriptor(tableStorageDescriptor));
  partition.getSd().setLocation(location.toURI().toString());
  return partition;
}
Example 15: newReplicaPartitionStatistics
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on

@Test
public void newReplicaPartitionStatistics() throws MetaException {
  sourceTable.setPartitionKeys(
      Arrays.asList(new FieldSchema("one", "string", null), new FieldSchema("two", "string", null)));
  Partition replicaPartition = new Partition(sourcePartition);
  replicaPartition.setDbName(MAPPED_DB_NAME);
  replicaPartition.setTableName(MAPPED_TABLE_NAME);
  replicaPartition.setValues(Arrays.asList("A", "B"));

  ColumnStatisticsObj columnStatisticsObj1 = new ColumnStatisticsObj();
  ColumnStatisticsObj columnStatisticsObj2 = new ColumnStatisticsObj();
  List<ColumnStatisticsObj> columnStatisticsObjs = Arrays.asList(columnStatisticsObj1, columnStatisticsObj2);
  ColumnStatisticsDesc columnStatisticsDesc = new ColumnStatisticsDesc(false, DB_NAME, TABLE_NAME);
  columnStatisticsDesc
      .setPartName(Warehouse.makePartName(sourceTable.getPartitionKeys(), replicaPartition.getValues()));
  ColumnStatistics sourcePartitionStatistics = new ColumnStatistics(columnStatisticsDesc, columnStatisticsObjs);

  ColumnStatistics replicaPartitionStatistics = factory.newReplicaPartitionStatistics(sourceTable, replicaPartition,
      sourcePartitionStatistics);

  assertThat(replicaPartitionStatistics.getStatsDesc().getDbName(), is(MAPPED_DB_NAME));
  assertThat(replicaPartitionStatistics.getStatsDesc().getTableName(), is(MAPPED_TABLE_NAME));
  assertThat(replicaPartitionStatistics.getStatsDesc().getPartName(), is("one=A/two=B"));
  assertThat(replicaPartitionStatistics.getStatsObj().size(), is(2));
  assertThat(replicaPartitionStatistics.getStatsObj().get(0), is(columnStatisticsObj1));
  assertThat(replicaPartitionStatistics.getStatsObj().get(1), is(columnStatisticsObj2));
}