This article collects typical usages of the Java method org.apache.hadoop.hive.metastore.api.Partition.setDbName. If you have been wondering what Partition.setDbName does and how to use it, the curated method examples below should help. You can also explore other usages of its containing class, org.apache.hadoop.hive.metastore.api.Partition.
The following 15 code examples of Partition.setDbName are shown, sorted by popularity by default.
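Before the project-specific examples, here is a minimal, self-contained sketch of the pattern they all share: populate a Partition with its database name, table name, values, and storage descriptor, then register it through a metastore client. This is an illustrative sketch rather than code from the examples below; the thrift URI, my_db, my_table, and partcol=1 are placeholder names, and it assumes a reachable Hive metastore in which the table already exists.
import java.util.Arrays;
import java.util.HashMap;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;

public class SetDbNameSketch {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:9083"); // placeholder metastore URI
    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
    try {
      // Copy the table's storage descriptor so the partition inherits its columns and serde.
      Table table = client.getTable("my_db", "my_table"); // placeholder database/table names
      StorageDescriptor sd = new StorageDescriptor(table.getSd());
      sd.setLocation(table.getSd().getLocation() + "/partcol=1");
      Partition partition = new Partition();
      partition.setDbName("my_db"); // the method this article covers
      partition.setTableName("my_table");
      partition.setValues(Arrays.asList("1"));
      partition.setSd(sd);
      partition.setParameters(new HashMap<String, String>());
      client.add_partition(partition);
    } finally {
      client.close();
    }
  }
}
Each example that follows is a variation of this pattern; they differ mainly in where the database name passed to setDbName comes from: a constant, the parent Table, or a replication mapping.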
Example 1: setUp
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on
@Before
public void setUp() {
databaseMapping = new DatabaseMappingImpl(metastoreMapping);
database = new Database();
database.setName(DB_NAME);
partition = new Partition();
partition.setDbName(DB_NAME);
partitions = Lists.newArrayList(partition);
index = new Index();
index.setDbName(DB_NAME);
hiveObjectRef = new HiveObjectRef();
hiveObjectRef.setDbName(DB_NAME);
hiveObjectRef.setObjectType(HiveObjectType.DATABASE);
hiveObjectRef.setObjectName(DB_NAME);
hiveObjectPrivileges = new ArrayList<>();
HiveObjectPrivilege hiveObjectPrivilege = new HiveObjectPrivilege();
hiveObjectPrivilege.setHiveObject(hiveObjectRef);
hiveObjectPrivileges.add(hiveObjectPrivilege);
partitionSpec = new PartitionSpec();
partitionSpec.setDbName(DB_NAME);
when(metastoreMapping.transformInboundDatabaseName(DB_NAME)).thenReturn(IN_DB_NAME);
when(metastoreMapping.transformOutboundDatabaseName(DB_NAME)).thenReturn(OUT_DB_NAME);
}
Example 2: dropPartition
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on
@Test
public void dropPartition() throws Exception {
String tableName = "my_table";
HiveMetaStoreClient client = server.newClient();
try {
Table table = createPartitionedTable(DATABASE, tableName);
Partition partition = new Partition();
partition.setDbName(DATABASE);
partition.setTableName(tableName);
partition.setValues(Arrays.asList("1"));
partition.setSd(new StorageDescriptor(table.getSd()));
partition.getSd().setLocation(
String.format("file:%s/%s/%s/partcol=1", server.temporaryFolder.getRoot(), DATABASE, tableName));
client.add_partition(partition);
try (Connection connection = DriverManager.getConnection(server.connectionURL());
Statement statement = connection.createStatement()) {
String dropPartitionHql = String.format("ALTER TABLE %s.%s DROP PARTITION (partcol=1)", DATABASE, tableName);
statement.execute(dropPartitionHql);
}
List<Partition> partitions = client.listPartitions(DATABASE, tableName, (short) -1);
assertThat(partitions.size(), is(0));
} finally {
client.close();
}
}
Example 3: newPartition
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on
private Partition newPartition(StorageDescriptor tableStorageDescriptor, List<String> values, File location) {
Partition partition = new Partition();
partition.setDbName(DATABASE);
partition.setTableName(TABLE);
partition.setValues(values);
partition.setSd(new StorageDescriptor(tableStorageDescriptor));
partition.getSd().setLocation(location.toURI().toString());
return partition;
}
Example 4: newReplicaPartition
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on
Partition newReplicaPartition(
String eventId,
Table sourceTable,
Partition sourcePartition,
String replicaDatabaseName,
String replicaTableName,
Path replicaPartitionLocation,
ReplicationMode replicationMode) {
Partition replica = partitionTransformation.transform(new Partition(sourcePartition));
replica.setDbName(replicaDatabaseName);
replica.setTableName(replicaTableName);
if (replica.getSd() != null) {
replica.getSd().setLocation(toStringOrNull(replicaPartitionLocation));
}
String sourcePartitionLocation = sourcePartition.getSd() == null ? ""
: toStringOrEmpty(sourcePartition.getSd().getLocation());
// Statistics-specific parameters
replica.putToParameters(STATS_GENERATED_VIA_STATS_TASK, Boolean.TRUE.toString());
replica.putToParameters(STATS_GENERATED, Boolean.TRUE.toString());
replica.putToParameters(DO_NOT_UPDATE_STATS, Boolean.TRUE.toString());
// Replication-specific parameters
replica.putToParameters(LAST_REPLICATED.parameterName(), DateTime.now(DateTimeZone.UTC).toString());
replica.putToParameters(REPLICATION_EVENT.parameterName(), eventId);
replica.putToParameters(SOURCE_LOCATION.parameterName(), sourcePartitionLocation);
replica.putToParameters(SOURCE_TABLE.parameterName(), Warehouse.getQualifiedName(sourceTable));
replica.putToParameters(SOURCE_METASTORE.parameterName(), sourceMetaStoreUris);
replica.putToParameters(REPLICATION_MODE.parameterName(), replicationMode.name());
return replica;
}
Example 5: newPartition
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on
private Partition newPartition(String... values) {
Partition partition = new Partition();
partition.setDbName(DB_NAME);
partition.setTableName(TABLE_NAME);
StorageDescriptor sd = new StorageDescriptor();
sd.setLocation(new Path(tableLocation, partitionName(values)).toUri().toString());
sd.setCols(FIELDS);
partition.setSd(sd);
HashMap<String, String> parameters = new HashMap<>();
parameters.put(StatsSetupConst.ROW_COUNT, "1");
partition.setParameters(parameters);
partition.setValues(Arrays.asList(values));
return partition;
}
Example 6: newReplicaPartitionStatisticsWithTransformation
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on
@Test
public void newReplicaPartitionStatisticsWithTransformation() throws MetaException {
sourceTable.setPartitionKeys(
Arrays.asList(new FieldSchema("one", "string", null), new FieldSchema("two", "string", null)));
Partition replicaPartition = new Partition(sourcePartition);
replicaPartition.setDbName(MAPPED_DB_NAME);
replicaPartition.setTableName(MAPPED_TABLE_NAME);
replicaPartition.setValues(Arrays.asList("A", "B"));
ColumnStatisticsObj columnStatisticsObj1 = new ColumnStatisticsObj();
ColumnStatisticsObj columnStatisticsObj2 = new ColumnStatisticsObj();
List<ColumnStatisticsObj> columnStatisticsObjs = Arrays.asList(columnStatisticsObj1, columnStatisticsObj2);
ColumnStatisticsDesc columnStatisticsDesc = new ColumnStatisticsDesc(false, DB_NAME, TABLE_NAME);
columnStatisticsDesc
.setPartName(Warehouse.makePartName(sourceTable.getPartitionKeys(), replicaPartition.getValues()));
ColumnStatistics sourcePartitionStatistics = new ColumnStatistics(columnStatisticsDesc, columnStatisticsObjs);
ReplicaTableFactory factory = new ReplicaTableFactory(SOURCE_META_STORE_URIS, TableTransformation.IDENTITY,
PartitionTransformation.IDENTITY, COLUMN_STATISTICS_TRANSFORMATION);
ColumnStatistics replicaPartitionStatistics = factory.newReplicaPartitionStatistics(sourceTable, replicaPartition,
sourcePartitionStatistics);
assertThat(replicaPartitionStatistics.getStatsDesc().getDbName(), is("new_db"));
assertThat(replicaPartitionStatistics.getStatsDesc().getTableName(), is("new_table"));
assertThat(replicaPartitionStatistics.getStatsDesc().getPartName(), is("part=newPart"));
assertThat(replicaPartitionStatistics.getStatsObj().size(), is(2));
assertThat(replicaPartitionStatistics.getStatsObj().get(0), is(columnStatisticsObj1));
assertThat(replicaPartitionStatistics.getStatsObj().get(1), is(columnStatisticsObj2));
}
Example 7: newPartition
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on
public static Partition newPartition(Table table, String... partitionValues) {
Partition partition = new Partition();
partition.setTableName(table.getTableName());
partition.setDbName(table.getDbName());
partition.setValues(Arrays.asList(partitionValues));
partition.setSd(table.getSd());
partition.setParameters(new HashMap<String, String>());
return partition;
}
Example 8: newTablePartition
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on
public static Partition newTablePartition(Table hiveTable, List<String> values, URI location) {
Partition partition = new Partition();
partition.setDbName(hiveTable.getDbName());
partition.setTableName(hiveTable.getTableName());
partition.setValues(values);
partition.setSd(new StorageDescriptor(hiveTable.getSd()));
partition.getSd().setLocation(location.toString());
return partition;
}
Example 9: newViewPartition
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on
public static Partition newViewPartition(Table hiveView, List<String> values) {
Partition partition = new Partition();
partition.setDbName(hiveView.getDbName());
partition.setTableName(hiveView.getTableName());
partition.setValues(values);
partition.setSd(new StorageDescriptor(hiveView.getSd()));
partition.getSd().setLocation(null);
return partition;
}
Example 10: newPartition
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on
private Partition newPartition(
String database,
String table,
StorageDescriptor tableStorageDescriptor,
List<String> values,
File location,
String sourceTable,
String sourceLocation,
boolean addChecksum) {
Partition partition = new Partition();
partition.setDbName(database);
partition.setTableName(table);
partition.setValues(values);
partition.setSd(new StorageDescriptor(tableStorageDescriptor));
partition.getSd().setLocation(location.toURI().toString());
partition.setParameters(new HashMap<String, String>());
if (sourceTable != null) {
partition.getParameters().put(CircusTrainTableParameter.SOURCE_TABLE.parameterName(), sourceTable);
}
if (sourceLocation != null) {
partition.getParameters().put(CircusTrainTableParameter.SOURCE_LOCATION.parameterName(), sourceLocation);
}
if (addChecksum) {
partition.getParameters().put(CircusTrainTableParameter.PARTITION_CHECKSUM.parameterName(), location.getName());
}
return partition;
}
Example 11: newPartition
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on
private static Partition newPartition(String databaseName, String tableName, String location) {
Partition partition = new Partition();
partition.setDbName(databaseName);
partition.setTableName(tableName);
partition.setParameters(new HashMap<String, String>());
partition.setValues(Arrays.asList("01"));
StorageDescriptor sd = new StorageDescriptor();
sd.setLocation(location);
partition.setSd(sd);
return partition;
}
Example 12: add_partitions
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on
@Test
public void add_partitions() throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
Partition newPartition1 = new Partition();
newPartition1.setDbName(DB_P);
Partition newPartition2 = new Partition();
newPartition2.setDbName(DB_P);
List<Partition> inbound = Lists.newArrayList(new Partition());
List<Partition> partitions = Lists.newArrayList(newPartition1, newPartition2);
when(primaryMapping.transformInboundPartitions(partitions)).thenReturn(inbound);
when(primaryClient.add_partitions(inbound)).thenReturn(2);
int result = handler.add_partitions(partitions);
assertThat(result, is(2));
verify(primaryMapping, times(2)).checkWritePermissions(DB_P);
}
Example 13: init
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on
@Before
public void init() {
partition = new Partition();
partition.setDbName("database");
partition.setTableName("table");
partition.setValues(ImmutableList.of("part"));
// Attach a user-level privilege grant to the partition.
Map<String, List<PrivilegeGrantInfo>> userPrivileges = new HashMap<>();
userPrivileges.put("read", ImmutableList.of(new PrivilegeGrantInfo()));
PrincipalPrivilegeSet privileges = new PrincipalPrivilegeSet();
privileges.setUserPrivileges(userPrivileges);
partition.setPrivileges(privileges);
// Storage descriptor with all nested fields populated.
StorageDescriptor storageDescriptor = new StorageDescriptor();
storageDescriptor.setCols(Arrays.asList(new FieldSchema("a", "int", null)));
storageDescriptor.setInputFormat("input_format");
storageDescriptor.setOutputFormat("output_format");
storageDescriptor.setSerdeInfo(new SerDeInfo("serde", "lib", new HashMap<String, String>()));
storageDescriptor.setSkewedInfo(new SkewedInfo());
storageDescriptor.setParameters(new HashMap<String, String>());
storageDescriptor.setLocation("database/table/part/");
partition.setSd(storageDescriptor);
Map<String, String> parameters = new HashMap<>();
parameters.put("com.company.parameter", "abc");
partition.setParameters(parameters);
}
Example 14: newPartition
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on
static Partition newPartition(Table hiveTable, List<String> values, File location) {
Partition partition = new Partition();
partition.setDbName(hiveTable.getDbName());
partition.setTableName(hiveTable.getTableName());
partition.setValues(values);
partition.setSd(new StorageDescriptor(hiveTable.getSd()));
partition.getSd().setLocation(location.toURI().toString());
return partition;
}
Example 15: add_partitions_req
import org.apache.hadoop.hive.metastore.api.Partition; // import the package/class the method depends on
@Test
public void add_partitions_req() throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
Partition newPartition1 = new Partition();
newPartition1.setDbName(DB_P);
Partition newPartition2 = new Partition();
newPartition2.setDbName(DB_P);
List<Partition> partitions = Lists.newArrayList(newPartition1, newPartition2);
AddPartitionsRequest request = new AddPartitionsRequest();
request.setDbName(DB_P);
request.setParts(partitions);
AddPartitionsRequest inbound = new AddPartitionsRequest();
AddPartitionsResult addPartitionResult = new AddPartitionsResult();
AddPartitionsResult outbound = new AddPartitionsResult();
when(primaryMapping.transformInboundAddPartitionsRequest(request)).thenReturn(inbound);
when(primaryClient.add_partitions_req(inbound)).thenReturn(addPartitionResult);
when(primaryMapping.transformOutboundAddPartitionsResult(addPartitionResult)).thenReturn(outbound);
AddPartitionsResult result = handler.add_partitions_req(request);
assertThat(result, is(outbound));
verify(primaryMapping, times(3)).checkWritePermissions(DB_P);
}