

Java Partition Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hive.metastore.api.Partition. If you are wondering what the Partition class does, how to use it, or what real-world usage looks like, the curated class code examples below should help.


The Partition class belongs to the org.apache.hadoop.hive.metastore.api package. A total of 15 code examples of the Partition class are shown below, sorted by popularity by default.
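Before the examples, a minimal sketch of building a Partition object by hand may help orient first-time readers. The partition-column name and directory layout below are hypothetical placeholders, and the storage descriptor is copied from the owning table in the same way Example 15 does; treat this as an illustrative sketch rather than a canonical recipe.

import java.util.Arrays;
import java.util.HashMap;

import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;

public class PartitionSketch {

  // Builds a Partition for a table partitioned on a single, hypothetical column "partcol".
  static Partition newPartition(Table table, String partitionValue) {
    Partition partition = new Partition();
    partition.setDbName(table.getDbName());
    partition.setTableName(table.getTableName());
    partition.setValues(Arrays.asList(partitionValue));     // one value per partition column
    partition.setParameters(new HashMap<String, String>());

    // Each partition carries its own StorageDescriptor, typically a copy of the table's,
    // pointed at the partition directory.
    StorageDescriptor sd = new StorageDescriptor(table.getSd());
    sd.setLocation(table.getSd().getLocation() + "/partcol=" + partitionValue);
    partition.setSd(sd);
    return partition;
  }
}

The resulting object can then be registered through a metastore client, for example HiveMetaStoreClient.add_partition(partition), and read back with listPartitions, as several of the examples below demonstrate.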

Example 1: PartitionedTablePathResolver

import org.apache.hadoop.hive.metastore.api.Partition; // import the required package/class
PartitionedTablePathResolver(IMetaStoreClient metastore, Table table)
    throws NoSuchObjectException, MetaException, TException {
  this.metastore = metastore;
  this.table = table;
  LOG.debug("Table '{}' is partitioned", Warehouse.getQualifiedName(table));
  tableBaseLocation = locationAsPath(table);
  List<Partition> onePartition = metastore.listPartitions(table.getDbName(), table.getTableName(), (short) 1);
  if (onePartition.isEmpty()) {
    LOG.warn("Table '{}' has no partitions, perhaps you can simply delete: {}.", Warehouse.getQualifiedName(table),
        tableBaseLocation);
    throw new ConfigurationException();
  }
  Path partitionLocation = locationAsPath(onePartition.get(0));
  int branches = partitionLocation.depth() - tableBaseLocation.depth();
  String globSuffix = StringUtils.repeat("*", "/", branches);
  globPath = new Path(tableBaseLocation, globSuffix);
}
 
Author: HotelsDotCom, Project: circus-train, Lines: 18, Source: PartitionedTablePathResolver.java

Example 2: addPartition

import org.apache.hadoop.hive.metastore.api.Partition; // import the required package/class
@Test
public void addPartition() throws Exception {
  String tableName = "my_table";
  createPartitionedTable(DATABASE, tableName);

  try (Connection connection = DriverManager.getConnection(server.connectionURL());
      Statement statement = connection.createStatement()) {
    String addPartitionHql = String.format("ALTER TABLE %s.%s ADD PARTITION (partcol=1)", DATABASE, tableName);
    statement.execute(addPartitionHql);
  }

  HiveMetaStoreClient client = server.newClient();
  try {
    List<Partition> partitions = client.listPartitions(DATABASE, tableName, (short) -1);
    assertThat(partitions.size(), is(1));
    assertThat(partitions.get(0).getDbName(), is(DATABASE));
    assertThat(partitions.get(0).getTableName(), is(tableName));
    assertThat(partitions.get(0).getValues(), is(Arrays.asList("1")));
    assertThat(partitions.get(0).getSd().getLocation(),
        is(String.format("file:%s/%s/%s/partcol=1", server.temporaryFolder.getRoot(), DATABASE, tableName)));
  } finally {
    client.close();
  }
}
 
Author: HotelsDotCom, Project: beeju, Lines: 25, Source: HiveServer2JUnitRuleTest.java

Example 3: getMetastorePaths

import org.apache.hadoop.hive.metastore.api.Partition; // import the required package/class
@Override
public Set<Path> getMetastorePaths(short batchSize, int expectedPathCount)
  throws NoSuchObjectException, MetaException, TException {
  Set<Path> metastorePaths = new HashSet<>(expectedPathCount);
  PartitionIterator partitionIterator = new PartitionIterator(metastore, table, batchSize);
  while (partitionIterator.hasNext()) {
    Partition partition = partitionIterator.next();
    Path location = PathUtils.normalise(locationAsPath(partition));
    if (!location.toString().toLowerCase().startsWith(tableBaseLocation.toString().toLowerCase())) {
      LOG.error("Check your configuration: '{}' does not appear to be part of '{}'.", location, tableBaseLocation);
      throw new ConfigurationException();
    }
    metastorePaths.add(location);
  }
  return metastorePaths;
}
 
Author: HotelsDotCom, Project: circus-train, Lines: 17, Source: PartitionedTablePathResolver.java

Example 4: HivePartition

import org.apache.hadoop.hive.metastore.api.Partition; // import the required package/class
@JsonCreator
public HivePartition(@JsonProperty("values") List<String> values, @JsonProperty("tableName") String tableName, @JsonProperty("dbName") String dbName, @JsonProperty("createTime") int createTime,
                     @JsonProperty("lastAccessTime") int lastAccessTime,  @JsonProperty("sd") StorageDescriptorWrapper sd,
                     @JsonProperty("parameters") Map<String, String> parameters
) {
  this.values = values;
  this.tableName = tableName;
  this.dbName = dbName;
  this.createTime = createTime;
  this.lastAccessTime = lastAccessTime;
  this.sd = sd;
  this.parameters = parameters;

  StorageDescriptor sdUnwrapped = sd.getSd();
  this.partition = new org.apache.hadoop.hive.metastore.api.Partition(values, tableName, dbName, createTime, lastAccessTime, sdUnwrapped, parameters);
}
 
Author: skhalifa, Project: QDrill, Lines: 17, Source: HiveTable.java

Example 5: hasNext

import org.apache.hadoop.hive.metastore.api.Partition; // import the required package/class
@Override
public boolean hasNext() {
  if (batch.hasNext()) {
    return true;
  }
  if (partitionNames.hasNext()) {
    List<String> names = partitionNames.next();
    try {
      List<Partition> partitions = metastore.getPartitionsByNames(table.getDbName(), table.getTableName(), names);
      count += partitions.size();
      LOG.debug("Retrieved {} partitions, total: {}.", partitions.size(), count);
      batch = partitions.iterator();
    } catch (TException e) {
      throw new RuntimeException(e);
    }
  }
  return batch.hasNext();
}
 
Author: HotelsDotCom, Project: circus-train, Lines: 19, Source: PartitionIterator.java

Example 6: tablesAreDifferent

import org.apache.hadoop.hive.metastore.api.Partition; // import the required package/class
@Test
public void tablesAreDifferent() throws Exception {
  Table sourceTable = catalog.client().getTable(DATABASE, SOURCE_TABLE);
  sourceTable.getParameters().put("com.company.team", "value");
  catalog.client().alter_table(DATABASE, SOURCE_TABLE, sourceTable);

  // Reload table object
  sourceTable = catalog.client().getTable(DATABASE, SOURCE_TABLE);
  Table replicaTable = catalog.client().getTable(DATABASE, REPLICA_TABLE);

  HiveDifferences
      .builder(diffListener)
      .comparatorRegistry(comparatorRegistry)
      .source(configuration, sourceTable, new PartitionIterator(catalog.client(), sourceTable, PARTITION_BATCH_SIZE))
      .replica(Optional.of(replicaTable),
          Optional.of(new BufferedPartitionFetcher(catalog.client(), replicaTable, PARTITION_BATCH_SIZE)))
      .checksumFunction(checksumFunction)
      .build()
      .run();
  verify(diffListener, times(1)).onChangedTable(anyList());
  verify(diffListener, never()).onNewPartition(anyString(), any(Partition.class));
  verify(diffListener, never()).onChangedPartition(anyString(), any(Partition.class), anyList());
  verify(diffListener, never()).onDataChanged(anyString(), any(Partition.class));
}
 
Author: HotelsDotCom, Project: circus-train, Lines: 25, Source: HiveDifferencesIntegrationTest.java

Example 7: HdfsSnapshotLocationManager

import org.apache.hadoop.hive.metastore.api.Partition; // import the required package/class
HdfsSnapshotLocationManager(
    HiveConf sourceHiveConf,
    String eventId,
    Table sourceTable,
    boolean snapshotsDisabled,
    String tableBasePath,
    SourceCatalogListener sourceCatalogListener) throws IOException {
  this(sourceHiveConf, eventId, sourceTable, Collections.<Partition> emptyList(), snapshotsDisabled, tableBasePath,
      sourceCatalogListener);
}
 
Author: HotelsDotCom, Project: circus-train, Lines: 11, Source: HdfsSnapshotLocationManager.java

Example 8: getLocationManager

import org.apache.hadoop.hive.metastore.api.Partition; // import the required package/class
public SourceLocationManager getLocationManager(
    Table table,
    List<Partition> partitions,
    String eventId,
    Map<String, Object> copierOptions)
  throws IOException {
  if (MetaStoreUtils.isView(table)) {
    return new ViewLocationManager();
  }
  HdfsSnapshotLocationManager hdfsSnapshotLocationManager = new HdfsSnapshotLocationManager(getHiveConf(), eventId,
      table, partitions, snapshotsDisabled, sourceTableLocation, sourceCatalogListener);
  boolean ignoreMissingFolder = MapUtils.getBooleanValue(copierOptions,
      CopierOptions.IGNORE_MISSING_PARTITION_FOLDER_ERRORS, false);
  if (ignoreMissingFolder) {
    return new FilterMissingPartitionsLocationManager(hdfsSnapshotLocationManager, getHiveConf());
  }
  return hdfsSnapshotLocationManager;
}
 
Author: HotelsDotCom, Project: circus-train, Lines: 19, Source: Source.java

Example 9: setupHiveTables

import org.apache.hadoop.hive.metastore.api.Partition; // import the required package/class
private void setupHiveTables() throws TException, IOException {
  List<FieldSchema> partitionKeys = Lists.newArrayList(newFieldSchema("p1"), newFieldSchema("p2"));

  File tableLocation = new File("db1", "table1");
  StorageDescriptor sd = newStorageDescriptor(tableLocation, "col0");
  table1 = newTable("table1", "db1", partitionKeys, sd);
  Partition partition1 = newPartition(table1, "value1", "value2");
  Partition partition2 = newPartition(table1, "value11", "value22");
  table1Partitions = Arrays.asList(partition1, partition2);
  table1PartitionNames = Arrays.asList(Warehouse.makePartName(partitionKeys, partition1.getValues()),
      Warehouse.makePartName(partitionKeys, partition2.getValues()));

  File tableLocation2 = new File("db2", "table2");
  StorageDescriptor sd2 = newStorageDescriptor(tableLocation2, "col0");
  table2 = newTable("table2", "db2", partitionKeys, sd2);
}
 
Author: HotelsDotCom, Project: circus-train, Lines: 17, Source: DiffGeneratedPartitionPredicateTest.java

Example 10: noMatchingPartitions

import org.apache.hadoop.hive.metastore.api.Partition; // import the required package/class
@Test
public void noMatchingPartitions() throws Exception {
  PartitionsAndStatistics emptyPartitionsAndStats = new PartitionsAndStatistics(sourceTable.getPartitionKeys(),
      Collections.<Partition> emptyList(), Collections.<String, List<ColumnStatisticsObj>> emptyMap());
  when(source.getPartitions(sourceTable, PARTITION_PREDICATE, MAX_PARTITIONS)).thenReturn(emptyPartitionsAndStats);
  when(source.getLocationManager(sourceTable, Collections.<Partition> emptyList(), EVENT_ID, copierOptions))
      .thenReturn(sourceLocationManager);

  PartitionedTableReplication replication = new PartitionedTableReplication(DATABASE, TABLE, partitionPredicate,
      source, replica, copierFactoryManager, eventIdFactory, targetTableLocation, DATABASE, TABLE, copierOptions,
      listener);
  replication.replicate();

  verifyZeroInteractions(copier);
  InOrder replicationOrder = inOrder(sourceLocationManager, replica, replicaLocationManager, listener);
  replicationOrder.verify(replica).validateReplicaTable(DATABASE, TABLE);
  replicationOrder.verify(replica).updateMetadata(EVENT_ID, sourceTableAndStatistics, DATABASE, TABLE,
      replicaLocationManager);
}
 
Author: HotelsDotCom, Project: circus-train, Lines: 20, Source: PartitionedTableReplicationTest.java

Example 11: noMatchingPartitions

import org.apache.hadoop.hive.metastore.api.Partition; // import the required package/class
@Test
public void noMatchingPartitions() throws Exception {
  PartitionsAndStatistics emptyPartitionsAndStats = new PartitionsAndStatistics(sourceTable.getPartitionKeys(),
      Collections.<Partition> emptyList(), Collections.<String, List<ColumnStatisticsObj>> emptyMap());
  when(source.getPartitions(sourceTable, PARTITION_PREDICATE, MAX_PARTITIONS)).thenReturn(emptyPartitionsAndStats);
  when(source.getLocationManager(sourceTable, Collections.<Partition> emptyList(), EVENT_ID, copierOptions))
      .thenReturn(sourceLocationManager);

  PartitionedTableMetadataUpdateReplication replication = new PartitionedTableMetadataUpdateReplication(DATABASE,
      TABLE, partitionPredicate, source, replica, eventIdFactory, replicaLocation, DATABASE, TABLE);
  replication.replicate();

  verify(replica).validateReplicaTable(DATABASE, TABLE);
  verify(replica).updateMetadata(eq(EVENT_ID), eq(sourceTableAndStatistics), eq(DATABASE), eq(TABLE),
      any(MetadataUpdateReplicaLocationManager.class));
}
 
Author: HotelsDotCom, Project: circus-train, Lines: 17, Source: PartitionedTableMetadataUpdateReplicationTest.java

Example 12: typical

import org.apache.hadoop.hive.metastore.api.Partition; // import the required package/class
@Test
public void typical() throws Exception {
  List<FieldSchema> partitionKeys = Lists.newArrayList(newFieldSchema("a"), newFieldSchema("c"));
  Table table = newTable("t1", "db1", partitionKeys, newStorageDescriptor(new File("bla"), "col1"));
  List<Partition> partitions = Lists.newArrayList(newPartition(table, "b", "d"));
  statisticsPerPartitionName.put("a=b/c=d", columnStats);

  PartitionsAndStatistics partitionsAndStatistics = new PartitionsAndStatistics(partitionKeys, partitions,
      statisticsPerPartitionName);
  List<String> expectedName = Lists.newArrayList("a=b/c=d");

  assertThat(partitionsAndStatistics.getPartitionNames(), is(expectedName));
  assertThat(partitionsAndStatistics.getPartitions(), is(partitions));
  ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(false, "db1", "t1");
  statsDesc.setPartName("a=b/c=d");
  ColumnStatistics expectedStats = new ColumnStatistics(statsDesc, columnStats);
  assertThat(partitionsAndStatistics.getStatisticsForPartition(partitions.get(0)), is(expectedStats));
}
 
Author: HotelsDotCom, Project: circus-train, Lines: 19, Source: PartitionsAndStatisticsTest.java

Example 13: getPartitions

import org.apache.hadoop.hive.metastore.api.Partition; // import the required package/class
private static List<PartitionValue> getPartitions(Table table, Partition partition) {
  if(partition == null){
    return Collections.emptyList();
  }

  final List<String> partitionValues = partition.getValues();
  final List<PartitionValue> output = Lists.newArrayList();
  final List<FieldSchema> partitionKeys = table.getPartitionKeys();
  for(int i = 0; i < partitionKeys.size(); i++){
    PartitionValue value = getPartitionValue(partitionKeys.get(i), partitionValues.get(i));
    if(value != null){
      output.add(value);
    }
  }
  return output;
}
 
Author: dremio, Project: dremio-oss, Lines: 17, Source: DatasetBuilder.java

Example 14: replicaTableDoesNotExist

import org.apache.hadoop.hive.metastore.api.Partition; // import the required package/class
@Test
public void replicaTableDoesNotExist() {
  hiveDifferences = HiveDifferences
      .builder(diffListener)
      .comparatorRegistry(comparatorRegistry)
      .source(sourceConfiguration, sourceTable, sourcePartitionIterable)
      .replica(Optional.<Table> absent(), Optional.<PartitionFetcher> absent())
      .checksumFunction(checksumFunction)
      .build();
  hiveDifferences.run();

  InOrder inOrder = inOrder(diffListener);
  inOrder.verify(diffListener).onDiffStart(any(TableAndMetadata.class), any(Optional.class));
  verify(diffListener, never()).onChangedTable(anyList());
  inOrder.verify(diffListener, times(1)).onNewPartition(anyString(), any(Partition.class));
  verify(diffListener, never()).onChangedPartition(anyString(), any(Partition.class), anyList());
  verify(diffListener, never()).onDataChanged(anyString(), any(Partition.class));
  inOrder.verify(diffListener).onDiffEnd();
}
 
Author: HotelsDotCom, Project: circus-train, Lines: 20, Source: HiveDifferencesTest.java

Example 15: dropPartition

import org.apache.hadoop.hive.metastore.api.Partition; // import the required package/class
@Test
public void dropPartition() throws Exception {
  String tableName = "my_table";
  HiveMetaStoreClient client = server.newClient();

  try {
    Table table = createPartitionedTable(DATABASE, tableName);

    Partition partition = new Partition();
    partition.setDbName(DATABASE);
    partition.setTableName(tableName);
    partition.setValues(Arrays.asList("1"));
    partition.setSd(new StorageDescriptor(table.getSd()));
    partition.getSd().setLocation(
        String.format("file:%s/%s/%s/partcol=1", server.temporaryFolder.getRoot(), DATABASE, tableName));
    client.add_partition(partition);

    try (Connection connection = DriverManager.getConnection(server.connectionURL());
        Statement statement = connection.createStatement()) {
      String dropPartitionHql = String.format("ALTER TABLE %s.%s DROP PARTITION (partcol=1)", DATABASE, tableName);
      statement.execute(dropPartitionHql);
    }

    List<Partition> partitions = client.listPartitions(DATABASE, tableName, (short) -1);
    assertThat(partitions.size(), is(0));
  } finally {
    client.close();
  }
}
 
Author: HotelsDotCom, Project: beeju, Lines: 30, Source: HiveServer2JUnitRuleTest.java


Note: The org.apache.hadoop.hive.metastore.api.Partition class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and copyright in the source code remains with those authors. For distribution and use, please refer to the License of the corresponding project; do not reproduce without permission.