

Java Table Class Code Examples

This article collects typical usage examples of the Java Table class from org.apache.hadoop.hive.ql.metadata. If you have been wondering what the Table class is for, how it works, or how to use it, the curated class code examples below should help.


The Table class belongs to the org.apache.hadoop.hive.ql.metadata package. Fifteen code examples of the Table class are presented below, sorted by popularity by default.

Example 1: constructAvroTable

import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
private Table constructAvroTable(String database, String tableName, Schema schema, Partitioner partitioner)
    throws HiveMetaStoreException {
  Table table = newTable(database, tableName);
  table.setTableType(TableType.EXTERNAL_TABLE);
  table.getParameters().put("EXTERNAL", "TRUE");
  String tablePath = FileUtils.hiveDirectoryName(url, topicsDir, tableName);
  table.setDataLocation(new Path(tablePath));
  table.setSerializationLib(avroSerde);
  try {
    table.setInputFormatClass(avroInputFormat);
    table.setOutputFormatClass(avroOutputFormat);
  } catch (HiveException e) {
    throw new HiveMetaStoreException("Cannot find input/output format:", e);
  }
  List<FieldSchema> columns = HiveSchemaConverter.convertSchema(schema);
  table.setFields(columns);
  table.setPartCols(partitioner.partitionFields());
  table.getParameters().put(AVRO_SCHEMA_LITERAL, avroData.fromConnectSchema(schema).toString());
  return table;
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 21, Source: AvroHiveUtil.java
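
In kafka-connect-hdfs, a table constructed this way is typically handed straight to the project's metastore wrapper for registration. A minimal sketch of the calling side, assuming a hiveMetaStore field that exposes createTable(Table), as this project does:

// Hedged sketch of the caller; hiveMetaStore is assumed to be the project's
// HiveMetaStore wrapper with a createTable(Table) method.
public void createTable(String database, String tableName, Schema schema,
                        Partitioner partitioner) throws HiveMetaStoreException {
  Table table = constructAvroTable(database, tableName, schema, partitioner);
  hiveMetaStore.createTable(table);
}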

Example 2: constructParquetTable

import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
private Table constructParquetTable(String database, String tableName, Schema schema, Partitioner partitioner) throws HiveMetaStoreException {
  Table table = newTable(database, tableName);
  table.setTableType(TableType.EXTERNAL_TABLE);
  table.getParameters().put("EXTERNAL", "TRUE");
  String tablePath = FileUtils.hiveDirectoryName(url, topicsDir, tableName);
  table.setDataLocation(new Path(tablePath));
  table.setSerializationLib(getHiveParquetSerde());
  try {
    table.setInputFormatClass(getHiveParquetInputFormat());
    table.setOutputFormatClass(getHiveParquetOutputFormat());
  } catch (HiveException e) {
    throw new HiveMetaStoreException("Cannot find input/output format:", e);
  }
  // convert the Copycat (Kafka Connect) schema to Hive columns
  List<FieldSchema> columns = HiveSchemaConverter.convertSchema(schema);
  table.setFields(columns);
  table.setPartCols(partitioner.partitionFields());
  return table;
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 20, Source: ParquetHiveUtil.java
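
The serde and format helpers called above are not shown in this snippet. A minimal sketch, assuming they simply return the standard Hive Parquet class names (the actual project may resolve different classes depending on the Hive version):

// Assumed implementations of the helpers referenced above; the strings are
// the standard Hive Parquet serde and MapReduce format class names.
private String getHiveParquetSerde() {
  return "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe";
}

private String getHiveParquetInputFormat() {
  return "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat";
}

private String getHiveParquetOutputFormat() {
  return "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat";
}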

Example 3: constructAvroTable

import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
private Table constructAvroTable(String database, String tableName, Schema schema, Partitioner partitioner)
    throws HiveMetaStoreException {
  Table table = new Table(database, tableName);
  table.setTableType(TableType.EXTERNAL_TABLE);
  table.getParameters().put("EXTERNAL", "TRUE");
  String tablePath = FileUtils.hiveDirectoryName(url, topicsDir, tableName);
  table.setDataLocation(new Path(tablePath));
  table.setSerializationLib(avroSerde);
  try {
    table.setInputFormatClass(avroInputFormat);
    table.setOutputFormatClass(avroOutputFormat);
  } catch (HiveException e) {
    throw new HiveMetaStoreException("Cannot find input/output format:", e);
  }
  List<FieldSchema> columns = HiveSchemaConverter.convertSchema(schema);
  table.setFields(columns);
  table.setPartCols(partitioner.partitionFields());
  table.getParameters().put(AVRO_SCHEMA_LITERAL, avroData.fromConnectSchema(schema).toString());
  return table;
}
 
Author: qubole, Project: streamx, Lines: 21, Source: AvroHiveUtil.java

Example 4: constructParquetTable

import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
private Table constructParquetTable(String database, String tableName, Schema schema, Partitioner partitioner) throws HiveMetaStoreException {
  Table table = new Table(database, tableName);
  table.setTableType(TableType.EXTERNAL_TABLE);
  table.getParameters().put("EXTERNAL", "TRUE");
  String tablePath = FileUtils.hiveDirectoryName(url, topicsDir, tableName);
  table.setDataLocation(new Path(tablePath));
  table.setSerializationLib(getHiveParquetSerde());
  try {
    table.setInputFormatClass(getHiveParquetInputFormat());
    table.setOutputFormatClass(getHiveParquetOutputFormat());
  } catch (HiveException e) {
    throw new HiveMetaStoreException("Cannot find input/output format:", e);
  }
  // convert the Copycat (Kafka Connect) schema to Hive columns
  List<FieldSchema> columns = HiveSchemaConverter.convertSchema(schema);
  table.setFields(columns);
  table.setPartCols(partitioner.partitionFields());
  return table;
}
 
Author: qubole, Project: streamx, Lines: 20, Source: ParquetHiveUtil.java

Example 5: getEstimatedSizeBytes

import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
/**
 * Returns the size of the table in bytes; it does not take into account any
 * filter/partition details that were passed.
 */
@Override
public long getEstimatedSizeBytes(PipelineOptions pipelineOptions) throws Exception {
  Configuration conf = new Configuration();
  for (Entry<String, String> entry : spec.getConfigProperties().entrySet()) {
    conf.set(entry.getKey(), entry.getValue());
  }
  IMetaStoreClient client = null;
  try {
    HiveConf hiveConf = HCatUtil.getHiveConf(conf);
    client = HCatUtil.getHiveMetastoreClient(hiveConf);
    Table table = HCatUtil.getTable(client, spec.getDatabase(), spec.getTable());
    return StatsUtils.getFileSizeForTable(hiveConf, table);
  } finally {
    // IMetaStoreClient is not AutoCloseable, closing it manually
    if (client != null) {
      client.close();
    }
  }
}
 
Author: apache, Project: beam, Lines: 24, Source: HCatalogIO.java
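
The spec consulted here is the HCatalogIO read spec built by the pipeline author. A minimal sketch of the typical wiring; the metastore URI, database, and table names are placeholders:

// Hypothetical read configuration; all names are placeholders.
Map<String, String> configProperties = new HashMap<>();
configProperties.put("hive.metastore.uris", "thrift://metastore-host:9083");

PCollection<HCatRecord> records = pipeline.apply(
    HCatalogIO.read()
        .withConfigProperties(configProperties)
        .withDatabase("my_database")
        .withTable("my_table"));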

Example 6: insertThriftRenameTableLogEntry

import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
/**
 * Insert a thrift audit log entry that represents renaming a table.
 *
 * @param oldTable the table before the rename
 * @param newTable the table after the rename
 * @param hiveConf Hive configuration
 * @throws Exception if there's an error inserting into the audit log
 */
public static void insertThriftRenameTableLogEntry(
    org.apache.hadoop.hive.metastore.api.Table oldTable,
    org.apache.hadoop.hive.metastore.api.Table newTable,
    HiveConf hiveConf) throws Exception {
  final MetastoreAuditLogListener metastoreAuditLogListener =
      new MetastoreAuditLogListener(hiveConf);

  AlterTableEvent event = new AlterTableEvent(
      oldTable,
      newTable,
      true,
      null
  );

  metastoreAuditLogListener.onAlterTable(event);
}
 
Author: airbnb, Project: reair, Lines: 25, Source: AuditLogHookUtils.java
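
A hedged usage sketch: fetch the thrift table from the metastore, build the renamed copy, and log the rename. The client setup and all names are assumptions:

// Assumed setup: client is a connected IMetaStoreClient and hiveConf is
// already configured; the database and table names are placeholders.
org.apache.hadoop.hive.metastore.api.Table oldTable =
    client.getTable("my_db", "old_name");
org.apache.hadoop.hive.metastore.api.Table newTable = oldTable.deepCopy();
newTable.setTableName("new_name");

AuditLogHookUtils.insertThriftRenameTableLogEntry(oldTable, newTable, hiveConf);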

Example 7: onCreateTable

import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
/**
 * Listener which fires when a table is created.
 *
 * <p>For auditing purposes the read/write differential is the non-existence
 * and existence of the created table respectively.</p>
 *
 * @param event The create table event
 */
@Override
public void onCreateTable(CreateTableEvent event) throws MetaException {
  try {
    Set<ReadEntity> readEntities = new HashSet<>();
    Set<WriteEntity> writeEntities = new HashSet<>();

    writeEntities.add(
        new WriteEntity(
            new Table(event.getTable()),
            WriteType.INSERT
        )
    );

    run(readEntities, writeEntities, HiveOperation.THRIFT_CREATE_TABLE);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
 
Author: airbnb, Project: reair, Lines: 27, Source: MetastoreAuditLogListener.java
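
Metastore listeners such as this one are activated through the hive.metastore.event.listeners property. A minimal sketch of programmatic registration, e.g. for tests; production deployments would normally set the property in hive-site.xml instead:

// Register the listener via configuration (sketch).
HiveConf hiveConf = new HiveConf();
hiveConf.set("hive.metastore.event.listeners",
    MetastoreAuditLogListener.class.getName());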

Example 8: onAlterTable

import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
/**
 * Listener which fires when a table is altered.
 *
 * <p>For auditing purposes the read/write differential is the old and new
 * table respectively.</p>
 *
 * @param event The alter table event
 */
@Override
public void onAlterTable(AlterTableEvent event) throws MetaException {
  try {
    Set<ReadEntity> readEntities = new HashSet<>();
    readEntities.add(new ReadEntity(new Table(event.getOldTable())));
    Set<WriteEntity> writeEntities = new HashSet<>();

    writeEntities.add(
        new WriteEntity(
            new Table(event.getNewTable()),
            WriteType.INSERT
        )
    );

    run(readEntities, writeEntities, HiveOperation.THRIFT_ALTER_TABLE);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
 
Author: airbnb, Project: reair, Lines: 28, Source: MetastoreAuditLogListener.java

Example 9: onAddPartition

import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
/**
 * Listener which fires when a partition is added.
 *
 * <p>For auditing purposes the read/write differential is the non-existence
 * and existence of the added partition respectively.</p>
 *
 * @param event The add partition event
 */
@Override
public void onAddPartition(AddPartitionEvent event) throws MetaException {
  try {
    Table table = new Table(event.getTable());
    Set<ReadEntity> readEntities = new HashSet<>();
    Set<WriteEntity> writeEntities = new HashSet<>();

    for (org.apache.hadoop.hive.metastore.api.Partition partition :
        event.getPartitions()) {
      writeEntities.add(
          new WriteEntity(
              new Partition(table, partition),
              WriteType.INSERT
          )
      );
    }

    run(readEntities, writeEntities, HiveOperation.THRIFT_ADD_PARTITION);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
 
Author: airbnb, Project: reair, Lines: 31, Source: MetastoreAuditLogListener.java

Example 10: onDropPartition

import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
/**
 * Listener which fires when a partition is dropped.
 *
 * <p>For auditing purposes the read/write differential is the existence and
 * non-existence of the dropped partition respectively.</p>
 *
 * @param event The drop partition event
 */
@Override
public void onDropPartition(DropPartitionEvent event) throws MetaException {
  try {
    Set<ReadEntity> readEntities = new HashSet<>();

    readEntities.add(
        new ReadEntity(
          new Partition(new Table(event.getTable()), event.getPartition())
        )
    );

    Set<WriteEntity> writeEntities = new HashSet<>();

    run(readEntities, writeEntities, HiveOperation.THRIFT_DROP_PARTITION);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
 
Author: airbnb, Project: reair, Lines: 27, Source: MetastoreAuditLogListener.java

Example 11: getCreateTableString

import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
private String getCreateTableString(Table table, String location) {
    // Build the column list as "(name type,name type)"; empty if no columns.
    StringBuilder colString = new StringBuilder();
    List<FieldSchema> colList = table.getAllCols();
    if (colList != null && !colList.isEmpty()) {
        colString.append('(');
        for (FieldSchema col : colList) {
            colString.append(col.getName()).append(' ').append(col.getType()).append(',');
        }
        // Replace the trailing comma with the closing parenthesis.
        colString.setCharAt(colString.length() - 1, ')');
    }
    return "create external table " + table.getTableName() + colString
            + " location '" + location + "'";
}
 
Author: apache, Project: incubator-atlas, Lines: 17, Source: HiveMetaStoreBridge.java
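
Worked example: for a hypothetical table clicks with columns (id int, ts string) and location /data/clicks, this method produces the query: create external table clicks(id int,ts string) location '/data/clicks'.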

Example 12: registerTable

import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
private Referenceable registerTable(Referenceable dbReference, Table table) throws AtlasHookException {
    try {
        String dbName = table.getDbName();
        String tableName = table.getTableName();
        LOG.info("Attempting to register table [{}]", tableName);
        Referenceable tableReference = getTableReference(table);
        LOG.info("Found result {}", tableReference);
        if (tableReference == null) {
            tableReference = createTableInstance(dbReference, table);
            tableReference = registerInstance(tableReference);
        } else {
            LOG.info("Table {}.{} is already registered with id {}. Updating entity.", dbName, tableName,
                    tableReference.getId().id);
            tableReference = createOrUpdateTableInstance(dbReference, tableReference, table);
            updateInstance(tableReference);
        }
        return tableReference;
    } catch (Exception e) {
        throw new AtlasHookException("HiveMetaStoreBridge.registerTable() failed.", e);
    }
}
 
Author: apache, Project: incubator-atlas, Lines: 22, Source: HiveMetaStoreBridge.java

Example 13: replaceTableQFName

import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
private Referenceable replaceTableQFName(HiveEventContext event, Table oldTable, Table newTable, final Referenceable tableEntity, final String oldTableQFName, final String newTableQFName) throws HiveException {
    tableEntity.set(AtlasClient.NAME, oldTable.getTableName().toLowerCase());
    tableEntity.set(AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME, oldTableQFName);

    //Replace table entity with new name
    final Referenceable newEntity = new Referenceable(HiveDataTypes.HIVE_TABLE.getName());
    newEntity.set(AtlasClient.NAME, newTable.getTableName().toLowerCase());
    newEntity.set(AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME, newTableQFName);

    ArrayList<String> aliasList = new ArrayList<>();
    aliasList.add(oldTable.getTableName().toLowerCase());
    newEntity.set(HiveMetaStoreBridge.TABLE_ALIAS_LIST, aliasList);
    event.addMessage(new HookNotification.EntityPartialUpdateRequest(event.getUser(),
        HiveDataTypes.HIVE_TABLE.getName(), AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME,
        oldTableQFName, newEntity));

    return newEntity;
}
 
Author: apache, Project: incubator-atlas, Lines: 19, Source: HiveHook.java

Example 14: testImportThatUpdatesRegisteredTable

import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
@Test
public void testImportThatUpdatesRegisteredTable() throws Exception {
    setupDB(hiveClient, TEST_DB_NAME);

    List<Table> hiveTables = setupTables(hiveClient, TEST_DB_NAME, TEST_TABLE_NAME);

    returnExistingDatabase(TEST_DB_NAME, atlasClient, CLUSTER_NAME);

    // return existing table
    when(atlasClient.getEntity(HiveDataTypes.HIVE_TABLE.getName(),
        AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME, HiveMetaStoreBridge.getTableQualifiedName(CLUSTER_NAME, TEST_DB_NAME, TEST_TABLE_NAME)))
        .thenReturn(getEntityReference(HiveDataTypes.HIVE_TABLE.getName(), "82e06b34-9151-4023-aa9d-b82103a50e77"));
    when(atlasClient.getEntity("82e06b34-9151-4023-aa9d-b82103a50e77")).thenReturn(createTableReference());
    String processQualifiedName = HiveMetaStoreBridge.getTableProcessQualifiedName(CLUSTER_NAME, hiveTables.get(0));
    when(atlasClient.getEntity(HiveDataTypes.HIVE_PROCESS.getName(),
        AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME, processQualifiedName)).thenReturn(getEntityReference(HiveDataTypes.HIVE_PROCESS.getName(), "82e06b34-9151-4023-aa9d-b82103a50e77"));

    HiveMetaStoreBridge bridge = new HiveMetaStoreBridge(CLUSTER_NAME, hiveClient, atlasClient);
    bridge.importHiveMetadata(true);

    // verify update is called on table
    verify(atlasClient).updateEntity(eq("82e06b34-9151-4023-aa9d-b82103a50e77"),
            (Referenceable) argThat(new MatchesReferenceableProperty(HiveMetaStoreBridge.TABLE_TYPE_ATTR,
                    TableType.EXTERNAL_TABLE.name())));
}
 
Author: apache, Project: incubator-atlas, Lines: 26, Source: HiveMetaStoreBridgeTest.java
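
MatchesReferenceableProperty is a custom Mockito matcher defined in the test class and used with argThat(...) above. A hedged sketch of its likely shape, in Mockito 1.x style; the real test class may differ:

// Assumed shape of the custom matcher: verifies one attribute on the
// Referenceable passed to updateEntity.
private static class MatchesReferenceableProperty extends org.mockito.ArgumentMatcher<Object> {
  private final String attrName;
  private final Object attrValue;

  MatchesReferenceableProperty(String attrName, Object attrValue) {
    this.attrName = attrName;
    this.attrValue = attrValue;
  }

  @Override
  public boolean matches(Object o) {
    return attrValue.equals(((Referenceable) o).get(attrName));
  }
}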

Example 15: testImportContinuesWhenTableRegistrationFails

import org.apache.hadoop.hive.ql.metadata.Table; // import the required package/class
@Test
public void testImportContinuesWhenTableRegistrationFails() throws Exception {
    setupDB(hiveClient, TEST_DB_NAME);
    final String table2Name = TEST_TABLE_NAME + "_1";
    List<Table> hiveTables = setupTables(hiveClient, TEST_DB_NAME, TEST_TABLE_NAME, table2Name);

    returnExistingDatabase(TEST_DB_NAME, atlasClient, CLUSTER_NAME);
    when(hiveClient.getTable(TEST_DB_NAME, TEST_TABLE_NAME)).thenThrow(new RuntimeException("Timeout while reading data from hive metastore"));

    when(atlasClient.getEntity(HiveDataTypes.HIVE_TABLE.getName(), AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME, HiveMetaStoreBridge.getTableQualifiedName(CLUSTER_NAME, TEST_DB_NAME,
        table2Name))).thenReturn(
        getEntityReference(HiveDataTypes.HIVE_TABLE.getName(), "82e06b34-9151-4023-aa9d-b82103a50e77"));
    when(atlasClient.getEntity("82e06b34-9151-4023-aa9d-b82103a50e77")).thenReturn(createTableReference());
    String processQualifiedName = HiveMetaStoreBridge.getTableProcessQualifiedName(CLUSTER_NAME, hiveTables.get(1));
    when(atlasClient.getEntity(HiveDataTypes.HIVE_PROCESS.getName(), AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME,
        processQualifiedName)).thenReturn(getEntityReference(HiveDataTypes.HIVE_PROCESS.getName(), "82e06b34-9151-4023-aa9d-b82103a50e77"));

    HiveMetaStoreBridge bridge = new HiveMetaStoreBridge(CLUSTER_NAME, hiveClient, atlasClient);
    try {
        bridge.importHiveMetadata(false);
    } catch (Exception e) {
        Assert.fail("Table registration failed with exception", e);
    }
}
 
Author: apache, Project: incubator-atlas, Lines: 25, Source: HiveMetaStoreBridgeTest.java


Note: The org.apache.hadoop.hive.ql.metadata.Table class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation hosting platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Refer to each project's license before distributing or using the code, and do not reproduce this material without permission.