

Java FieldSchema Class Code Examples

This article collects typical usages and code examples of the Java class org.apache.hadoop.hive.metastore.api.FieldSchema. If you are wondering what the FieldSchema class is for, how to use it, or where to find examples of it, the curated class code examples below may help.


The FieldSchema class belongs to the org.apache.hadoop.hive.metastore.api package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
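
Before the examples, here is a minimal, self-contained sketch of the class's core usage. A FieldSchema describes a single column as a (name, Hive type string, optional comment) triple, and both a table's data columns and its partition keys are plain lists of FieldSchema. The column names and comment text below are hypothetical, chosen purely for illustration:

import org.apache.hadoop.hive.metastore.api.FieldSchema;

import java.util.Arrays;
import java.util.List;

public class FieldSchemaSketch {
  public static void main(String[] args) {
    // Each FieldSchema takes a name, a Hive type string, and an optional comment (may be null).
    FieldSchema id = new FieldSchema("id", "int", "row identifier");
    FieldSchema name = new FieldSchema("name", "string", null);
    FieldSchema partcol = new FieldSchema("partcol", "int", null);

    // Data columns and partition keys are both plain lists of FieldSchema.
    List<FieldSchema> columns = Arrays.asList(id, name);
    List<FieldSchema> partitionKeys = Arrays.asList(partcol);

    for (FieldSchema f : columns) {
      System.out.println(f.getName() + ": " + f.getType());
    }
    System.out.println("partitioned by " + partitionKeys.get(0).getName());
  }
}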

Example 1: createPartitionedTable

import org.apache.hadoop.hive.metastore.api.FieldSchema; // import the dependent package/class
private Table createPartitionedTable(String databaseName, String tableName) throws Exception {
  Table table = new Table();
  table.setDbName(databaseName);
  table.setTableName(tableName);
  table.setPartitionKeys(Arrays.asList(new FieldSchema("partcol", "int", null)));
  table.setSd(new StorageDescriptor());
  table.getSd().setCols(Arrays.asList(new FieldSchema("id", "int", null), new FieldSchema("name", "string", null)));
  table.getSd().setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
  table.getSd().setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
  table.getSd().setSerdeInfo(new SerDeInfo());
  table.getSd().getSerdeInfo().setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
  HiveMetaStoreClient client = server.newClient();
  client.createTable(table);
  client.close();
  return table;
}
 
Developer ID: HotelsDotCom, Project: beeju, Lines: 17, Source: HiveServer2JUnitRuleTest.java

Example 2: constructAvroTable

import org.apache.hadoop.hive.metastore.api.FieldSchema; // import the dependent package/class
private Table constructAvroTable(String database, String tableName, Schema schema, Partitioner partitioner)
    throws HiveMetaStoreException {
  Table table = newTable(database, tableName);
  table.setTableType(TableType.EXTERNAL_TABLE);
  table.getParameters().put("EXTERNAL", "TRUE");
  String tablePath = FileUtils.hiveDirectoryName(url, topicsDir, tableName);
  table.setDataLocation(new Path(tablePath));
  table.setSerializationLib(avroSerde);
  try {
    table.setInputFormatClass(avroInputFormat);
    table.setOutputFormatClass(avroOutputFormat);
  } catch (HiveException e) {
    throw new HiveMetaStoreException("Cannot find input/output format:", e);
  }
  List<FieldSchema> columns = HiveSchemaConverter.convertSchema(schema);
  table.setFields(columns);
  table.setPartCols(partitioner.partitionFields());
  table.getParameters().put(AVRO_SCHEMA_LITERAL, avroData.fromConnectSchema(schema).toString());
  return table;
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines: 21, Source: AvroHiveUtil.java

Example 3: constructParquetTable

import org.apache.hadoop.hive.metastore.api.FieldSchema; // import the dependent package/class
private Table constructParquetTable(String database, String tableName, Schema schema, Partitioner partitioner) throws HiveMetaStoreException {
  Table table = newTable(database, tableName);
  table.setTableType(TableType.EXTERNAL_TABLE);
  table.getParameters().put("EXTERNAL", "TRUE");
  String tablePath = FileUtils.hiveDirectoryName(url, topicsDir, tableName);
  table.setDataLocation(new Path(tablePath));
  table.setSerializationLib(getHiveParquetSerde());
  try {
    table.setInputFormatClass(getHiveParquetInputFormat());
    table.setOutputFormatClass(getHiveParquetOutputFormat());
  } catch (HiveException e) {
    throw new HiveMetaStoreException("Cannot find input/output format:", e);
  }
  // convert the Connect schema to Hive columns
  List<FieldSchema> columns = HiveSchemaConverter.convertSchema(schema);
  table.setFields(columns);
  table.setPartCols(partitioner.partitionFields());
  return table;
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines: 20, Source: ParquetHiveUtil.java

Example 4: newInstance

import org.apache.hadoop.hive.metastore.api.FieldSchema; // import the dependent package/class
public FilterGenerator newInstance(TableReplication tableReplication) {
  SourceTable sourceTable = tableReplication.getSourceTable();
  String sourceDatabaseName = sourceTable.getDatabaseName();
  String sourceTableName = sourceTable.getTableName();
  String partitionFilter = sourceTable.getPartitionFilter();

  SourceHiveEndpoint source = sourceFactory.newInstance(tableReplication);
  TableAndStatistics tableAndStatistics = source.getTableAndStatistics(sourceDatabaseName, sourceTableName);
  List<FieldSchema> partitionKeys = tableAndStatistics.getTable().getPartitionKeys();
  if (partitionKeys != null && !partitionKeys.isEmpty()) {
    if (!tableReplication.getSourceTable().isGeneratePartitionFilter()) {
      checkSpelFilter(partitionFilter);
    }
    PartitionPredicate partitionPredicate = new PartitionPredicateFactory(sourceFactory, replicaFactory,
        expressionParser, null).newInstance(tableReplication);
    return new FilterGeneratorImpl(source, tableAndStatistics.getTable(), partitionFilter, partitionPredicate);
  } else {
    return DUMMY_FILTER_GENERATOR;
  }
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines: 21, Source: FilterGeneratorFactory.java

Example 5: newInstance

import org.apache.hadoop.hive.metastore.api.FieldSchema; // import the dependent package/class
@Override
public Replication newInstance(TableReplication tableReplication) {
  Replica replica = replicaFactory.newInstance(tableReplication);
  SourceTable sourceTable = tableReplication.getSourceTable();
  String sourceDatabaseName = sourceTable.getDatabaseName();
  String sourceTableName = sourceTable.getTableName();

  String replicaDatabaseName = tableReplication.getReplicaDatabaseName();
  String replicaTableName = tableReplication.getReplicaTableName();
  String replicaTableLocation = tableReplication.getReplicaTable().getTableLocation();

  Source source = sourceFactory.newInstance(tableReplication);
  validate(tableReplication, source, replica);
  TableAndStatistics tableAndStatistics = source.getTableAndStatistics(sourceDatabaseName, sourceTableName);
  List<FieldSchema> partitionKeys = tableAndStatistics.getTable().getPartitionKeys();

  Replication replication = null;
  if (partitionKeys == null || partitionKeys.isEmpty()) {
    replication = createUnpartitionedTableReplication(tableReplication, source, replica, sourceDatabaseName,
        sourceTableName, replicaDatabaseName, replicaTableName, replicaTableLocation, replication);
  } else {
    replication = createPartitionedTableReplication(tableReplication, source, replica, sourceDatabaseName,
        sourceTableName, replicaDatabaseName, replicaTableName, replicaTableLocation, replication);
  }
  return replication;
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines: 27, Source: ReplicationFactoryImpl.java

Example 6: filterOnReplicatedPartitions

import org.apache.hadoop.hive.metastore.api.FieldSchema; // import the dependent package/class
private PartitionsAndStatistics filterOnReplicatedPartitions(
    CloseableMetaStoreClient replicaClient,
    PartitionsAndStatistics sourcePartitionsAndStatistics,
    List<FieldSchema> partitionKeys)
  throws TException {
  Map<Partition, ColumnStatistics> statisticsByPartition = new LinkedHashMap<>();
  for (Partition partition : sourcePartitionsAndStatistics.getPartitions()) {
    try {
      replicaClient.getPartition(replicaDatabaseName, replicaTableName, partition.getValues());
      statisticsByPartition.put(partition, sourcePartitionsAndStatistics.getStatisticsForPartition(partition));
    } catch (NoSuchObjectException e) {
      LOG.debug("Partition {} doesn't exist, skipping it...", Warehouse.getQualifiedName(partition));
    }
  }
  return new PartitionsAndStatistics(partitionKeys, statisticsByPartition);
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines: 17, Source: PartitionedTableMetadataUpdateReplication.java

Example 7: getPartitions

import org.apache.hadoop.hive.metastore.api.FieldSchema; // import the dependent package/class
private static List<PartitionValue> getPartitions(Table table, Partition partition) {
  if(partition == null){
    return Collections.emptyList();
  }

  final List<String> partitionValues = partition.getValues();
  final List<PartitionValue> output = Lists.newArrayList();
  final List<FieldSchema> partitionKeys = table.getPartitionKeys();
  for(int i = 0; i < partitionKeys.size(); i++){
    PartitionValue value = getPartitionValue(partitionKeys.get(i), partitionValues.get(i));
    if(value != null){
      output.add(value);
    }
  }
  return output;
}
 
Developer ID: dremio, Project: dremio-oss, Lines: 17, Source: DatasetBuilder.java

Example 8: setupHiveTables

import org.apache.hadoop.hive.metastore.api.FieldSchema; // import the dependent package/class
private void setupHiveTables() throws TException, IOException {
  List<FieldSchema> partitionKeys = Lists.newArrayList(newFieldSchema("p1"), newFieldSchema("p2"));

  File tableLocation = new File("db1", "table1");
  StorageDescriptor sd = newStorageDescriptor(tableLocation, "col0");
  table1 = newTable("table1", "db1", partitionKeys, sd);
  Partition partition1 = newPartition(table1, "value1", "value2");
  Partition partition2 = newPartition(table1, "value11", "value22");
  table1Partitions = Arrays.asList(partition1, partition2);
  table1PartitionNames = Arrays.asList(Warehouse.makePartName(partitionKeys, partition1.getValues()),
      Warehouse.makePartName(partitionKeys, partition2.getValues()));

  File tableLocation2 = new File("db2", "table2");
  StorageDescriptor sd2 = newStorageDescriptor(tableLocation2, "col0");
  table2 = newTable("table2", "db2", partitionKeys, sd2);
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines: 17, Source: DiffGeneratedPartitionPredicateTest.java

Example 9: getRowType

import org.apache.hadoop.hive.metastore.api.FieldSchema; // import the dependent package/class
@Override
public RelDataType getRowType(RelDataTypeFactory typeFactory) {
  List<RelDataType> typeList = Lists.newArrayList();
  List<String> fieldNameList = Lists.newArrayList();

  List<FieldSchema> hiveFields = hiveTable.getCols();
  for(FieldSchema hiveField : hiveFields) {
    fieldNameList.add(hiveField.getName());
    typeList.add(getNullableRelDataTypeFromHiveType(
        typeFactory, TypeInfoUtils.getTypeInfoFromTypeString(hiveField.getType())));
  }

  for (FieldSchema field : hiveTable.getPartitionKeys()) {
    fieldNameList.add(field.getName());
    typeList.add(getNullableRelDataTypeFromHiveType(
        typeFactory, TypeInfoUtils.getTypeInfoFromTypeString(field.getType())));
  }

  return typeFactory.createStructType(typeList, fieldNameList);
}
 
Developer ID: skhalifa, Project: QDrill, Lines: 21, Source: DrillHiveTable.java

Example 10: typical

import org.apache.hadoop.hive.metastore.api.FieldSchema; // import the dependent package/class
@Test
public void typical() throws Exception {
  List<FieldSchema> partitionKeys = Lists.newArrayList(newFieldSchema("a"), newFieldSchema("c"));
  Table table = newTable("t1", "db1", partitionKeys, newStorageDescriptor(new File("bla"), "col1"));
  List<Partition> partitions = Lists.newArrayList(newPartition(table, "b", "d"));
  statisticsPerPartitionName.put("a=b/c=d", columnStats);

  PartitionsAndStatistics partitionsAndStatistics = new PartitionsAndStatistics(partitionKeys, partitions,
      statisticsPerPartitionName);
  List<String> expectedName = Lists.newArrayList("a=b/c=d");

  assertThat(partitionsAndStatistics.getPartitionNames(), is(expectedName));
  assertThat(partitionsAndStatistics.getPartitions(), is(partitions));
  ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(false, "db1", "t1");
  statsDesc.setPartName("a=b/c=d");
  ColumnStatistics expectedStats = new ColumnStatistics(statsDesc, columnStats);
  assertThat(partitionsAndStatistics.getStatisticsForPartition(partitions.get(0)), is(expectedStats));
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines: 19, Source: PartitionsAndStatisticsTest.java

Example 11: createView

import org.apache.hadoop.hive.metastore.api.FieldSchema; // import the dependent package/class
private static Table createView(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String view,
    String table,
    List<FieldSchema> partitionCols)
  throws TException {
  Table hiveView = new Table();
  hiveView.setDbName(database);
  hiveView.setTableName(view);
  hiveView.setTableType(TableType.VIRTUAL_VIEW.name());
  hiveView.setViewOriginalText(hql(database, table));
  hiveView.setViewExpandedText(expandHql(database, table, DATA_COLUMNS, partitionCols));
  hiveView.setPartitionKeys(partitionCols);

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());
  hiveView.setSd(sd);

  metaStoreClient.createTable(hiveView);

  return hiveView;
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines: 26, Source: TestUtils.java

Example 12: expandHql

import org.apache.hadoop.hive.metastore.api.FieldSchema; // import the dependent package/class
private static String expandHql(
    String database,
    String table,
    List<FieldSchema> dataColumns,
    List<FieldSchema> partitionColumns) {
  List<String> dataColumnNames = toQualifiedColumnNames(table, dataColumns);
  List<String> partitionColumnNames = partitionColumns != null ? toQualifiedColumnNames(table, partitionColumns)
      : ImmutableList.<String> of();
  List<String> colNames = ImmutableList
      .<String> builder()
      .addAll(dataColumnNames)
      .addAll(partitionColumnNames)
      .build();

  String cols = COMMA_JOINER.join(colNames);
  return String.format("SELECT %s FROM `%s`.`%s`", cols, database, table);
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines: 18, Source: TestUtils.java

Example 13: allShortCircuit

import org.apache.hadoop.hive.metastore.api.FieldSchema; // import the dependent package/class
@Test
public void allShortCircuit() {
  left.getPartition().getParameters().put("com.company.key", "value");
  left.getPartition().setValues(ImmutableList.of("p1", "p2"));
  List<PrivilegeGrantInfo> privilege = ImmutableList.of(new PrivilegeGrantInfo());
  left.getPartition().setPrivileges(new PrincipalPrivilegeSet(ImmutableMap.of("write", privilege), null, null));
  left.getPartition().getSd().setLocation("left");
  left.getPartition().getSd().setInputFormat("LeftInputFormat");
  left.getPartition().getSd().setOutputFormat("LeftOutputFormat");
  left.getPartition().getSd().getParameters().put("com.company.key", "value");
  left.getPartition().getSd().getSerdeInfo().setName("left serde info");
  left.getPartition().getSd().getSkewedInfo().setSkewedColNames(ImmutableList.of("left skewed col"));
  left.getPartition().getSd().setCols(ImmutableList.of(new FieldSchema("left", "type", "comment")));
  left.getPartition().getSd().setSortCols(ImmutableList.of(new Order()));
  left.getPartition().getSd().setBucketCols(ImmutableList.of("bucket"));
  left.getPartition().getSd().setNumBuckets(9000);

  List<Diff<Object, Object>> diffs = newPartitionAndMetadataComparator(SHORT_CIRCUIT).compare(left, right);

  assertThat(diffs, is(notNullValue()));
  assertThat(diffs.size(), is(1));
  assertThat(diffs.get(0), is(newPropertyDiff(PartitionAndMetadata.class, "partition.parameters",
      left.getPartition().getParameters(), right.getPartition().getParameters())));
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines: 25, Source: PartitionAndMetadataComparatorTest.java

Example 14: sdColsSameNumberOfColsFullComparison

import org.apache.hadoop.hive.metastore.api.FieldSchema; // import the dependent package/class
@Test
public void sdColsSameNumberOfColsFullComparison() {
  left.getTable().getSd().setCols(
      ImmutableList.of(new FieldSchema("left1", "type", "comment1"), new FieldSchema("left2", "type", "comment2")));
  List<Diff<Object, Object>> diffs = newTableAndMetadataComparator(FULL_COMPARISON).compare(left, right);
  assertThat(diffs, is(notNullValue()));
  assertThat(diffs.size(), is(4));
  assertThat(diffs.get(0),
      is(newDiff(
          "Element 0 of collection table.sd.cols of class com.google.common.collect.RegularImmutableList is different: Property name of class org.apache.hadoop.hive.metastore.api.FieldSchema is different",
          left.getTable().getSd().getCols().get(0).getName(), right.getTable().getSd().getCols().get(0).getName())));
  assertThat(diffs.get(1),
      is(newDiff(
          "Element 0 of collection table.sd.cols of class com.google.common.collect.RegularImmutableList is different: Property type of class org.apache.hadoop.hive.metastore.api.FieldSchema is different",
          left.getTable().getSd().getCols().get(0).getType(), right.getTable().getSd().getCols().get(0).getType())));
  assertThat(diffs.get(2),
      is(newDiff(
          "Element 1 of collection table.sd.cols of class com.google.common.collect.RegularImmutableList is different: Property name of class org.apache.hadoop.hive.metastore.api.FieldSchema is different",
          left.getTable().getSd().getCols().get(1).getName(), right.getTable().getSd().getCols().get(1).getName())));
  assertThat(diffs.get(3),
      is(newDiff(
          "Element 1 of collection table.sd.cols of class com.google.common.collect.RegularImmutableList is different: Property type of class org.apache.hadoop.hive.metastore.api.FieldSchema is different",
          left.getTable().getSd().getCols().get(1).getType(), right.getTable().getSd().getCols().get(1).getType())));
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines: 25, Source: TableAndMetadataComparatorTest.java

Example 15: HiveTable

import org.apache.hadoop.hive.metastore.api.FieldSchema; // import the dependent package/class
public HiveTable(Table table) {
  if (table == null) {
    return;
  }
  this.table = table;
  this.tableName = table.getTableName();
  this.dbName = table.getDbName();
  this.owner = table.getOwner();
  this.createTime = table.getCreateTime();
  this.lastAccessTime = table.getLastAccessTime();
  this.retention = table.getRetention();
  this.sd = new StorageDescriptorWrapper(table.getSd());
  this.partitionKeys = Lists.newArrayList();
  for (FieldSchema f : table.getPartitionKeys()) {
    this.partitionKeys.add(new FieldSchemaWrapper(f));
    partitionNameTypeMap.put(f.getName(), f.getType());
  }
  this.parameters = table.getParameters();
  this.viewOriginalText = table.getViewOriginalText();
  this.viewExpandedText = table.getViewExpandedText();
  this.tableType = table.getTableType();
}
 
Developer ID: skhalifa, Project: QDrill, Lines: 23, Source: HiveTable.java


Note: The org.apache.hadoop.hive.metastore.api.FieldSchema class examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please follow the corresponding project's License; do not reproduce without permission.