

Java StorageDescriptor.setParameters Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hive.metastore.api.StorageDescriptor.setParameters. If you are wondering what StorageDescriptor.setParameters does, when to use it, or how to call it, the curated code examples below should help. You can also explore further usage examples of org.apache.hadoop.hive.metastore.api.StorageDescriptor, the class this method belongs to.


The sections below present 15 code examples of the StorageDescriptor.setParameters method, sorted by popularity by default.
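Before the individual examples, here is a minimal sketch (not taken from any of the projects below; class and method names are hypothetical) of the pattern most of them share: create a StorageDescriptor, pass a Map&lt;String, String&gt; to setParameters (typically an empty HashMap so the field is non-null), and attach the descriptor to a Table or Partition.

import java.util.HashMap;

import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;

public class StorageDescriptorSketch {
  // Hypothetical helper: builds a StorageDescriptor with an empty, non-null parameters map,
  // mirroring the pattern used throughout the examples below.
  public static StorageDescriptor newDescriptor() {
    StorageDescriptor sd = new StorageDescriptor();
    sd.setParameters(new HashMap<String, String>()); // parameters map passed to setParameters
    sd.setSerdeInfo(new SerDeInfo());
    return sd;
  }
}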

Example 1: createPartitionedTable

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class required by this method
static Table createPartitionedTable(HiveMetaStoreClient metaStoreClient, String database, String table, File location)
  throws Exception {

  Table hiveTable = new Table();
  hiveTable.setDbName(database);
  hiveTable.setTableName(table);
  hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
  hiveTable.putToParameters("EXTERNAL", "TRUE");

  hiveTable.setPartitionKeys(PARTITION_COLUMNS);

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setLocation(location.toURI().toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());

  hiveTable.setSd(sd);

  metaStoreClient.createTable(hiveTable);

  return hiveTable;
}
 
Developer: HotelsDotCom, Project: waggle-dance, Lines: 24, Source: TestUtils.java

Example 2: createView

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class required by this method
private static Table createView(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String view,
    String table,
    List<FieldSchema> partitionCols)
  throws TException {
  Table hiveView = new Table();
  hiveView.setDbName(database);
  hiveView.setTableName(view);
  hiveView.setTableType(TableType.VIRTUAL_VIEW.name());
  hiveView.setViewOriginalText(hql(database, table));
  hiveView.setViewExpandedText(expandHql(database, table, DATA_COLUMNS, partitionCols));
  hiveView.setPartitionKeys(partitionCols);

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());
  hiveView.setSd(sd);

  metaStoreClient.createTable(hiveView);

  return hiveView;
}
 
Developer: HotelsDotCom, Project: circus-train, Lines: 26, Source: TestUtils.java

Example 3: createUnpartitionedTable

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class required by this method
static Table createUnpartitionedTable(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String table,
    File location)
  throws TException {
  Table hiveTable = new Table();
  hiveTable.setDbName(database);
  hiveTable.setTableName(table);
  hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
  hiveTable.putToParameters("EXTERNAL", "TRUE");

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setLocation(location.toURI().toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());

  hiveTable.setSd(sd);

  metaStoreClient.createTable(hiveTable);

  return hiveTable;
}
 
Developer: HotelsDotCom, Project: waggle-dance, Lines: 25, Source: TestUtils.java

Example 4: makeMetastoreTableObject

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class required by this method
public Table makeMetastoreTableObject(HiveMetaStoreClient client,
    String dbName, String tabName, List<FieldSchema> cols) throws Exception {
  Table tbl = new Table();
  tbl.setDbName(dbName);
  tbl.setTableName(tabName);
  StorageDescriptor sd = new StorageDescriptor();
  tbl.setSd(sd);
  tbl.setParameters(new HashMap<String, String>());
  sd.setCols(cols);
  sd.setCompressed(false);
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setName(tbl.getTableName());
  sd.getSerdeInfo().setParameters(new HashMap<String, String>());
  sd.getSerdeInfo().getParameters()
      .put(serdeConstants.SERIALIZATION_FORMAT, "1");
  sd.setSortCols(new ArrayList<Order>());
  return tbl;
}
 
Developer: apache, Project: incubator-sentry, Lines: 20, Source: AbstractMetastoreTestWithStaticConfiguration.java

Example 5: createTable

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class required by this method
private void createTable(File sourceTableUri) throws Exception {
  File partitionEurope = new File(sourceTableUri, "local_date=2000-01-01");
  File partitionUk = new File(partitionEurope, "local_hour=0");
  File dataFileUk = new File(partitionUk, PART_00000);
  FileUtils.writeStringToFile(dataFileUk, "1\tadam\tlondon\n2\tsusan\tglasgow\n");

  File partitionAsia = new File(sourceTableUri, "local_date=2000-01-02");
  File partitionChina = new File(partitionAsia, "local_hour=0");
  File dataFileChina = new File(partitionChina, PART_00000);
  String data = "1\tchun\tbeijing\n2\tshanghai\tmilan\n";
  FileUtils.writeStringToFile(dataFileChina, data);

  HiveMetaStoreClient sourceClient = sourceCatalog.client();

  Table source = new Table();
  source.setDbName(DATABASE);
  source.setTableName(TABLE);
  source.setTableType(TableType.EXTERNAL_TABLE.name());
  source.setParameters(new HashMap<String, String>());

  List<FieldSchema> partitionColumns = Arrays.asList(new FieldSchema("local_date", "string", ""),
      new FieldSchema("local_hour", "string", ""));
  source.setPartitionKeys(partitionColumns);

  List<FieldSchema> dataColumns = Arrays.asList(new FieldSchema("id", "bigint", ""),
      new FieldSchema("name", "string", ""), new FieldSchema("city", "tinyint", ""));

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(dataColumns);
  sd.setLocation(sourceTableUri.toURI().toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());

  source.setSd(sd);

  sourceClient.createTable(source);
  LOG.info(">>>> Partitions added: {}",
      +sourceClient.add_partitions(Arrays.asList(newPartition(sd, Arrays.asList("2000-01-01", "0"), partitionUk),
          newPartition(sd, Arrays.asList("2000-01-02", "0"), partitionChina))));
}
 
Developer: HotelsDotCom, Project: circus-train, Lines: 41, Source: FilterToolIntegrationTest.java

Example 6: createUnpartitionedTable

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class required by this method
public static Table createUnpartitionedTable(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String table,
    URI location)
  throws TException {
  Table hiveTable = new Table();
  hiveTable.setDbName(database);
  hiveTable.setTableName(table);
  hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
  hiveTable.putToParameters("EXTERNAL", "TRUE");

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setLocation(location.toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setInputFormat(TextInputFormat.class.getName());
  sd.setOutputFormat(TextOutputFormat.class.getName());
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setSerializationLib("org.apache.hadoop.hive.serde2.OpenCSVSerde");

  hiveTable.setSd(sd);

  metaStoreClient.createTable(hiveTable);

  ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, database, table);
  ColumnStatisticsData statsData = new ColumnStatisticsData(_Fields.LONG_STATS, new LongColumnStatsData(1L, 2L));
  ColumnStatisticsObj cso1 = new ColumnStatisticsObj("id", "bigint", statsData);
  List<ColumnStatisticsObj> statsObj = Collections.singletonList(cso1);
  metaStoreClient.updateTableColumnStatistics(new ColumnStatistics(statsDesc, statsObj));

  return hiveTable;
}
 
Developer: HotelsDotCom, Project: circus-train, Lines: 34, Source: TestUtils.java

Example 7: createPartitionedTable

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class required by this method
public static Table createPartitionedTable(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String table,
    URI location)
  throws Exception {

  Table hiveTable = new Table();
  hiveTable.setDbName(database);
  hiveTable.setTableName(table);
  hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
  hiveTable.putToParameters("EXTERNAL", "TRUE");

  hiveTable.setPartitionKeys(PARTITION_COLUMNS);

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setLocation(location.toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setInputFormat(TextInputFormat.class.getName());
  sd.setOutputFormat(TextOutputFormat.class.getName());
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setSerializationLib("org.apache.hadoop.hive.serde2.OpenCSVSerde");

  hiveTable.setSd(sd);

  metaStoreClient.createTable(hiveTable);

  ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, database, table);
  ColumnStatisticsData statsData = new ColumnStatisticsData(_Fields.LONG_STATS, new LongColumnStatsData(1L, 2L));
  ColumnStatisticsObj cso1 = new ColumnStatisticsObj("id", "bigint", statsData);
  List<ColumnStatisticsObj> statsObj = Collections.singletonList(cso1);
  metaStoreClient.updateTableColumnStatistics(new ColumnStatistics(statsDesc, statsObj));

  return hiveTable;
}
 
Developer: HotelsDotCom, Project: circus-train, Lines: 37, Source: TestUtils.java

Example 8: newTable

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class required by this method
public static Table newTable(String database, String tableName) {
  Table table = new Table();
  table.setDbName(database);
  table.setTableName(tableName);
  table.setTableType(TABLE_TYPE);
  table.setOwner(OWNER);
  table.setCreateTime(CREATE_TIME);
  table.setRetention(RETENTION);

  Map<String, List<PrivilegeGrantInfo>> userPrivileges = new HashMap<>();
  userPrivileges.put("read", ImmutableList.of(new PrivilegeGrantInfo()));
  PrincipalPrivilegeSet privileges = new PrincipalPrivilegeSet();
  privileges.setUserPrivileges(userPrivileges);
  table.setPrivileges(privileges);

  StorageDescriptor storageDescriptor = new StorageDescriptor();
  storageDescriptor.setCols(COLS);
  storageDescriptor.setInputFormat(INPUT_FORMAT);
  storageDescriptor.setOutputFormat(OUTPUT_FORMAT);
  storageDescriptor.setSerdeInfo(new SerDeInfo(SERDE_INFO_NAME, SERIALIZATION_LIB, new HashMap<String, String>()));
  storageDescriptor.setSkewedInfo(new SkewedInfo());
  storageDescriptor.setParameters(new HashMap<String, String>());
  storageDescriptor.setLocation(DATABASE + "/" + tableName + "/");
  table.setSd(storageDescriptor);

  Map<String, String> parameters = new HashMap<>();
  parameters.put("com.company.parameter", "abc");
  table.setParameters(parameters);

  return table;
}
 
Developer: HotelsDotCom, Project: circus-train, Lines: 32, Source: TestUtils.java

Example 9: newPartition

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class required by this method
public static Partition newPartition(String database, String tableName, String partitionValue) {
  Partition partition = new Partition();
  partition.setDbName(database);
  partition.setTableName(tableName);
  partition.setCreateTime(CREATE_TIME);
  partition.setValues(ImmutableList.of(partitionValue));

  Map<String, List<PrivilegeGrantInfo>> userPrivileges = new HashMap<>();
  userPrivileges.put("read", ImmutableList.of(new PrivilegeGrantInfo()));
  PrincipalPrivilegeSet privileges = new PrincipalPrivilegeSet();
  privileges.setUserPrivileges(userPrivileges);
  partition.setPrivileges(privileges);

  StorageDescriptor storageDescriptor = new StorageDescriptor();
  storageDescriptor.setCols(COLS);
  storageDescriptor.setInputFormat(INPUT_FORMAT);
  storageDescriptor.setOutputFormat(OUTPUT_FORMAT);
  storageDescriptor.setSerdeInfo(new SerDeInfo(SERDE_INFO_NAME, SERIALIZATION_LIB, new HashMap<String, String>()));
  storageDescriptor.setSkewedInfo(new SkewedInfo());
  storageDescriptor.setParameters(new HashMap<String, String>());
  storageDescriptor.setLocation(DATABASE + "/" + tableName + "/" + partitionValue + "/");
  partition.setSd(storageDescriptor);

  Map<String, String> parameters = new HashMap<>();
  parameters.put("com.company.parameter", "abc");
  partition.setParameters(parameters);

  return partition;
}
 
Developer: HotelsDotCom, Project: circus-train, Lines: 30, Source: TestUtils.java

Example 10: init

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class required by this method
@Before
public void init() {
  table = new Table();
  table.setDbName("database");
  table.setTableName("table");
  table.setTableType("type");

  Map<String, List<PrivilegeGrantInfo>> userPrivileges = new HashMap<>();
  userPrivileges.put("read", ImmutableList.of(new PrivilegeGrantInfo()));
  PrincipalPrivilegeSet privileges = new PrincipalPrivilegeSet();
  privileges.setUserPrivileges(userPrivileges);
  table.setPrivileges(privileges);

  StorageDescriptor storageDescriptor = new StorageDescriptor();
  storageDescriptor.setCols(Arrays.asList(new FieldSchema("a", "int", null)));
  storageDescriptor.setInputFormat("input_format");
  storageDescriptor.setOutputFormat("output_format");
  storageDescriptor.setSerdeInfo(new SerDeInfo("serde", "lib", new HashMap<String, String>()));
  storageDescriptor.setSkewedInfo(new SkewedInfo());
  storageDescriptor.setParameters(new HashMap<String, String>());
  storageDescriptor.setLocation("database/table/");
  table.setSd(storageDescriptor);

  Map<String, String> parameters = new HashMap<>();
  parameters.put("com.company.parameter", "abc");
  table.setParameters(parameters);
}
 
Developer: HotelsDotCom, Project: circus-train, Lines: 28, Source: TableTransformationTest.java

Example 11: init

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class required by this method
@Before
public void init() {
  partition = new Partition();
  partition.setDbName("database");
  partition.setTableName("table");
  partition.setValues(ImmutableList.of("part"));

  Map<String, List<PrivilegeGrantInfo>> userPrivileges = new HashMap<>();
  userPrivileges.put("read", ImmutableList.of(new PrivilegeGrantInfo()));
  PrincipalPrivilegeSet privileges = new PrincipalPrivilegeSet();
  privileges.setUserPrivileges(userPrivileges);
  partition.setPrivileges(privileges);

  StorageDescriptor storageDescriptor = new StorageDescriptor();
  storageDescriptor.setCols(Arrays.asList(new FieldSchema("a", "int", null)));
  storageDescriptor.setInputFormat("input_format");
  storageDescriptor.setOutputFormat("output_format");
  storageDescriptor.setSerdeInfo(new SerDeInfo("serde", "lib", new HashMap<String, String>()));
  storageDescriptor.setSkewedInfo(new SkewedInfo());
  storageDescriptor.setParameters(new HashMap<String, String>());
  storageDescriptor.setLocation("database/table/part/");
  partition.setSd(storageDescriptor);

  Map<String, String> parameters = new HashMap<>();
  parameters.put("com.company.parameter", "abc");
  partition.setParameters(parameters);
}
 
Developer: HotelsDotCom, Project: circus-train, Lines: 28, Source: PartitionTransformationTest.java

Example 12: copyTableSdToPartitionSd

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class required by this method
private void copyTableSdToPartitionSd(final List<Partition> hivePartitions, final Table table) {
    //
    // Update the partition info based on that of the table.
    //
    for (Partition partition : hivePartitions) {
        final StorageDescriptor sd = partition.getSd();
        final StorageDescriptor tableSdCopy = table.getSd().deepCopy();
        if (tableSdCopy.getSerdeInfo() == null) {
            final SerDeInfo serDeInfo = new SerDeInfo(null, null, new HashMap<>());
            tableSdCopy.setSerdeInfo(serDeInfo);
        }

        tableSdCopy.setLocation(sd.getLocation());
        if (!Strings.isNullOrEmpty(sd.getInputFormat())) {
            tableSdCopy.setInputFormat(sd.getInputFormat());
        }
        if (!Strings.isNullOrEmpty(sd.getOutputFormat())) {
            tableSdCopy.setOutputFormat(sd.getOutputFormat());
        }
        if (sd.getParameters() != null && !sd.getParameters().isEmpty()) {
            tableSdCopy.setParameters(sd.getParameters());
        }
        if (sd.getSerdeInfo() != null) {
            if (!Strings.isNullOrEmpty(sd.getSerdeInfo().getName())) {
                tableSdCopy.getSerdeInfo().setName(sd.getSerdeInfo().getName());
            }
            if (!Strings.isNullOrEmpty(sd.getSerdeInfo().getSerializationLib())) {
                tableSdCopy.getSerdeInfo().setSerializationLib(sd.getSerdeInfo().getSerializationLib());
            }
            if (sd.getSerdeInfo().getParameters() != null && !sd.getSerdeInfo().getParameters().isEmpty()) {
                tableSdCopy.getSerdeInfo().setParameters(sd.getSerdeInfo().getParameters());
            }
        }
        partition.setSd(tableSdCopy);
    }
}
 
Developer: Netflix, Project: metacat, Lines: 37, Source: HiveConnectorPartitionService.java

Example 13: getStorageDescriptor

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class required by this method
private static StorageDescriptor getStorageDescriptor(HiveRegistrationUnit unit) {
  State props = unit.getStorageProps();
  StorageDescriptor sd = new StorageDescriptor();
  sd.setParameters(getParameters(props));
  sd.setCols(getFieldSchemas(unit));
  if (unit.getLocation().isPresent()) {
    sd.setLocation(unit.getLocation().get());
  }
  if (unit.getInputFormat().isPresent()) {
    sd.setInputFormat(unit.getInputFormat().get());
  }
  if (unit.getOutputFormat().isPresent()) {
    sd.setOutputFormat(unit.getOutputFormat().get());
  }
  if (unit.getIsCompressed().isPresent()) {
    sd.setCompressed(unit.getIsCompressed().get());
  }
  if (unit.getNumBuckets().isPresent()) {
    sd.setNumBuckets(unit.getNumBuckets().get());
  }
  if (unit.getBucketColumns().isPresent()) {
    sd.setBucketCols(unit.getBucketColumns().get());
  }
  if (unit.getIsStoredAsSubDirs().isPresent()) {
    sd.setStoredAsSubDirectories(unit.getIsStoredAsSubDirs().get());
  }
  sd.setSerdeInfo(getSerDeInfo(unit));
  return sd;
}
 
Developer: apache, Project: incubator-gobblin, Lines: 30, Source: HiveMetaStoreUtils.java

Example 14: createSourceTable

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class required by this method
private void createSourceTable() throws Exception {
  File partitionEurope = new File(sourceTableUri, "local_date=2000-01-01");
  File partitionUk = new File(partitionEurope, "local_hour=0");
  File dataFileUk = new File(partitionUk, PART_00000);
  FileUtils.writeStringToFile(dataFileUk, "1\tadam\tlondon\n2\tsusan\tglasgow\n");

  File partitionAsia = new File(sourceTableUri, "local_date=2000-01-02");
  File partitionChina = new File(partitionAsia, "local_hour=0");
  File dataFileChina = new File(partitionChina, PART_00000);
  String data = "1\tchun\tbeijing\n2\tshanghai\tmilan\n";
  FileUtils.writeStringToFile(dataFileChina, data);

  HiveMetaStoreClient sourceClient = catalog.client();

  Table source = new Table();
  source.setDbName(DATABASE);
  source.setTableName(SOURCE_TABLE);
  source.setTableType(TableType.EXTERNAL_TABLE.name());
  Map<String, String> parameters = new HashMap<>();
  parameters.put("comment", "comment source");
  source.setParameters(parameters);

  List<FieldSchema> partitionColumns = Arrays.asList(new FieldSchema("local_date", "string", ""),
      new FieldSchema("local_hour", "string", ""));
  source.setPartitionKeys(partitionColumns);

  List<FieldSchema> dataColumns = Arrays.asList(new FieldSchema("id", "bigint", ""),
      new FieldSchema("name", "string", ""), new FieldSchema("city", "string", ""));

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(dataColumns);
  sd.setLocation(sourceTableUri.toURI().toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());

  source.setSd(sd);

  sourceClient.createTable(source);
  LOG.info(">>>> Partitions added: {}",
      +sourceClient
          .add_partitions(Arrays.asList(newPartition(SOURCE_TABLE, sd, Arrays.asList("2000-01-01", "0"), partitionUk),
              newPartition(SOURCE_TABLE, sd, Arrays.asList("2000-01-02", "0"), partitionChina))));
}
 
Developer: HotelsDotCom, Project: circus-train, Lines: 44, Source: ComparisonToolIntegrationTest.java

Example 15: createReplicaTable

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the package/class required by this method
private void createReplicaTable() throws Exception {
  File partitionEurope = new File(replicaTableUri, "local_date=2000-01-01");
  File partitionUk = new File(partitionEurope, "local_hour=0");
  File dataFileUk = new File(partitionUk, PART_00000);
  FileUtils.writeStringToFile(dataFileUk, "1\tadam\tlondon\tuk\n2\tsusan\tglasgow\tuk\n");

  File partitionAsia = new File(replicaTableUri, "local_date=2000-01-02");
  File partitionChina = new File(partitionAsia, "local_hour=0");
  File dataFileChina = new File(partitionChina, PART_00000);
  String data = "1\tchun\tbeijing\tchina\n2\tshanghai\tmilan\titaly\n";
  FileUtils.writeStringToFile(dataFileChina, data);

  HiveMetaStoreClient replicaClient = catalog.client();

  Table replica = new Table();
  replica.setDbName(DATABASE);
  replica.setTableName(REPLICA_TABLE);
  replica.setTableType(TableType.EXTERNAL_TABLE.name());
  Map<String, String> parameters = new HashMap<>();
  parameters.put("comment", "comment replica");
  replica.setParameters(parameters);
  List<FieldSchema> partitionColumns = Arrays.asList(new FieldSchema("local_date", "string", ""),
      new FieldSchema("local_hour", "string", ""));
  replica.setPartitionKeys(partitionColumns);

  List<FieldSchema> dataColumns = Arrays.asList(new FieldSchema("id", "bigint", ""),
      new FieldSchema("name", "string", ""), new FieldSchema("city", "string", ""),
      new FieldSchema("country", "string", ""));

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(dataColumns);
  sd.setLocation(replicaTableUri.toURI().toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());

  replica.setSd(sd);

  replicaClient.createTable(replica);
  LOG.info(">>>> Partitions added: {}",
      +replicaClient.add_partitions(
          Arrays.asList(newPartition(REPLICA_TABLE, sd, Arrays.asList("2000-01-01", "0"), partitionUk),
              newPartition(REPLICA_TABLE, sd, Arrays.asList("2000-01-02", "0"), partitionChina))));
}
 
Developer: HotelsDotCom, Project: circus-train, Lines: 44, Source: ComparisonToolIntegrationTest.java


Note: The org.apache.hadoop.hive.metastore.api.StorageDescriptor.setParameters examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers, and the source code copyright remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.