

Java Table.setTableType Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hive.ql.metadata.Table.setTableType. If you are wondering what Table.setTableType does, how to use it, or what example usages look like, the curated code examples below may help. You can also explore further usage examples of org.apache.hadoop.hive.ql.metadata.Table, the class this method belongs to.


Below are 10 code examples of the Table.setTableType method, sorted by popularity by default.
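Before the project-specific examples, here is a minimal sketch of the two patterns that recur below. It is not taken from any single project; the database name, table name, and path are placeholders. It shows an external table, which conventionally also sets the "EXTERNAL" parameter and a data location, and a managed table, where Hive owns the data.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.ql.metadata.Table;

// Minimal sketch; "default", "example_table", and the path are placeholder values.
public class TableTypeSketch {

  static Table externalTable() {
    Table table = new Table("default", "example_table");
    table.setTableType(TableType.EXTERNAL_TABLE);
    // External tables conventionally also carry the EXTERNAL parameter,
    // so that dropping the table leaves the underlying data in place.
    table.getParameters().put("EXTERNAL", "TRUE");
    table.setDataLocation(new Path("/tmp/example_table"));
    return table;
  }

  static Table managedTable() {
    Table table = new Table("default", "example_table");
    table.setTableType(TableType.MANAGED_TABLE); // Hive manages the data lifecycle
    return table;
  }
}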

Example 1: constructAvroTable

import org.apache.hadoop.hive.ql.metadata.Table; // import the package/class this method depends on
private Table constructAvroTable(String database, String tableName, Schema schema, Partitioner partitioner)
    throws HiveMetaStoreException {
  Table table = newTable(database, tableName);
  table.setTableType(TableType.EXTERNAL_TABLE);
  table.getParameters().put("EXTERNAL", "TRUE");
  String tablePath = FileUtils.hiveDirectoryName(url, topicsDir, tableName);
  table.setDataLocation(new Path(tablePath));
  table.setSerializationLib(avroSerde);
  try {
    table.setInputFormatClass(avroInputFormat);
    table.setOutputFormatClass(avroOutputFormat);
  } catch (HiveException e) {
    throw new HiveMetaStoreException("Cannot find input/output format:", e);
  }
  List<FieldSchema> columns = HiveSchemaConverter.convertSchema(schema);
  table.setFields(columns);
  table.setPartCols(partitioner.partitionFields());
  table.getParameters().put(AVRO_SCHEMA_LITERAL, avroData.fromConnectSchema(schema).toString());
  return table;
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines: 21, Source: AvroHiveUtil.java
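A design note on this example: it sets both TableType.EXTERNAL_TABLE and the table parameter "EXTERNAL" = "TRUE". The Hive metastore has historically consulted the parameter, not just the table type, when deciding whether a table is external (that is, whether dropping it should leave the data files in place), so connectors typically set both to be safe.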

Example 2: constructParquetTable

import org.apache.hadoop.hive.ql.metadata.Table; // import the package/class this method depends on
private Table constructParquetTable(String database, String tableName, Schema schema, Partitioner partitioner) throws HiveMetaStoreException {
  Table table = newTable(database, tableName);
  table.setTableType(TableType.EXTERNAL_TABLE);
  table.getParameters().put("EXTERNAL", "TRUE");
  String tablePath = FileUtils.hiveDirectoryName(url, topicsDir, tableName);
  table.setDataLocation(new Path(tablePath));
  table.setSerializationLib(getHiveParquetSerde());
  try {
    table.setInputFormatClass(getHiveParquetInputFormat());
    table.setOutputFormatClass(getHiveParquetOutputFormat());
  } catch (HiveException e) {
    throw new HiveMetaStoreException("Cannot find input/output format:", e);
  }
  // convert copycat (Kafka Connect) schema to Hive columns
  List<FieldSchema> columns = HiveSchemaConverter.convertSchema(schema);
  table.setFields(columns);
  table.setPartCols(partitioner.partitionFields());
  return table;
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines: 20, Source: ParquetHiveUtil.java

Example 3: constructAvroTable

import org.apache.hadoop.hive.ql.metadata.Table; // import the package/class this method depends on
private Table constructAvroTable(String database, String tableName, Schema schema, Partitioner partitioner)
    throws HiveMetaStoreException {
  Table table = new Table(database, tableName);
  table.setTableType(TableType.EXTERNAL_TABLE);
  table.getParameters().put("EXTERNAL", "TRUE");
  String tablePath = FileUtils.hiveDirectoryName(url, topicsDir, tableName);
  table.setDataLocation(new Path(tablePath));
  table.setSerializationLib(avroSerde);
  try {
    table.setInputFormatClass(avroInputFormat);
    table.setOutputFormatClass(avroOutputFormat);
  } catch (HiveException e) {
    throw new HiveMetaStoreException("Cannot find input/output format:", e);
  }
  List<FieldSchema> columns = HiveSchemaConverter.convertSchema(schema);
  table.setFields(columns);
  table.setPartCols(partitioner.partitionFields());
  table.getParameters().put(AVRO_SCHEMA_LITERAL, avroData.fromConnectSchema(schema).toString());
  return table;
}
 
Developer ID: qubole, Project: streamx, Lines: 21, Source: AvroHiveUtil.java

Example 4: constructParquetTable

import org.apache.hadoop.hive.ql.metadata.Table; // import the package/class this method depends on
private Table constructParquetTable(String database, String tableName, Schema schema, Partitioner partitioner) throws HiveMetaStoreException {
  Table table = new Table(database, tableName);
  table.setTableType(TableType.EXTERNAL_TABLE);
  table.getParameters().put("EXTERNAL", "TRUE");
  String tablePath = FileUtils.hiveDirectoryName(url, topicsDir, tableName);
  table.setDataLocation(new Path(tablePath));
  table.setSerializationLib(getHiveParquetSerde());
  try {
    table.setInputFormatClass(getHiveParquetInputFormat());
    table.setOutputFormatClass(getHiveParquetOutputFormat());
  } catch (HiveException e) {
    throw new HiveMetaStoreException("Cannot find input/output format:", e);
  }
  // convert copycat (Kafka Connect) schema to Hive columns
  List<FieldSchema> columns = HiveSchemaConverter.convertSchema(schema);
  table.setFields(columns);
  table.setPartCols(partitioner.partitionFields());
  return table;
}
 
Developer ID: qubole, Project: streamx, Lines: 20, Source: ParquetHiveUtil.java

Example 5: createHiveTable

import org.apache.hadoop.hive.ql.metadata.Table; // import the package/class this method depends on
/**
 * Creates the hive table.
 *
 * @param tableName  the table name
 * @param parameters additional table parameters, merged over the defaults
 * @throws HiveException the hive exception
 */
public static void createHiveTable(String tableName, Map<String, String> parameters) throws HiveException {
  List<FieldSchema> columns = new ArrayList<FieldSchema>();
  columns.add(new FieldSchema("col1", "string", ""));
  List<FieldSchema> partCols = new ArrayList<FieldSchema>();
  partCols.add(new FieldSchema("pcol1", "string", ""));
  Map<String, String> params = new HashMap<String, String>();
  params.put("test.hive.table.prop", "tvalue");
  if (null != parameters && !parameters.isEmpty()) {
    params.putAll(parameters);
  }
  Table tbl = Hive.get().newTable(tableName);
  tbl.setTableType(TableType.MANAGED_TABLE);
  tbl.getTTable().getSd().setCols(columns);
  tbl.setPartCols(partCols);
  tbl.getTTable().getParameters().putAll(params);
  Hive.get().createTable(tbl);
}
 
Developer ID: apache, Project: lens, Lines: 24, Source: LensServerTestUtil.java

Example 6: createTestTable

import org.apache.hadoop.hive.ql.metadata.Table; // import the package/class this method depends on
private Table createTestTable(String databaseName, String tableName) throws HiveException {
    Table table = new Table(databaseName, tableName);
    table.setInputFormatClass(TextInputFormat.class);
    table.setFields(new ArrayList<FieldSchema>() {{
        add(new FieldSchema("col1", "string", "comment1"));
    }});
    table.setTableType(TableType.EXTERNAL_TABLE);
    table.setDataLocation(new Path("somehdfspath"));
    return table;
}
 
Developer ID: apache, Project: incubator-atlas, Lines: 12, Source: HiveMetaStoreBridgeTest.java
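Unlike the other examples, this one never registers the table with a metastore: it builds a Table object purely in memory, as a fixture for unit tests of HiveMetaStoreBridge.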

Example 7: createCubeHiveTable

import org.apache.hadoop.hive.ql.metadata.Table; // import the package/class this method depends on
private Table createCubeHiveTable(AbstractCubeTable table) throws LensException {
  try {
    Table tbl = getClient().newTable(table.getName().toLowerCase());
    tbl.setTableType(TableType.MANAGED_TABLE);
    tbl.getTTable().getSd().setCols(table.getColumns());
    tbl.getTTable().getParameters().putAll(table.getProperties());
    getClient().createTable(tbl);
    // do get to update cache
    getTable(tbl.getTableName());
    return tbl;
  } catch (Exception e) {
    throw new LensException("Exception creating table", e);
  }
}
 
Developer ID: apache, Project: lens, Lines: 15, Source: CubeMetastoreClient.java
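The read-back via getTable immediately after createTable is deliberate: as the inline comment notes, CubeMetastoreClient caches table metadata, and fetching the freshly created table primes that cache.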

Example 8: createTempMetastoreTable

import org.apache.hadoop.hive.ql.metadata.Table; // import the package/class this method depends on
/**
 * Creates the temp metastore table.
 *
 * @param dataLocation the data location
 * @param metadata     the metadata
 * @return the name of the created temporary table
 * @throws HiveException the hive exception
 */
protected String createTempMetastoreTable(String dataLocation, QueryResultSetMetadata metadata) throws HiveException {
  String tableName = "lens_rdd_" + UUID.randomUUID().toString().replace("-", "_");

  Hive hiveClient = Hive.get(HIVE_CONF);
  Table tbl = hiveClient.newTable("default." + tableName);
  tbl.setTableType(TableType.MANAGED_TABLE);
  tbl.setInputFormatClass(INPUT_FORMAT);
  // String outputFormat = null;
  // tbl.setOutputFormatClass(outputFormat);

  // Add columns
  for (ResultColumn rc : metadata.getColumns()) {
    tbl.getCols().add(new FieldSchema(rc.getName(), toHiveType(rc.getType()), "default"));
    System.out.println("@@@@ COL " + rc.getName() + " TYPE " + toHiveType(rc.getType()));
  }

  tbl.getPartCols().add(new FieldSchema(TEMP_TABLE_PART_COL, "string", "default"));
  hiveClient.createTable(tbl);

  log.info("Table {} created", tableName);

  // Add partition to the table
  AddPartitionDesc partitionDesc = new AddPartitionDesc("default", tableName, false);
  Map<String, String> partSpec = new HashMap<String, String>();
  partSpec.put(TEMP_TABLE_PART_COL, TEMP_TABLE_PART_VAL);
  partitionDesc.addPartition(partSpec, dataLocation);
  hiveClient.createPartitions(partitionDesc);
  log.info("Created partition in {} for data in {}", tableName, dataLocation);

  return tableName;
}
 
Developer ID: apache, Project: lens, Lines: 40, Source: LensRDDClient.java
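This example and the two that follow share a pattern: the table is declared with a single dummy string partition column, and the pre-existing data directory is then attached as a partition through AddPartitionDesc. This lets Hive expose data that already sits at a known location without copying or moving the files.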

Example 9: createTable

import org.apache.hadoop.hive.ql.metadata.Table; // import the package/class this method depends on
public void createTable(String tableName, String dataFile) throws HiveException {

    File filedataFile = new File(dataFile);
    Path dataFilePath = new Path(filedataFile.toURI());
    Path partDir = dataFilePath.getParent();

    // Create table
    List<FieldSchema> columns = new ArrayList<FieldSchema>();

    // Label is optional. Not used for unsupervised models.
    // If present, label will be the first column, followed by features
    if (labelColumn != null) {
      columns.add(new FieldSchema(labelColumn, "double", "Labelled Column"));
    }

    for (String feature : features) {
      columns.add(new FieldSchema(feature, "double", "Feature " + feature));
    }

    Table tbl = Hive.get(conf).newTable(database + "." + tableName);
    tbl.setTableType(TableType.MANAGED_TABLE);
    tbl.getTTable().getSd().setCols(columns);
    // tbl.getTTable().getParameters().putAll(new HashMap<String, String>());
    tbl.setInputFormatClass(TextInputFormat.class);
    tbl.setSerdeParam(serdeConstants.LINE_DELIM, "\n");
    tbl.setSerdeParam(serdeConstants.FIELD_DELIM, " ");

    List<FieldSchema> partCols = new ArrayList<FieldSchema>(1);
    partCols.add(new FieldSchema("dummy_partition_col", "string", ""));
    tbl.setPartCols(partCols);

    Hive.get(conf).dropTable(database, tableName, false, true);
    Hive.get(conf).createTable(tbl, true);
    log.info("Created table {}", tableName);

    // Add partition for the data file
    AddPartitionDesc partitionDesc = new AddPartitionDesc(database, tableName,
        false);
    Map<String, String> partSpec = new HashMap<String, String>();
    partSpec.put("dummy_partition_col", "dummy_val");
    partitionDesc.addPartition(partSpec, partDir.toUri().toString());
    Hive.get(conf).createPartitions(partitionDesc);
    log.info("{}: Added partition {}", tableName, partDir.toUri().toString());
  }
 
Developer ID: apache, Project: lens, Lines: 45, Source: MLRunner.java

Example 10: createTable

import org.apache.hadoop.hive.ql.metadata.Table; // import the package/class this method depends on
/**
 * Creates the example table.
 *
 * @param conf           the conf
 * @param database       the database
 * @param tableName      the table name
 * @param sampleDataFile the sample data file
 * @param labelColumn    the label column
 * @param tableParams    the table parameters
 * @param features       the features
 * @throws HiveException the hive exception
 */
public static void createTable(HiveConf conf, String database, String tableName, String sampleDataFile,
  String labelColumn, Map<String, String> tableParams, String... features) throws HiveException {

  Path dataFilePath = new Path(sampleDataFile);
  Path partDir = dataFilePath.getParent();

  // Create table
  List<FieldSchema> columns = new ArrayList<FieldSchema>();

  // Label is optional. Not used for unsupervised models.
  // If present, label will be the first column, followed by features
  if (labelColumn != null) {
    columns.add(new FieldSchema(labelColumn, "double", "Labelled Column"));
  }

  for (String feature : features) {
    columns.add(new FieldSchema(feature, "double", "Feature " + feature));
  }

  Table tbl = Hive.get(conf).newTable(database + "." + tableName);
  tbl.setTableType(TableType.MANAGED_TABLE);
  tbl.getTTable().getSd().setCols(columns);
  tbl.getTTable().getParameters().putAll(tableParams);
  tbl.setInputFormatClass(TextInputFormat.class);
  tbl.setSerdeParam(serdeConstants.LINE_DELIM, "\n");
  tbl.setSerdeParam(serdeConstants.FIELD_DELIM, " ");

  List<FieldSchema> partCols = new ArrayList<FieldSchema>(1);
  partCols.add(new FieldSchema("dummy_partition_col", "string", ""));
  tbl.setPartCols(partCols);

  Hive.get(conf).createTable(tbl, false);
  log.info("Created table {}", tableName);

  // Add partition for the data file
  AddPartitionDesc partitionDesc = new AddPartitionDesc(database, tableName, false);
  Map<String, String> partSpec = new HashMap<String, String>();
  partSpec.put("dummy_partition_col", "dummy_val");
  partitionDesc.addPartition(partSpec, partDir.toUri().toString());
  Hive.get(conf).createPartitions(partitionDesc);
  log.info("{}: Added partition {}", tableName, partDir.toUri().toString());
}
 
Developer ID: apache, Project: lens, Lines: 54, Source: ExampleUtils.java


Note: The org.apache.hadoop.hive.ql.metadata.Table.setTableType method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with its original authors. For redistribution and use, refer to the license of the corresponding project; do not reproduce without permission.