本文整理汇总了Java中org.apache.hadoop.hive.ql.metadata.Table.setDataLocation方法的典型用法代码示例。如果您正苦于以下问题:Java Table.setDataLocation方法的具体用法?Java Table.setDataLocation怎么用?Java Table.setDataLocation使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hive.ql.metadata.Table
的用法示例。
在下文中一共展示了Table.setDataLocation方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: constructAvroTable
import org.apache.hadoop.hive.ql.metadata.Table; //导入方法依赖的package包/类
/**
 * Builds an external, Avro-backed Hive table definition for the given topic.
 *
 * @param database    Hive database the table belongs to
 * @param tableName   table name (also used as the topic directory name)
 * @param schema      Connect schema from which the Hive columns are derived
 * @param partitioner supplies the partition columns
 * @return a fully configured external {@code Table} using the Avro SerDe
 * @throws HiveMetaStoreException if the Avro input/output format classes cannot be resolved
 */
private Table constructAvroTable(String database, String tableName, Schema schema, Partitioner partitioner)
    throws HiveMetaStoreException {
  Table avroTable = newTable(database, tableName);
  // External table: dropping it in the metastore must leave the HDFS data intact.
  avroTable.setTableType(TableType.EXTERNAL_TABLE);
  avroTable.getParameters().put("EXTERNAL", "TRUE");
  avroTable.setDataLocation(new Path(FileUtils.hiveDirectoryName(url, topicsDir, tableName)));
  avroTable.setSerializationLib(avroSerde);
  try {
    avroTable.setInputFormatClass(avroInputFormat);
    avroTable.setOutputFormatClass(avroOutputFormat);
  } catch (HiveException e) {
    throw new HiveMetaStoreException("Cannot find input/output format:", e);
  }
  avroTable.setFields(HiveSchemaConverter.convertSchema(schema));
  avroTable.setPartCols(partitioner.partitionFields());
  // Embed the Avro schema literal so the SerDe can deserialize records.
  avroTable.getParameters().put(AVRO_SCHEMA_LITERAL, avroData.fromConnectSchema(schema).toString());
  return avroTable;
}
示例2: constructParquetTable
import org.apache.hadoop.hive.ql.metadata.Table; //导入方法依赖的package包/类
/**
 * Builds an external, Parquet-backed Hive table definition for the given topic.
 *
 * @param database    Hive database the table belongs to
 * @param tableName   table name (also used as the topic directory name)
 * @param schema      Connect schema from which the Hive columns are derived
 * @param partitioner supplies the partition columns
 * @return a fully configured external {@code Table} using the Parquet SerDe
 * @throws HiveMetaStoreException if the Parquet input/output format classes cannot be resolved
 */
private Table constructParquetTable(String database, String tableName, Schema schema, Partitioner partitioner) throws HiveMetaStoreException {
  Table parquetTable = newTable(database, tableName);
  // External table: dropping it in the metastore must leave the HDFS data intact.
  parquetTable.setTableType(TableType.EXTERNAL_TABLE);
  parquetTable.getParameters().put("EXTERNAL", "TRUE");
  parquetTable.setDataLocation(new Path(FileUtils.hiveDirectoryName(url, topicsDir, tableName)));
  parquetTable.setSerializationLib(getHiveParquetSerde());
  try {
    parquetTable.setInputFormatClass(getHiveParquetInputFormat());
    parquetTable.setOutputFormatClass(getHiveParquetOutputFormat());
  } catch (HiveException e) {
    throw new HiveMetaStoreException("Cannot find input/output format:", e);
  }
  // Convert the Connect (copycat) schema to Hive column descriptors.
  parquetTable.setFields(HiveSchemaConverter.convertSchema(schema));
  parquetTable.setPartCols(partitioner.partitionFields());
  return parquetTable;
}
示例3: constructAvroTable
import org.apache.hadoop.hive.ql.metadata.Table; //导入方法依赖的package包/类
/**
 * Builds an external, Avro-backed Hive table definition for the given topic.
 *
 * @param database    Hive database the table belongs to
 * @param tableName   table name (also used as the topic directory name)
 * @param schema      Connect schema from which the Hive columns are derived
 * @param partitioner supplies the partition columns
 * @return a fully configured external {@code Table} using the Avro SerDe
 * @throws HiveMetaStoreException if the Avro input/output format classes cannot be resolved
 */
private Table constructAvroTable(String database, String tableName, Schema schema, Partitioner partitioner)
    throws HiveMetaStoreException {
  Table avroTable = new Table(database, tableName);
  // External table: dropping it in the metastore must leave the HDFS data intact.
  avroTable.setTableType(TableType.EXTERNAL_TABLE);
  avroTable.getParameters().put("EXTERNAL", "TRUE");
  avroTable.setDataLocation(new Path(FileUtils.hiveDirectoryName(url, topicsDir, tableName)));
  avroTable.setSerializationLib(avroSerde);
  try {
    avroTable.setInputFormatClass(avroInputFormat);
    avroTable.setOutputFormatClass(avroOutputFormat);
  } catch (HiveException e) {
    throw new HiveMetaStoreException("Cannot find input/output format:", e);
  }
  avroTable.setFields(HiveSchemaConverter.convertSchema(schema));
  avroTable.setPartCols(partitioner.partitionFields());
  // Embed the Avro schema literal so the SerDe can deserialize records.
  avroTable.getParameters().put(AVRO_SCHEMA_LITERAL, avroData.fromConnectSchema(schema).toString());
  return avroTable;
}
示例4: constructParquetTable
import org.apache.hadoop.hive.ql.metadata.Table; //导入方法依赖的package包/类
/**
 * Builds an external, Parquet-backed Hive table definition for the given topic.
 *
 * @param database    Hive database the table belongs to
 * @param tableName   table name (also used as the topic directory name)
 * @param schema      Connect schema from which the Hive columns are derived
 * @param partitioner supplies the partition columns
 * @return a fully configured external {@code Table} using the Parquet SerDe
 * @throws HiveMetaStoreException if the Parquet input/output format classes cannot be resolved
 */
private Table constructParquetTable(String database, String tableName, Schema schema, Partitioner partitioner) throws HiveMetaStoreException {
  Table parquetTable = new Table(database, tableName);
  // External table: dropping it in the metastore must leave the HDFS data intact.
  parquetTable.setTableType(TableType.EXTERNAL_TABLE);
  parquetTable.getParameters().put("EXTERNAL", "TRUE");
  parquetTable.setDataLocation(new Path(FileUtils.hiveDirectoryName(url, topicsDir, tableName)));
  parquetTable.setSerializationLib(getHiveParquetSerde());
  try {
    parquetTable.setInputFormatClass(getHiveParquetInputFormat());
    parquetTable.setOutputFormatClass(getHiveParquetOutputFormat());
  } catch (HiveException e) {
    throw new HiveMetaStoreException("Cannot find input/output format:", e);
  }
  // Convert the Connect (copycat) schema to Hive column descriptors.
  parquetTable.setFields(HiveSchemaConverter.convertSchema(schema));
  parquetTable.setPartCols(partitioner.partitionFields());
  return parquetTable;
}
示例5: getTargetTable
import org.apache.hadoop.hive.ql.metadata.Table; //导入方法依赖的package包/类
/**
 * Derives the table to register at the copy destination from the origin table.
 *
 * <p>The copy keeps the origin's definition but points at {@code targetLocation},
 * lives in the configured target database, and is stamped with distcp registration
 * metadata.
 *
 * @param originTable    table at the copy source
 * @param targetLocation data location of the copied table
 * @return the adjusted copy of {@code originTable}
 * @throws IOException if copying or adjusting the table fails (wraps {@code HiveException})
 */
private Table getTargetTable(Table originTable, Path targetLocation) throws IOException {
  try {
    Table target = originTable.copy();
    target.setDbName(this.targetDatabase);
    target.setDataLocation(targetLocation);
    // The table owner must be the flow executor, not the origin table's owner.
    target.setOwner(UserGroupInformation.getCurrentUser().getShortUserName());
    target.getTTable().putToParameters(HiveDataset.REGISTERER, GOBBLIN_DISTCP);
    target.getTTable().putToParameters(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS,
        Long.toString(this.startTime));
    // Drop the origin's create time so the metastore assigns a fresh one.
    target.getTTable().unsetCreateTime();
    HiveAvroCopyEntityHelper.updateTableAttributesIfAvro(target, this);
    return target;
  } catch (HiveException he) {
    throw new IOException(he);
  }
}
示例6: createTestTable
import org.apache.hadoop.hive.ql.metadata.Table; //导入方法依赖的package包/类
/**
 * Creates an external text-format test table with a single string column.
 *
 * @param databaseName Hive database for the test table
 * @param tableName    name of the test table
 * @return a minimal external {@code Table} located at {@code somehdfspath}
 * @throws HiveException if the input format class cannot be set
 */
private Table createTestTable(String databaseName, String tableName) throws HiveException {
  Table table = new Table(databaseName, tableName);
  table.setInputFormatClass(TextInputFormat.class);
  // Plain list instead of double-brace initialization: the anonymous ArrayList
  // subclass would otherwise capture a hidden reference to the enclosing test
  // instance (memory-leak / serialization hazard).
  List<FieldSchema> columns = new ArrayList<>();
  columns.add(new FieldSchema("col1", "string", "comment1"));
  table.setFields(columns);
  table.setTableType(TableType.EXTERNAL_TABLE);
  table.setDataLocation(new Path("somehdfspath"));
  return table;
}