This article collects typical usage examples of the Java method org.apache.hadoop.hive.metastore.api.Table.setDbName. If you have been wondering what Table.setDbName does and how to use it, the curated code samples below may help. You can also explore the enclosing class, org.apache.hadoop.hive.metastore.api.Table, for further details.
The following shows 15 code examples of the Table.setDbName method, ordered by popularity by default.
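Before diving into the examples, here is a minimal, self-contained sketch of the method in context (the database and table names are placeholders, not taken from any example below): setDbName records which database a Table metadata object belongs to before the object is sent to the metastore.
import org.apache.hadoop.hive.metastore.api.Table;

// Minimal usage sketch: assign the owning database of a Table metadata
// object; the metastore resolves the table under this database on create.
Table table = new Table();
table.setDbName("my_database"); // placeholder database name
table.setTableName("my_table"); // placeholder table name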
Example 1: createPartitionedTable
import org.apache.hadoop.hive.metastore.api.Table; // import the package/class the method depends on
private Table createPartitionedTable(String databaseName, String tableName) throws Exception {
  Table table = new Table();
  table.setDbName(databaseName);
  table.setTableName(tableName);
  table.setPartitionKeys(Arrays.asList(new FieldSchema("partcol", "int", null)));
  table.setSd(new StorageDescriptor());
  table.getSd().setCols(Arrays.asList(new FieldSchema("id", "int", null), new FieldSchema("name", "string", null)));
  table.getSd().setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
  table.getSd().setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
  table.getSd().setSerdeInfo(new SerDeInfo());
  table.getSd().getSerdeInfo().setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
  HiveMetaStoreClient client = server.newClient();
  client.createTable(table);
  client.close();
  return table;
}
Example 2: newTable
import org.apache.hadoop.hive.metastore.api.Table; // import the package/class the method depends on
private Table newTable() {
  Table table = new Table();
  table.setDbName(DB_NAME);
  table.setTableName(TABLE_NAME);
  table.setTableType(TableType.EXTERNAL_TABLE.name());
  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation(tableLocation);
  table.setSd(sd);
  HashMap<String, String> parameters = new HashMap<>();
  parameters.put(StatsSetupConst.ROW_COUNT, "1");
  table.setParameters(parameters);
  table.setPartitionKeys(PARTITIONS);
  return table;
}
Example 3: createView
import org.apache.hadoop.hive.metastore.api.Table; // import the package/class the method depends on
private static Table createView(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String view,
    String table,
    List<FieldSchema> partitionCols)
    throws TException {
  Table hiveView = new Table();
  hiveView.setDbName(database);
  hiveView.setTableName(view);
  hiveView.setTableType(TableType.VIRTUAL_VIEW.name());
  hiveView.setViewOriginalText(hql(database, table));
  hiveView.setViewExpandedText(expandHql(database, table, DATA_COLUMNS, partitionCols));
  hiveView.setPartitionKeys(partitionCols);
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());
  hiveView.setSd(sd);
  metaStoreClient.createTable(hiveView);
  return hiveView;
}
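The hql and expandHql helpers are not shown in this excerpt. A minimal sketch of what they might look like (hypothetical bodies, assuming the view simply selects every column of the source table):
// Hypothetical helpers for Example 3; not part of the original excerpt.
// A view's "original text" is the query as written; the "expanded text"
// is the same query with table and column references fully qualified.
private static String hql(String database, String table) {
  return String.format("SELECT * FROM %s.%s", database, table);
}

private static String expandHql(
    String database,
    String table,
    List<FieldSchema> dataColumns,
    List<FieldSchema> partitionCols) {
  List<String> columns = new ArrayList<>();
  for (FieldSchema field : dataColumns) {
    columns.add(String.format("`%s`.`%s`", table, field.getName()));
  }
  for (FieldSchema field : partitionCols) {
    columns.add(String.format("`%s`.`%s`", table, field.getName()));
  }
  return String.format("SELECT %s FROM `%s`.`%s`", String.join(", ", columns), database, table);
}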
Example 4: createUnpartitionedTable
import org.apache.hadoop.hive.metastore.api.Table; // import the package/class the method depends on
private Table createUnpartitionedTable(String databaseName, String tableName) throws Exception {
  Table table = new Table();
  table.setDbName(databaseName);
  table.setTableName(tableName);
  table.setSd(new StorageDescriptor());
  table.getSd().setCols(Arrays.asList(new FieldSchema("id", "int", null), new FieldSchema("name", "string", null)));
  table.getSd().setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
  table.getSd().setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
  table.getSd().setSerdeInfo(new SerDeInfo());
  table.getSd().getSerdeInfo().setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
  HiveMetaStoreClient client = server.newClient();
  client.createTable(table);
  client.close();
  return table;
}
Example 5: get_table_objects_by_name_req
import org.apache.hadoop.hive.metastore.api.Table; // import the package/class the method depends on
@Test
public void get_table_objects_by_name_req()
    throws MetaException, InvalidOperationException, UnknownDBException, TException {
  Table table0 = new Table();
  table0.setDbName(DB_P);
  table0.setTableName("table0");
  Table table1 = new Table();
  table1.setDbName(DB_P);
  table1.setTableName("table1");
  GetTablesRequest request = new GetTablesRequest(DB_P);
  request.setTblNames(Arrays.asList(table0.getTableName(), table1.getTableName()));
  GetTablesResult response = new GetTablesResult(Arrays.asList(table0, table1));
  when(primaryClient.get_table_objects_by_name_req(request)).thenReturn(response);
  when(primaryMapping.transformInboundGetTablesRequest(request)).thenReturn(request);
  when(primaryMapping.transformOutboundGetTablesResult(response)).thenReturn(response);
  GetTablesResult result = handler.get_table_objects_by_name_req(request);
  assertThat(result.getTables().size(), is(2));
  assertThat(result.getTables().get(0).getDbName(), is(DB_P));
  assertThat(result.getTables().get(0).getTableName(), is("table0"));
  assertThat(result.getTables().get(1).getDbName(), is(DB_P));
  assertThat(result.getTables().get(1).getTableName(), is("table1"));
}
Example 6: transformOutboundGetTablesResult
import org.apache.hadoop.hive.metastore.api.Table; // import the package/class the method depends on
@Test
public void transformOutboundGetTablesResult() throws Exception {
  Table table = new Table();
  table.setDbName(DB_NAME);
  table.setTableName(TABLE_NAME);
  GetTablesResult result = new GetTablesResult();
  result.setTables(Arrays.asList(table));
  GetTablesResult transformedResult = databaseMapping.transformOutboundGetTablesResult(result);
  assertThat(transformedResult, is(sameInstance(result)));
  assertThat(transformedResult, is(result));
}
Example 7: alter_table_with_environment_context
import org.apache.hadoop.hive.metastore.api.Table; // import the package/class the method depends on
@Test
public void alter_table_with_environment_context() throws InvalidOperationException, MetaException, TException {
  EnvironmentContext environmentContext = new EnvironmentContext();
  Table table = new Table();
  table.setDbName(DB_P);
  Table inbound = new Table();
  when(primaryMapping.transformInboundDatabaseName(DB_P)).thenReturn("inbound");
  when(primaryMapping.transformInboundTable(table)).thenReturn(inbound);
  handler.alter_table_with_environment_context(DB_P, "table", table, environmentContext);
  // write permissions are checked twice: once for the database argument
  // and once for the table's own database, both DB_P here
  verify(primaryMapping, times(2)).checkWritePermissions(DB_P);
  verify(primaryClient).alter_table_with_environment_context("inbound", "table", inbound, environmentContext);
}
Example 8: setupTable
import org.apache.hadoop.hive.metastore.api.Table; // import the package/class the method depends on
@Before
public void setupTable() {
  sourceTable = new Table();
  sourceTable.setDbName(DB_NAME);
  sourceTable.setTableName(TABLE_NAME);
  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation(TABLE_LOCATION);
  sourceTable.setSd(sd);
}
Example 9: newTable
import org.apache.hadoop.hive.metastore.api.Table; // import the package/class the method depends on
public static Table newTable(String name, String dbName, List<FieldSchema> partitionKeys, StorageDescriptor sd) {
  Table table = new Table();
  table.setTableName(name);
  table.setDbName(dbName);
  table.setSd(sd);
  table.setPartitionKeys(partitionKeys);
  table.setTableType(TableType.EXTERNAL_TABLE.name());
  table.setParameters(new HashMap<String, String>());
  return table;
}
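A possible call site for this factory (hypothetical names and location; the StorageDescriptor setup mirrors the other examples on this page):
// Hypothetical usage of the newTable factory above.
StorageDescriptor sd = new StorageDescriptor();
sd.setCols(Arrays.asList(new FieldSchema("id", "int", null)));
sd.setLocation("/warehouse/my_db/my_table"); // placeholder location
Table table = newTable(
    "my_table",
    "my_db",
    Arrays.asList(new FieldSchema("part", "string", null)),
    sd);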
Example 10: alter_table
import org.apache.hadoop.hive.metastore.api.Table; // import the package/class the method depends on
@Test
public void alter_table() throws InvalidOperationException, MetaException, TException {
  Table table = new Table();
  table.setDbName(DB_P);
  Table inbound = new Table();
  when(primaryMapping.transformInboundDatabaseName(DB_P)).thenReturn("inbound");
  when(primaryMapping.transformInboundTable(table)).thenReturn(inbound);
  handler.alter_table(DB_P, "table", table);
  verify(primaryMapping, times(2)).checkWritePermissions(DB_P);
  verify(primaryClient).alter_table("inbound", "table", inbound);
}
Example 11: createPartitionedTable
import org.apache.hadoop.hive.metastore.api.Table; // import the package/class the method depends on
public static Table createPartitionedTable(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String table,
    URI location)
    throws Exception {
  Table hiveTable = new Table();
  hiveTable.setDbName(database);
  hiveTable.setTableName(table);
  hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
  hiveTable.putToParameters("EXTERNAL", "TRUE");
  hiveTable.setPartitionKeys(PARTITION_COLUMNS);
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setLocation(location.toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setInputFormat(TextInputFormat.class.getName());
  sd.setOutputFormat(TextOutputFormat.class.getName());
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setSerializationLib("org.apache.hadoop.hive.serde2.OpenCSVSerde");
  hiveTable.setSd(sd);
  metaStoreClient.createTable(hiveTable);
  // attach table-level column statistics for the "id" column
  ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, database, table);
  ColumnStatisticsData statsData = new ColumnStatisticsData(_Fields.LONG_STATS, new LongColumnStatsData(1L, 2L));
  ColumnStatisticsObj cso1 = new ColumnStatisticsObj("id", "bigint", statsData);
  List<ColumnStatisticsObj> statsObj = Collections.singletonList(cso1);
  metaStoreClient.updateTableColumnStatistics(new ColumnStatistics(statsDesc, statsObj));
  return hiveTable;
}
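To confirm the statistics were written, they can be read back with HiveMetaStoreClient.getTableColumnStatistics; a minimal sketch (the column name "id" matches the ColumnStatisticsObj created above):
// Read back the table-level statistics for the "id" column.
List<ColumnStatisticsObj> stats =
    metaStoreClient.getTableColumnStatistics(database, table, Collections.singletonList("id"));
LongColumnStatsData longStats = stats.get(0).getStatsData().getLongStats();
System.out.println("numNulls=" + longStats.getNumNulls() + ", numDVs=" + longStats.getNumDVs());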
Example 12: transformOutboundTable
import org.apache.hadoop.hive.metastore.api.Table; // import the package/class the method depends on
@Test
public void transformOutboundTable() throws Exception {
  Table table = new Table();
  table.setDbName(DB_NAME);
  Table result = databaseMapping.transformOutboundTable(table);
  // the mapping rewrites the database name in place, so the same instance
  // comes back carrying the outbound database name
  assertThat(result, is(sameInstance(table)));
  assertThat(result.getDbName(), is(OUT_DB_NAME));
}
Example 13: newTable
import org.apache.hadoop.hive.metastore.api.Table; // import the package/class the method depends on
private static Table newTable(String databaseName, String tableName, String location) {
  Table table = new Table();
  table.setDbName(databaseName);
  table.setTableName(tableName);
  table.setParameters(new HashMap<String, String>());
  table.setPartitionKeys(Arrays.asList(new FieldSchema("a", "string", null)));
  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation(location);
  table.setSd(sd);
  return table;
}
Example 14: newTable
import org.apache.hadoop.hive.metastore.api.Table; // import the package/class the method depends on
public static Table newTable(String database, String tableName) {
  Table table = new Table();
  table.setDbName(database);
  table.setTableName(tableName);
  table.setTableType(TABLE_TYPE);
  table.setOwner(OWNER);
  table.setCreateTime(CREATE_TIME);
  table.setRetention(RETENTION);
  Map<String, List<PrivilegeGrantInfo>> userPrivileges = new HashMap<>();
  userPrivileges.put("read", ImmutableList.of(new PrivilegeGrantInfo()));
  PrincipalPrivilegeSet privileges = new PrincipalPrivilegeSet();
  privileges.setUserPrivileges(userPrivileges);
  table.setPrivileges(privileges);
  StorageDescriptor storageDescriptor = new StorageDescriptor();
  storageDescriptor.setCols(COLS);
  storageDescriptor.setInputFormat(INPUT_FORMAT);
  storageDescriptor.setOutputFormat(OUTPUT_FORMAT);
  storageDescriptor.setSerdeInfo(new SerDeInfo(SERDE_INFO_NAME, SERIALIZATION_LIB, new HashMap<String, String>()));
  storageDescriptor.setSkewedInfo(new SkewedInfo());
  storageDescriptor.setParameters(new HashMap<String, String>());
  storageDescriptor.setLocation(DATABASE + "/" + tableName + "/");
  table.setSd(storageDescriptor);
  Map<String, String> parameters = new HashMap<>();
  parameters.put("com.company.parameter", "abc");
  table.setParameters(parameters);
  return table;
}
Example 15: init
import org.apache.hadoop.hive.metastore.api.Table; // import the package/class the method depends on
@Before
public void init() {
  table = new Table();
  table.setDbName("database");
  table.setTableName("table");
  table.setTableType("type");
  Map<String, List<PrivilegeGrantInfo>> userPrivileges = new HashMap<>();
  userPrivileges.put("read", ImmutableList.of(new PrivilegeGrantInfo()));
  PrincipalPrivilegeSet privileges = new PrincipalPrivilegeSet();
  privileges.setUserPrivileges(userPrivileges);
  table.setPrivileges(privileges);
  StorageDescriptor storageDescriptor = new StorageDescriptor();
  storageDescriptor.setCols(Arrays.asList(new FieldSchema("a", "int", null)));
  storageDescriptor.setInputFormat("input_format");
  storageDescriptor.setOutputFormat("output_format");
  storageDescriptor.setSerdeInfo(new SerDeInfo("serde", "lib", new HashMap<String, String>()));
  storageDescriptor.setSkewedInfo(new SkewedInfo());
  storageDescriptor.setParameters(new HashMap<String, String>());
  storageDescriptor.setLocation("database/table/");
  table.setSd(storageDescriptor);
  Map<String, String> parameters = new HashMap<>();
  parameters.put("com.company.parameter", "abc");
  table.setParameters(parameters);
}