This article collects typical usage examples of the Java method org.apache.hadoop.hive.metastore.api.StorageDescriptor.setLocation. If you have been wondering what StorageDescriptor.setLocation does, how to use it, or what real-world calls look like, the curated examples below should help. You can also explore further usage of the enclosing class, org.apache.hadoop.hive.metastore.api.StorageDescriptor.
The following 15 code examples of StorageDescriptor.setLocation are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java examples.
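As a quick orientation before the examples: setLocation takes a plain location URI string pointing at the data directory of a table or partition. A minimal sketch (the path below is a hypothetical placeholder, not taken from the examples):

import org.apache.hadoop.hive.metastore.api.StorageDescriptor;

StorageDescriptor sd = new StorageDescriptor();
// Hypothetical location; any HDFS or file URI string is acceptable here.
sd.setLocation("hdfs://namenode:8020/warehouse/my_db.db/my_table");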
Example 1: createPartitionedTable
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the class the method depends on
static Table createPartitionedTable(HiveMetaStoreClient metaStoreClient, String database, String table, File location)
    throws Exception {
  Table hiveTable = new Table();
  hiveTable.setDbName(database);
  hiveTable.setTableName(table);
  hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
  hiveTable.putToParameters("EXTERNAL", "TRUE");
  hiveTable.setPartitionKeys(PARTITION_COLUMNS);
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  // Point the external table at the supplied data directory.
  sd.setLocation(location.toURI().toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());
  hiveTable.setSd(sd);
  metaStoreClient.createTable(hiveTable);
  return hiveTable;
}
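Note that PARTITION_COLUMNS and DATA_COLUMNS are constants defined elsewhere in the test class and are not shown above. A plausible shape for them, purely as an illustrative assumption:

// Hypothetical definitions; the originals are not part of this snippet.
private static final List<FieldSchema> DATA_COLUMNS = Arrays.asList(
    new FieldSchema("id", "bigint", null),
    new FieldSchema("name", "string", null));
private static final List<FieldSchema> PARTITION_COLUMNS = Arrays.asList(
    new FieldSchema("part", "string", null));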
Example 2: createUnpartitionedTable
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the class the method depends on
static Table createUnpartitionedTable(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String table,
    File location) throws TException {
  Table hiveTable = new Table();
  hiveTable.setDbName(database);
  hiveTable.setTableName(table);
  hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
  hiveTable.putToParameters("EXTERNAL", "TRUE");
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  // Same as Example 1, but without partition keys.
  sd.setLocation(location.toURI().toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());
  hiveTable.setSd(sd);
  metaStoreClient.createTable(hiveTable);
  return hiveTable;
}
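A hedged usage sketch for the two helpers above; the client construction, database name, and path are placeholders:

// Assumes a reachable metastore and an existing database named "test_db".
HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
Table table = createUnpartitionedTable(client, "test_db", "source_table", new File("/tmp/source_table"));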
Example 3: addTestPartition
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the class the method depends on
public Partition addTestPartition(Table tbl, List<String> values, int createTime) throws Exception {
  StorageDescriptor partitionSd = new StorageDescriptor();
  if (StringUtils.isNotBlank(tbl.getSd().getLocation())) {
    // Note: this appends the List's toString() form, yielding e.g. "/base[v1, v2]".
    partitionSd.setLocation(tbl.getSd().getLocation() + values);
  } else {
    partitionSd.setLocation("/tmp/" + tbl.getTableName() + "/part1");
  }
  partitionSd.setSerdeInfo(
      new SerDeInfo("name", "serializationLib", ImmutableMap.of(HiveAvroSerDeManager.SCHEMA_URL, "/tmp/dummy")));
  partitionSd.setCols(tbl.getPartitionKeys());
  Partition partition =
      new Partition(values, tbl.getDbName(), tbl.getTableName(), 1, 1, partitionSd, new HashMap<String, String>());
  partition.setCreateTime(createTime);
  return this.getLocalMetastoreClient().add_partition(partition);
}
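In the Partition constructor above, the two 1 literals fill the Thrift createTime and lastAccessTime fields (seconds since the epoch); the create time is then overwritten by the explicit setCreateTime call.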
Example 4: newTable
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the class the method depends on
private static Table newTable(String databaseName, String tableName, String location) {
  Table table = new Table();
  table.setDbName(databaseName);
  table.setTableName(tableName);
  table.setParameters(new HashMap<String, String>());
  table.setPartitionKeys(Arrays.asList(new FieldSchema("a", "string", null)));
  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation(location);
  table.setSd(sd);
  return table;
}
Example 5: newPartition
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the class the method depends on
private static Partition newPartition(String databaseName, String tableName, String location) {
  Partition partition = new Partition();
  partition.setDbName(databaseName);
  partition.setTableName(tableName);
  partition.setParameters(new HashMap<String, String>());
  partition.setValues(Arrays.asList("01"));
  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation(location);
  partition.setSd(sd);
  return partition;
}
Example 6: newPartition
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the class the method depends on
private Partition newPartition(String... values) {
  Partition partition = new Partition();
  partition.setDbName(DB_NAME);
  partition.setTableName(TABLE_NAME);
  StorageDescriptor sd = new StorageDescriptor();
  // Derive the partition directory from the table location and the partition values.
  sd.setLocation(new Path(tableLocation, partitionName(values)).toUri().toString());
  sd.setCols(FIELDS);
  partition.setSd(sd);
  HashMap<String, String> parameters = new HashMap<>();
  parameters.put(StatsSetupConst.ROW_COUNT, "1");
  partition.setParameters(parameters);
  partition.setValues(Arrays.asList(values));
  return partition;
}
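The partitionName helper is defined elsewhere. One plausible implementation, assuming a single partition column named part (illustrative only):

// Hypothetical: build a Hive-style partition directory name from the first value.
private String partitionName(String... values) {
  return "part=" + values[0];
}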
Example 7: setupTable
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the class the method depends on
@Before
public void setupTable() {
  sourceTable = new Table();
  sourceTable.setDbName(DB_NAME);
  sourceTable.setTableName(TABLE_NAME);
  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation(TABLE_LOCATION);
  sourceTable.setSd(sd);
}
Example 8: calculateSubPaths
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the class the method depends on
@Test
public void calculateSubPaths() {
  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation("/a/partition1");
  partition1.setSd(sd);
  List<Path> subPaths = HdfsSnapshotLocationManager.calculateSubPaths(Collections.singletonList(partition1), "/a",
      "/b");
  assertThat(subPaths.get(0).toString(), is("/b/partition1"));
}
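Judging from the assertion, calculateSubPaths rebases each partition location from the source base path ("/a") onto the replica base path ("/b"), preserving the partition's relative sub-path.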
Example 9: newPartition
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the class the method depends on
public static Partition newPartition(String database, String tableName, String partitionValue) {
  Partition partition = new Partition();
  partition.setDbName(database);
  partition.setTableName(tableName);
  partition.setCreateTime(CREATE_TIME);
  partition.setValues(ImmutableList.of(partitionValue));
  Map<String, List<PrivilegeGrantInfo>> userPrivileges = new HashMap<>();
  userPrivileges.put("read", ImmutableList.of(new PrivilegeGrantInfo()));
  PrincipalPrivilegeSet privileges = new PrincipalPrivilegeSet();
  privileges.setUserPrivileges(userPrivileges);
  partition.setPrivileges(privileges);
  StorageDescriptor storageDescriptor = new StorageDescriptor();
  storageDescriptor.setCols(COLS);
  storageDescriptor.setInputFormat(INPUT_FORMAT);
  storageDescriptor.setOutputFormat(OUTPUT_FORMAT);
  storageDescriptor.setSerdeInfo(new SerDeInfo(SERDE_INFO_NAME, SERIALIZATION_LIB, new HashMap<String, String>()));
  storageDescriptor.setSkewedInfo(new SkewedInfo());
  storageDescriptor.setParameters(new HashMap<String, String>());
  // Conventional layout: <database>/<table>/<partition value>/
  storageDescriptor.setLocation(DATABASE + "/" + tableName + "/" + partitionValue + "/");
  partition.setSd(storageDescriptor);
  Map<String, String> parameters = new HashMap<>();
  parameters.put("com.company.parameter", "abc");
  partition.setParameters(parameters);
  return partition;
}
Example 10: calculateSubPathsUriEncodedPathAndPartition
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the class the method depends on
@Test
public void calculateSubPathsUriEncodedPathAndPartition() {
  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation("hdfs://sandboxcluster/a%25b/partition1=url%25encoded.%3A");
  partition1.setSd(sd);
  List<Path> subPaths = HdfsSnapshotLocationManager.calculateSubPaths(Collections.singletonList(partition1),
      "hdfs://sandboxcluster/a%25b", "/b%25c");
  assertThat(subPaths.get(0).toString(), is("/b%25c/partition1=url%25encoded.%3A"));
}
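Read alongside Example 8, this test shows that percent-encoded characters in both the base path and the partition directory survive the rebasing untouched.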
Example 11: calculateSubPathsTrailingSlash
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the class the method depends on
@Test
public void calculateSubPathsTrailingSlash() {
  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation("/a/partition1");
  partition1.setSd(sd);
  List<Path> subPaths = HdfsSnapshotLocationManager.calculateSubPaths(Collections.singletonList(partition1), "/a/",
      "/b");
  assertThat(subPaths.get(0).toString(), is("/b/partition1"));
}
Example 12: partitionSubPath
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the class the method depends on
@Test
public void partitionSubPath() throws Exception {
  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation(PARTITION_BASE_LOCATION + "/partition1");
  partition1.setSd(sd);
  HdfsSnapshotLocationManager manager = new HdfsSnapshotLocationManager(hiveConf, EVENT_ID, sourceTable,
      Arrays.asList(partition1), false, PARTITION_BASE_LOCATION, fileSystemFactory, sourceCatalogListener);
  assertThat(manager.getPartitionSubPath(new Path(partition1.getSd().getLocation())), is(new Path("partition1")));
}
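The assertion indicates that getPartitionSubPath strips the configured base location from the partition's location, returning only the partition-relative path.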
Example 13: partitionSubPathUriEncoded
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the class the method depends on
@Test
public void partitionSubPathUriEncoded() throws Exception {
  Path path = new Path(PARTITION_BASE_LOCATION + "/partition%251");
  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation(path.toUri().getPath());
  partition1.setSd(sd);
  HdfsSnapshotLocationManager manager = new HdfsSnapshotLocationManager(hiveConf, EVENT_ID, sourceTable,
      Arrays.asList(partition1), false, PARTITION_BASE_LOCATION, fileSystemFactory, sourceCatalogListener);
  assertThat(manager.getPartitionSubPath(path), is(new Path("partition%251")));
}
Example 14: invalidPartitionSubPath
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the class the method depends on
@Test(expected = CircusTrainException.class)
public void invalidPartitionSubPath() throws Exception {
  StorageDescriptor sd = new StorageDescriptor();
  // A location outside PARTITION_BASE_LOCATION, so the sub-path cannot be resolved.
  sd.setLocation("anotherBaseLocation" + "/partition1");
  partition1.setSd(sd);
  HdfsSnapshotLocationManager manager = new HdfsSnapshotLocationManager(hiveConf, EVENT_ID, sourceTable,
      Arrays.asList(partition1), false, PARTITION_BASE_LOCATION, fileSystemFactory, sourceCatalogListener);
  manager.getPartitionSubPath(new Path(partition1.getSd().getLocation()));
}
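The expected CircusTrainException shows that a partition located outside the configured base location is rejected rather than silently rebased.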
Example 15: init
import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the class the method depends on
@Before
public void init() {
  table = new Table();
  table.setDbName("database");
  table.setTableName("table");
  table.setTableType("type");
  Map<String, List<PrivilegeGrantInfo>> userPrivileges = new HashMap<>();
  userPrivileges.put("read", ImmutableList.of(new PrivilegeGrantInfo()));
  PrincipalPrivilegeSet privileges = new PrincipalPrivilegeSet();
  privileges.setUserPrivileges(userPrivileges);
  table.setPrivileges(privileges);
  StorageDescriptor storageDescriptor = new StorageDescriptor();
  storageDescriptor.setCols(Arrays.asList(new FieldSchema("a", "int", null)));
  storageDescriptor.setInputFormat("input_format");
  storageDescriptor.setOutputFormat("output_format");
  storageDescriptor.setSerdeInfo(new SerDeInfo("serde", "lib", new HashMap<String, String>()));
  storageDescriptor.setSkewedInfo(new SkewedInfo());
  storageDescriptor.setParameters(new HashMap<String, String>());
  storageDescriptor.setLocation("database/table/");
  table.setSd(storageDescriptor);
  Map<String, String> parameters = new HashMap<>();
  parameters.put("com.company.parameter", "abc");
  table.setParameters(parameters);
}