本文整理汇总了Java中org.apache.hadoop.hive.metastore.api.Order类的典型用法代码示例。如果您正苦于以下问题:Java Order类的具体用法?Java Order怎么用?Java Order使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
Order类属于org.apache.hadoop.hive.metastore.api包,在下文中一共展示了Order类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: allShortCircuit
import org.apache.hadoop.hive.metastore.api.Order; //导入依赖的package包/类
// Verifies SHORT_CIRCUIT partition comparison: every comparable property of the
// left partition is made to differ from the right, but the comparator must stop
// at the first difference it encounters and report only partition.parameters.
@Test
public void allShortCircuit() {
// Mutate top-level partition state.
left.getPartition().getParameters().put("com.company.key", "value");
left.getPartition().setValues(ImmutableList.of("p1", "p2"));
List<PrivilegeGrantInfo> privilege = ImmutableList.of(new PrivilegeGrantInfo());
left.getPartition().setPrivileges(new PrincipalPrivilegeSet(ImmutableMap.of("write", privilege), null, null));
// Mutate every storage-descriptor property as well.
left.getPartition().getSd().setLocation("left");
left.getPartition().getSd().setInputFormat("LeftInputFormat");
left.getPartition().getSd().setOutputFormat("LeftOutputFormat");
left.getPartition().getSd().getParameters().put("com.company.key", "value");
left.getPartition().getSd().getSerdeInfo().setName("left serde info");
left.getPartition().getSd().getSkewedInfo().setSkewedColNames(ImmutableList.of("left skewed col"));
left.getPartition().getSd().setCols(ImmutableList.of(new FieldSchema("left", "type", "comment")));
left.getPartition().getSd().setSortCols(ImmutableList.of(new Order()));
left.getPartition().getSd().setBucketCols(ImmutableList.of("bucket"));
left.getPartition().getSd().setNumBuckets(9000);
List<Diff<Object, Object>> diffs = newPartitionAndMetadataComparator(SHORT_CIRCUIT).compare(left, right);
// Despite the many differences above, short-circuit mode yields exactly one diff.
assertThat(diffs, is(notNullValue()));
assertThat(diffs.size(), is(1));
assertThat(diffs.get(0), is(newPropertyDiff(PartitionAndMetadata.class, "partition.parameters",
left.getPartition().getParameters(), right.getPartition().getParameters())));
}
示例2: StorageDescriptorWrapper
import org.apache.hadoop.hive.metastore.api.Order; //导入依赖的package包/类
/**
 * Wraps a Hive metastore {@link StorageDescriptor}, copying its scalar
 * properties and wrapping the nested column and sort-column lists.
 *
 * <p>The {@code cols} and {@code sortCols} lists are optional Thrift fields and
 * may be {@code null} on a deserialized descriptor; they are guarded here (the
 * original iterated them unconditionally and would throw an NPE), matching the
 * null handling of the sibling wrapper constructor elsewhere in the project.
 *
 * @param sd the storage descriptor to wrap; must not be null
 */
public StorageDescriptorWrapper(StorageDescriptor sd) {
    this.sd = sd;
    // Wrap each column definition, if any were supplied.
    if (sd.getCols() != null) {
        this.cols = Lists.newArrayList();
        for (FieldSchema f : sd.getCols()) {
            this.cols.add(new FieldSchemaWrapper(f));
        }
    }
    this.location = sd.getLocation();
    this.inputFormat = sd.getInputFormat();
    this.outputFormat = sd.getOutputFormat();
    this.compressed = sd.isCompressed();
    this.numBuckets = sd.getNumBuckets();
    this.serDeInfo = new SerDeInfoWrapper(sd.getSerdeInfo());
    // NOTE(review): bucketCols was deliberately left unwrapped in the original
    // (commented out); preserved as-is to avoid changing serialized output.
    // this.bucketCols = sd.getBucketCols();
    // Wrap each sort-order entry, if any were supplied.
    if (sd.getSortCols() != null) {
        this.sortCols = Lists.newArrayList();
        for (Order o : sd.getSortCols()) {
            this.sortCols.add(new OrderWrapper(o));
        }
    }
    this.parameters = sd.getParameters();
}
示例3: StorageDescriptorWrapper
import org.apache.hadoop.hive.metastore.api.Order; //导入依赖的package包/类
/**
 * Wraps a Hive metastore {@link StorageDescriptor}, copying its scalar
 * properties and wrapping the nested column and sort-column lists.
 * The {@code cols} and {@code sortCols} lists are optional and are only
 * wrapped when present.
 *
 * @param storageDescriptor the storage descriptor to wrap; must not be null
 */
public StorageDescriptorWrapper(StorageDescriptor storageDescriptor) {
    sd = storageDescriptor;
    location = storageDescriptor.getLocation();
    inputFormat = storageDescriptor.getInputFormat();
    outputFormat = storageDescriptor.getOutputFormat();
    compressed = storageDescriptor.isCompressed();
    numBuckets = storageDescriptor.getNumBuckets();
    serDeInfo = new SerDeInfoWrapper(storageDescriptor.getSerdeInfo());
    parameters = storageDescriptor.getParameters();
    // Wrap each sort-order entry, if any were supplied.
    if (storageDescriptor.getSortCols() != null) {
        sortCols = Lists.newArrayList();
        for (Order sortOrder : storageDescriptor.getSortCols()) {
            sortCols.add(new OrderWrapper(sortOrder));
        }
    }
    // Wrap each column definition, if any were supplied.
    if (storageDescriptor.getCols() != null) {
        columns = Lists.newArrayList();
        for (FieldSchema fieldSchema : storageDescriptor.getCols()) {
            columns.add(new FieldSchemaWrapper(fieldSchema));
        }
    }
}
示例4: makeMetastoreTableObject
import org.apache.hadoop.hive.metastore.api.Order; //导入依赖的package包/类
/**
 * Builds an in-memory metastore {@link Table} with the given database, name,
 * and columns: uncompressed, no sort columns, and a serde named after the
 * table with {@code SERIALIZATION_FORMAT=1}. The table is NOT registered with
 * the metastore.
 *
 * @param client metastore client; not used by this method, kept for signature
 *               compatibility with callers
 * @param dbName database the table belongs to
 * @param tabName table name (also used as the serde name)
 * @param cols column schema for the storage descriptor
 * @return the assembled table object
 * @throws Exception declared for caller compatibility; nothing here throws
 */
public Table makeMetastoreTableObject(HiveMetaStoreClient client,
String dbName, String tabName, List<FieldSchema> cols) throws Exception {
    // Assemble the serde first, then the storage descriptor, then the table.
    SerDeInfo serdeInfo = new SerDeInfo();
    serdeInfo.setName(tabName);
    serdeInfo.setParameters(new HashMap<String, String>());
    serdeInfo.getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");

    StorageDescriptor descriptor = new StorageDescriptor();
    descriptor.setCols(cols);
    descriptor.setCompressed(false);
    descriptor.setParameters(new HashMap<String, String>());
    descriptor.setSerdeInfo(serdeInfo);
    descriptor.setSortCols(new ArrayList<Order>());

    Table table = new Table();
    table.setDbName(dbName);
    table.setTableName(tabName);
    table.setParameters(new HashMap<String, String>());
    table.setSd(descriptor);
    return table;
}
示例5: sdSortColsShortCircuit
import org.apache.hadoop.hive.metastore.api.Order; //导入依赖的package包/类
// Verifies that a sortCols difference on the partition's storage descriptor is
// reported as a single partition.sd.sortCols property diff in SHORT_CIRCUIT mode.
@Test
public void sdSortColsShortCircuit() {
left.getPartition().getSd().setSortCols(ImmutableList.of(new Order()));
List<Diff<Object, Object>> diffs = newPartitionAndMetadataComparator(SHORT_CIRCUIT).compare(left, right);
assertThat(diffs, is(notNullValue()));
assertThat(diffs.size(), is(1));
assertThat(diffs.get(0), is(newPropertyDiff(PartitionAndMetadata.class, "partition.sd.sortCols",
left.getPartition().getSd().getSortCols(), right.getPartition().getSd().getSortCols())));
}
示例6: sdSortColsFullComparison
import org.apache.hadoop.hive.metastore.api.Order; //导入依赖的package包/类
// Verifies that a sortCols difference on the partition's storage descriptor is
// the only diff reported in FULL_COMPARISON mode when nothing else differs.
@Test
public void sdSortColsFullComparison() {
left.getPartition().getSd().setSortCols(ImmutableList.of(new Order()));
List<Diff<Object, Object>> diffs = newPartitionAndMetadataComparator(FULL_COMPARISON).compare(left, right);
assertThat(diffs, is(notNullValue()));
assertThat(diffs.size(), is(1));
assertThat(diffs.get(0), is(newPropertyDiff(PartitionAndMetadata.class, "partition.sd.sortCols",
left.getPartition().getSd().getSortCols(), right.getPartition().getSd().getSortCols())));
}
示例7: sdSortColsShortCircuit
import org.apache.hadoop.hive.metastore.api.Order; //导入依赖的package包/类
// Verifies that a sortCols difference on the table's storage descriptor is
// reported as a single table.sd.sortCols property diff in SHORT_CIRCUIT mode.
@Test
public void sdSortColsShortCircuit() {
left.getTable().getSd().setSortCols(ImmutableList.of(new Order()));
List<Diff<Object, Object>> diffs = newTableAndMetadataComparator(SHORT_CIRCUIT).compare(left, right);
assertThat(diffs, is(notNullValue()));
assertThat(diffs.size(), is(1));
assertThat(diffs.get(0), is(newPropertyDiff(TableAndMetadata.class, "table.sd.sortCols",
left.getTable().getSd().getSortCols(), right.getTable().getSd().getSortCols())));
}
示例8: sdSortColsFullComparison
import org.apache.hadoop.hive.metastore.api.Order; //导入依赖的package包/类
// Verifies that a sortCols difference on the table's storage descriptor is
// the only diff reported in FULL_COMPARISON mode when nothing else differs.
@Test
public void sdSortColsFullComparison() {
left.getTable().getSd().setSortCols(ImmutableList.of(new Order()));
List<Diff<Object, Object>> diffs = newTableAndMetadataComparator(FULL_COMPARISON).compare(left, right);
assertThat(diffs, is(notNullValue()));
assertThat(diffs.size(), is(1));
assertThat(diffs.get(0), is(newPropertyDiff(TableAndMetadata.class, "table.sd.sortCols",
left.getTable().getSd().getSortCols(), right.getTable().getSd().getSortCols())));
}
示例9: allShortCircuit
import org.apache.hadoop.hive.metastore.api.Order; //导入依赖的package包/类
// Verifies SHORT_CIRCUIT table comparison: every comparable property of the
// left table is made to differ from the right, but the comparator must stop at
// the first difference it encounters and report only table.parameters.
@Test
public void allShortCircuit() {
// Mutate top-level table state.
left.getTable().getParameters().put("com.company.key", "value");
left.getTable().setPartitionKeys(ImmutableList.of(new FieldSchema("p", "string", "p comment")));
left.getTable().setOwner("left owner");
List<PrivilegeGrantInfo> privilege = ImmutableList.of(new PrivilegeGrantInfo());
left.getTable().setPrivileges(new PrincipalPrivilegeSet(ImmutableMap.of("write", privilege), null, null));
left.getTable().setRetention(2);
left.getTable().setTableType("internal");
// Mutate every storage-descriptor property as well.
left.getTable().getSd().setLocation("left");
left.getTable().getSd().setInputFormat("LeftInputFormat");
left.getTable().getSd().setOutputFormat("LeftOutputFormat");
left.getTable().getSd().getParameters().put("com.company.key", "value");
left.getTable().getSd().getSerdeInfo().setName("left serde info");
left.getTable().getSd().getSkewedInfo().setSkewedColNames(ImmutableList.of("left skewed col"));
left.getTable().getSd().setCols(ImmutableList.of(new FieldSchema("left", "type", "comment")));
left.getTable().getSd().setSortCols(ImmutableList.of(new Order()));
left.getTable().getSd().setBucketCols(ImmutableList.of("bucket"));
left.getTable().getSd().setNumBuckets(9000);
List<Diff<Object, Object>> diffs = newTableAndMetadataComparator(SHORT_CIRCUIT).compare(left, right);
// Despite the many differences above, short-circuit mode yields exactly one diff.
assertThat(diffs, is(notNullValue()));
assertThat(diffs.size(), is(1));
assertThat(diffs.get(0), is(newPropertyDiff(TableAndMetadata.class, "table.parameters",
left.getTable().getParameters(), right.getTable().getParameters())));
}
示例10: getColumnNamesFromSortCols
import org.apache.hadoop.hive.metastore.api.Order; //导入依赖的package包/类
/**
 * Extracts the column names from a list of sort-column descriptors.
 *
 * @param sortCols the sort-order entries to read; must not be null
 * @return the column name of each entry, in the same order
 */
public static List<String> getColumnNamesFromSortCols(List<Order> sortCols) {
    // The result size is known up front, so pre-size the list.
    List<String> columnNames = new ArrayList<String>(sortCols.size());
    for (Order sortCol : sortCols) {
        columnNames.add(sortCol.getCol());
    }
    return columnNames;
}
示例11: createEvolvedDestinationTable
import org.apache.hadoop.hive.metastore.api.Order; //导入依赖的package包/类
// Builds an ORC-backed destination Table whose flattened column set simulates a
// partially evolved schema: it matches source_schema_evolution_enabled.ddl but
// deliberately omits parentFieldRecord__nestedFieldInt and adds an extra
// parentFieldRecord__nestedFieldString2 column.
private Optional<Table> createEvolvedDestinationTable(String tableName, String dbName, String location,
boolean withComment) {
List<FieldSchema> cols = new ArrayList<>();
// Existing columns that match avroToOrcSchemaEvolutionTest/source_schema_evolution_enabled.ddl
cols.add(new FieldSchema("parentFieldRecord__nestedFieldRecord__superNestedFieldString", "string",
withComment ? "from flatten_source parentFieldRecord.nestedFieldRecord.superNestedFieldString" : ""));
cols.add(new FieldSchema("parentFieldRecord__nestedFieldRecord__superNestedFieldInt", "int",
withComment ? "from flatten_source parentFieldRecord.nestedFieldRecord.superNestedFieldInt" : ""));
cols.add(new FieldSchema("parentFieldRecord__nestedFieldString", "string",
withComment ? "from flatten_source parentFieldRecord.nestedFieldString" : ""));
// The following column is skipped (simulating un-evolved schema):
// Column name : parentFieldRecord__nestedFieldInt
// Column type : int
// Column comment: from flatten_source parentFieldRecord.nestedFieldInt
cols.add(new FieldSchema("parentFieldInt", "int",
withComment ? "from flatten_source parentFieldInt" : ""));
// Extra schema
cols.add(new FieldSchema("parentFieldRecord__nestedFieldString2", "string",
withComment ? "from flatten_source parentFieldRecord.nestedFieldString2" : ""));
String inputFormat = "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat";
String outputFormat = "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat";
// NOTE(review): positional Thrift ctor — args presumed to be (cols, location,
// inputFormat, outputFormat, compressed=false, numBuckets=0, serdeInfo,
// bucketCols=null, sortCols=empty, parameters=null); confirm against the
// metastore API for the Hive version in use.
StorageDescriptor storageDescriptor = new StorageDescriptor(cols, location, inputFormat, outputFormat, false, 0,
new SerDeInfo(), null, Lists.<Order>newArrayList(), null);
// NOTE(review): owner is hard-coded to "ketl_dev"; create/access/retention
// times are all 0, and partition keys/parameters are empty.
Table table = new Table(tableName, dbName, "ketl_dev", 0, 0, 0, storageDescriptor,
Lists.<FieldSchema>newArrayList(), Maps.<String,String>newHashMap(), "", "", "");
return Optional.of(table);
}
示例12: allFullComparison
import org.apache.hadoop.hive.metastore.api.Order; //导入依赖的package包/类
// Verifies FULL_COMPARISON partition comparison: every comparable property of
// the left partition is changed, and the comparator must report ALL ten diffs
// in property-traversal order (not stop at the first, unlike SHORT_CIRCUIT).
@Test
public void allFullComparison() {
// Mutate top-level partition state.
left.getPartition().getParameters().put("com.company.key", "value");
left.getPartition().setValues(ImmutableList.of("p1", "p2"));
List<PrivilegeGrantInfo> privilege = ImmutableList.of(new PrivilegeGrantInfo());
left.getPartition().setPrivileges(new PrincipalPrivilegeSet(ImmutableMap.of("write", privilege), null, null));
// Mutate every storage-descriptor property as well.
left.getPartition().getSd().setLocation("left");
left.getPartition().getSd().setInputFormat("LeftInputFormat");
left.getPartition().getSd().setOutputFormat("LeftOutputFormat");
left.getPartition().getSd().getParameters().put("com.company.key", "value");
left.getPartition().getSd().getSerdeInfo().setName("left serde info");
left.getPartition().getSd().getSkewedInfo().setSkewedColNames(ImmutableList.of("left skewed col"));
left.getPartition().getSd().setCols(ImmutableList.of(new FieldSchema("left", "type", "comment")));
left.getPartition().getSd().setSortCols(ImmutableList.of(new Order()));
left.getPartition().getSd().setBucketCols(ImmutableList.of("bucket"));
left.getPartition().getSd().setNumBuckets(9000);
List<Diff<Object, Object>> diffs = newPartitionAndMetadataComparator(FULL_COMPARISON).compare(left, right);
assertThat(diffs, is(notNullValue()));
// Ten diffs expected; note values, owner, privileges, and location are NOT
// reported — presumably excluded from comparison by the comparator; verify.
assertThat(diffs.size(), is(10));
// Diffs 0-5: parameter/format/serde/skewed property differences.
assertThat(diffs.get(0), is(newPropertyDiff(PartitionAndMetadata.class, "partition.parameters",
left.getPartition().getParameters(), right.getPartition().getParameters())));
assertThat(diffs.get(1), is(newPropertyDiff(PartitionAndMetadata.class, "partition.sd.inputFormat",
left.getPartition().getSd().getInputFormat(), right.getPartition().getSd().getInputFormat())));
assertThat(diffs.get(2), is(newPropertyDiff(PartitionAndMetadata.class, "partition.sd.outputFormat",
left.getPartition().getSd().getOutputFormat(), right.getPartition().getSd().getOutputFormat())));
assertThat(diffs.get(3), is(newPropertyDiff(PartitionAndMetadata.class, "partition.sd.parameters",
left.getPartition().getSd().getParameters(), right.getPartition().getSd().getParameters())));
assertThat(diffs.get(4), is(newPropertyDiff(PartitionAndMetadata.class, "partition.sd.serdeInfo",
left.getPartition().getSd().getSerdeInfo(), right.getPartition().getSd().getSerdeInfo())));
assertThat(diffs.get(5), is(newPropertyDiff(PartitionAndMetadata.class, "partition.sd.skewedInfo",
left.getPartition().getSd().getSkewedInfo(), right.getPartition().getSd().getSkewedInfo())));
// Diff 6: cols differ in size, so a collection-size diff (not a property diff)
// is produced.
assertThat(diffs.get(6),
is(newDiff(
"Collection partition.sd.cols of class com.google.common.collect.SingletonImmutableList has different size: left.size()=1 and right.size()=2",
left.getPartition().getSd().getCols(), right.getPartition().getSd().getCols())));
// Diffs 7-9: remaining storage-descriptor properties.
assertThat(diffs.get(7), is(newPropertyDiff(PartitionAndMetadata.class, "partition.sd.sortCols",
left.getPartition().getSd().getSortCols(), right.getPartition().getSd().getSortCols())));
assertThat(diffs.get(8), is(newPropertyDiff(PartitionAndMetadata.class, "partition.sd.bucketCols",
left.getPartition().getSd().getBucketCols(), right.getPartition().getSd().getBucketCols())));
assertThat(diffs.get(9), is(newPropertyDiff(PartitionAndMetadata.class, "partition.sd.numBuckets",
left.getPartition().getSd().getNumBuckets(), right.getPartition().getSd().getNumBuckets())));
}
示例13: allFullComparison
import org.apache.hadoop.hive.metastore.api.Order; //导入依赖的package包/类
// Verifies FULL_COMPARISON table comparison: every comparable property of the
// left table is changed, and the comparator must report ALL twelve diffs in
// property-traversal order (not stop at the first, unlike SHORT_CIRCUIT).
@Test
public void allFullComparison() {
// Mutate top-level table state.
left.getTable().getParameters().put("com.company.key", "value");
left.getTable().setPartitionKeys(ImmutableList.of(new FieldSchema("p", "string", "p comment")));
left.getTable().setOwner("left owner");
List<PrivilegeGrantInfo> privilege = ImmutableList.of(new PrivilegeGrantInfo());
left.getTable().setPrivileges(new PrincipalPrivilegeSet(ImmutableMap.of("write", privilege), null, null));
left.getTable().setRetention(2);
left.getTable().setTableType("internal");
// Mutate every storage-descriptor property as well.
left.getTable().getSd().setLocation("left");
left.getTable().getSd().setInputFormat("LeftInputFormat");
left.getTable().getSd().setOutputFormat("LeftOutputFormat");
left.getTable().getSd().getParameters().put("com.company.key", "value");
left.getTable().getSd().getSerdeInfo().setName("left serde info");
left.getTable().getSd().getSkewedInfo().setSkewedColNames(ImmutableList.of("left skewed col"));
left.getTable().getSd().setCols(ImmutableList.of(new FieldSchema("left", "type", "comment")));
left.getTable().getSd().setSortCols(ImmutableList.of(new Order()));
left.getTable().getSd().setBucketCols(ImmutableList.of("bucket"));
left.getTable().getSd().setNumBuckets(9000);
List<Diff<Object, Object>> diffs = newTableAndMetadataComparator(FULL_COMPARISON).compare(left, right);
assertThat(diffs, is(notNullValue()));
// Twelve diffs expected; note owner, tableType, privileges, and location are
// NOT reported — presumably excluded from comparison by the comparator; verify.
assertThat(diffs.size(), is(12));
// Diffs 0-2: top-level table property differences.
assertThat(diffs.get(0), is(newPropertyDiff(TableAndMetadata.class, "table.parameters",
left.getTable().getParameters(), right.getTable().getParameters())));
assertThat(diffs.get(1), is(newPropertyDiff(TableAndMetadata.class, "table.partitionKeys",
left.getTable().getPartitionKeys(), right.getTable().getPartitionKeys())));
assertThat(diffs.get(2), is(newPropertyDiff(TableAndMetadata.class, "table.retention",
left.getTable().getRetention(), right.getTable().getRetention())));
// Diffs 3-7: storage-descriptor format/parameter/serde/skewed differences.
assertThat(diffs.get(3), is(newPropertyDiff(TableAndMetadata.class, "table.sd.inputFormat",
left.getTable().getSd().getInputFormat(), right.getTable().getSd().getInputFormat())));
assertThat(diffs.get(4), is(newPropertyDiff(TableAndMetadata.class, "table.sd.outputFormat",
left.getTable().getSd().getOutputFormat(), right.getTable().getSd().getOutputFormat())));
assertThat(diffs.get(5), is(newPropertyDiff(TableAndMetadata.class, "table.sd.parameters",
left.getTable().getSd().getParameters(), right.getTable().getSd().getParameters())));
assertThat(diffs.get(6), is(newPropertyDiff(TableAndMetadata.class, "table.sd.serdeInfo",
left.getTable().getSd().getSerdeInfo(), right.getTable().getSd().getSerdeInfo())));
assertThat(diffs.get(7), is(newPropertyDiff(TableAndMetadata.class, "table.sd.skewedInfo",
left.getTable().getSd().getSkewedInfo(), right.getTable().getSd().getSkewedInfo())));
// Diff 8: cols differ in size, so a collection-size diff (not a property diff)
// is produced.
assertThat(diffs.get(8),
is(newDiff(
"Collection table.sd.cols of class com.google.common.collect.SingletonImmutableList has different size: left.size()=1 and right.size()=2",
left.getTable().getSd().getCols(), right.getTable().getSd().getCols())));
// Diffs 9-11: remaining storage-descriptor properties.
assertThat(diffs.get(9), is(newPropertyDiff(TableAndMetadata.class, "table.sd.sortCols",
left.getTable().getSd().getSortCols(), right.getTable().getSd().getSortCols())));
assertThat(diffs.get(10), is(newPropertyDiff(TableAndMetadata.class, "table.sd.bucketCols",
left.getTable().getSd().getBucketCols(), right.getTable().getSd().getBucketCols())));
assertThat(diffs.get(11), is(newPropertyDiff(TableAndMetadata.class, "table.sd.numBuckets",
left.getTable().getSd().getNumBuckets(), right.getTable().getSd().getNumBuckets())));
}
示例14: OrderWrapper
import org.apache.hadoop.hive.metastore.api.Order; //导入依赖的package包/类
// Wraps a metastore Order, keeping a reference to the original and copying its
// column name and sort direction into local fields.
public OrderWrapper(Order ord) {
this.ord = ord;
// Copied so serialization can expose them without touching the Thrift object.
this.col = ord.getCol();
this.order = ord.getOrder();
}
示例15: getOrder
import org.apache.hadoop.hive.metastore.api.Order; //导入依赖的package包/类
// Returns the wrapped metastore Order; excluded from JSON serialization.
@JsonIgnore
public Order getOrder() {
return ord;
}