本文整理汇总了Java中org.apache.hadoop.hive.ql.io.RecordIdentifier类的典型用法代码示例。如果您正苦于以下问题:Java RecordIdentifier类的具体用法?Java RecordIdentifier怎么用?Java RecordIdentifier使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
RecordIdentifier类属于org.apache.hadoop.hive.ql.io包,在下文中一共展示了RecordIdentifier类的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: newStructTypeInfo
import org.apache.hadoop.hive.ql.io.RecordIdentifier; //导入依赖的package包/类
/**
 * Builds a Hive {@link StructTypeInfo} describing the given cascading {@code Fields}.
 * <p>
 * Field names are lower-cased to follow Hive column-name convention. The magic
 * {@code ROW_ID_NAME} field is validated to be of type {@link RecordIdentifier} and is
 * then excluded from the resulting struct — it is metadata, not a data column.
 *
 * @param fields the declared fields, each of which must carry type information
 * @return the struct type describing all non-ROW__ID fields
 * @throws IllegalArgumentException if the ROW__ID field is not declared as
 *           {@link RecordIdentifier}, or if any other field has no declared type
 */
static StructTypeInfo newStructTypeInfo(Fields fields) {
  List<String> names = new ArrayList<>();
  List<TypeInfo> typeInfos = new ArrayList<>();
  for (int i = 0; i < fields.size(); i++) {
    String name = fields.get(i).toString();
    if (ROW_ID_NAME.equals(name)) {
      // equals() is called on the constant so a null declared type produces the
      // informative IllegalArgumentException below ("Found type: null") instead of
      // a NullPointerException.
      if (!RecordIdentifier.class.equals(fields.getTypeClass(i))) {
        throw new IllegalArgumentException(ROW_ID_NAME + " column is not of type "
            + RecordIdentifier.class.getSimpleName() + ". Found type: " + fields.getTypeClass(i));
      }
      // ROW__ID is intentionally not part of the struct.
      continue;
    }
    Class<?> type = fields.getTypeClass(i);
    // Validate the type before recording the name so no partial state is built up
    // when the declaration is incomplete.
    if (type == null) {
      throw new IllegalArgumentException("Missing type information for field: " + name);
    }
    names.add(name.toLowerCase());
    TypeInfo typeInfo = getTypeInfoFromClass(type);
    typeInfos.add(typeInfo);
  }
  return (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(names, typeInfos);
}
示例2: Corc
import org.apache.hadoop.hive.ql.io.RecordIdentifier; //导入依赖的package包/类
/**
 * Creates a {@code Corc} for the given schema, backed by a freshly created
 * {@link OrcStruct} and a new, empty {@link RecordIdentifier}.
 *
 * @param typeInfo the struct type describing the record's columns
 * @param factory converter factory used by this instance
 */
public Corc(StructTypeInfo typeInfo, ConverterFactory factory) {
  LOG.debug("TypeInfo: {}", typeInfo);
  this.factory = factory;
  this.recordIdentifier = new RecordIdentifier();
  // The inspector is derived from the schema; the struct is then created by it.
  this.inspector = (SettableStructObjectInspector) OrcStruct.createObjectInspector(typeInfo);
  this.struct = (OrcStruct) this.inspector.create();
}
示例3: readFromTransactionalTableWithRowId
import org.apache.hadoop.hive.ql.io.RecordIdentifier; //导入依赖的package包/类
@Test
public void readFromTransactionalTableWithRowId() throws Exception {
  // "ROW__ID" is a magic value
  Fields fields = new Fields(names("ROW__ID", "id", "msg"), types(RecordIdentifier.class, int.class, String.class));

  // Build the expected tuples first; each row carries its RecordIdentifier plus data columns.
  List<TupleEntry> expected = new DataBuilder(fields)
      .addTuple(new RecordIdentifier(1, 0, 1), 2, "UPDATED: Streaming to welcome")
      .addTuple(new RecordIdentifier(7, 0, 0), 3, "updated")
      .build()
      .asTupleEntryList();

  // Read the partition back through the ORC source tap.
  Hfs tap = new Hfs(OrcFile.source().declaredFields(fields).schemaFromFile().build(),
      "src/test/data/test_table/continent=Asia/country=India");
  List<TupleEntry> actual = Plunger.readDataFromTap(tap).asTupleEntryList();

  assertThat(actual, is(tupleEntryList(expected)));
}
示例4: getRecordIdentifier
import org.apache.hadoop.hive.ql.io.RecordIdentifier; //导入依赖的package包/类
/**
 * Returns a defensive copy of the current {@link RecordIdentifier}, so callers
 * cannot mutate this instance's internal state through the returned object.
 *
 * @return a new {@code RecordIdentifier} populated from the internal one
 */
public RecordIdentifier getRecordIdentifier() {
  LOG.debug("Fetched recordIdentifier={}", recordIdentifier);
  RecordIdentifier snapshot = new RecordIdentifier();
  snapshot.set(recordIdentifier);
  return snapshot;
}
示例5: setRecordIdentifier
import org.apache.hadoop.hive.ql.io.RecordIdentifier; //导入依赖的package包/类
/**
 * Copies the values of the supplied {@link RecordIdentifier} into this instance's
 * internal one (the argument itself is not retained).
 *
 * @param recordIdentifier the identifier whose values are copied in
 */
public void setRecordIdentifier(RecordIdentifier recordIdentifier) {
  LOG.debug("Set recordIdentifier={}", recordIdentifier);
  this.recordIdentifier.set(recordIdentifier);
}