This article collects typical usage examples of the Java method org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getTypeInfoFromTypeString. If you are wondering how TypeInfoUtils.getTypeInfoFromTypeString is used in Java, or are looking for concrete examples of it, the curated code examples below should help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.
The sections below show 13 code examples of TypeInfoUtils.getTypeInfoFromTypeString, sorted by popularity by default.
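All of the examples share the same basic pattern: parse a Hive type string into a TypeInfo, and usually pair it with an ObjectInspector. The following is a minimal, self-contained sketch of that pattern; the class name TypeInfoExample and the type strings are illustrative only and are not taken from the examples below.

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class TypeInfoExample {
    public static void main(String[] args) {
        // Parse a primitive Hive type string.
        TypeInfo intType = TypeInfoUtils.getTypeInfoFromTypeString("int");
        System.out.println(intType.getTypeName()); // int

        // Complex type strings (struct, map, array, uniontype) are parsed recursively.
        StructTypeInfo structType = (StructTypeInfo) TypeInfoUtils
            .getTypeInfoFromTypeString("struct<id:bigint,name:varchar(20)>");
        System.out.println(structType.getAllStructFieldNames()); // [id, name]

        // The TypeInfo is typically paired with an ObjectInspector for reading or writing rows.
        ObjectInspector oi = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(structType);
        System.out.println(oi.getCategory()); // STRUCT
    }
}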
Example 1: toMetacatType
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; //import the package/class the method depends on
@Override
public Type toMetacatType(final String type) {
    // Hack to fix presto "varchar" type coming in with no length which is required by Hive.
    final TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(
        "varchar".equals(type.toLowerCase()) ? serdeConstants.STRING_TYPE_NAME : type);
    ObjectInspector oi = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
    // The standard struct object inspector forces field names to lower case, however in Metacat we need to preserve
    // the original case of the struct fields so we wrap it with our wrapper to force the fieldNames to keep
    // their original case
    if (typeInfo.getCategory().equals(ObjectInspector.Category.STRUCT)) {
        final StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
        final StandardStructObjectInspector objectInspector = (StandardStructObjectInspector) oi;
        oi = new HiveTypeConverter.SameCaseStandardStructObjectInspector(
            structTypeInfo.getAllStructFieldNames(), objectInspector);
    }
    return getCanonicalType(oi);
}
Example 2: typical
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; //import the package/class the method depends on
@Test
public void typical() throws IOException {
    TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString("struct<a:string>");
    ObjectInspector inspector = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
    WriterOptions options = OrcFile.writerOptions(conf).inspector(inspector);
    Path path = new Path(temporaryFolder.getRoot().getCanonicalPath(), "part-00000");
    Writer writer = OrcFile.createWriter(path, options);
    writer.addRow(Arrays.asList("hello"));
    writer.close();
    try (OrcReader reader = new OrcReader(conf, path)) {
        List<Object> next = reader.next();
        assertThat(next.size(), is(1));
        assertThat(next.get(0), is((Object) "hello"));
        assertThat(reader.hasNext(), is(false));
    }
}
Example 3: getVectorType
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; //import the package/class the method depends on
@Override
public TypeProtos.MajorType getVectorType(SchemaPath column, PlannerSettings plannerSettings) {
    HiveScan hiveScan = (HiveScan) scanRel.getGroupScan();
    String partitionName = column.getAsNamePart().getName();
    Map<String, String> partitionNameTypeMap = hiveScan.hiveReadEntry.table.getPartitionNameTypeMap();
    String hiveType = partitionNameTypeMap.get(partitionName);
    PrimitiveTypeInfo primitiveTypeInfo = (PrimitiveTypeInfo) TypeInfoUtils.getTypeInfoFromTypeString(hiveType);
    TypeProtos.MinorType partitionType = HiveUtilities.getMinorTypeFromHivePrimitiveTypeInfo(primitiveTypeInfo,
        plannerSettings.getOptions());
    return TypeProtos.MajorType.newBuilder().setMode(TypeProtos.DataMode.OPTIONAL).setMinorType(partitionType).build();
}
Example 4: OrcStreamWriter
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; //import the package/class the method depends on
public OrcStreamWriter( final Configuration config, final Path path, final String schema ) throws IOException {
    FileSystem fs = FileSystem.get(config);
    long stripeSize = HiveConf.getLongVar(config, HiveConf.ConfVars.HIVE_ORC_DEFAULT_STRIPE_SIZE);
    CompressionKind compress = CompressionKind.valueOf(HiveConf.getVar(config, HiveConf.ConfVars.HIVE_ORC_DEFAULT_COMPRESS));
    int bufferSize = HiveConf.getIntVar(config, HiveConf.ConfVars.HIVE_ORC_DEFAULT_BUFFER_SIZE);
    int rowIndexStride = HiveConf.getIntVar(config, HiveConf.ConfVars.HIVE_ORC_DEFAULT_ROW_INDEX_STRIDE);
    TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString( schema );
    ObjectInspector inspector = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo( typeInfo );
    writer = OrcFile.createWriter( fs, path, config, inspector, stripeSize, compress, bufferSize, rowIndexStride );
    formatter = OrcFormatterFactory.get( typeInfo );
}
Example 5: getSchemaTypeInfo
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; //import the package/class the method depends on
/**
 * Gets the StructTypeInfo that declares the total schema of the file from the configuration
 */
static StructTypeInfo getSchemaTypeInfo(Configuration conf) {
    String schemaTypeInfo = conf.get(SCHEMA_TYPE_INFO);
    if (schemaTypeInfo != null && !schemaTypeInfo.isEmpty()) {
        LOG.debug("Got schema typeInfo from conf: {}", schemaTypeInfo);
        return (StructTypeInfo) TypeInfoUtils.getTypeInfoFromTypeString(conf.get(SCHEMA_TYPE_INFO));
    }
    return null;
}
Example 6: CobolStringField
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; //import the package/class the method depends on
public CobolStringField(String debugInfo, int levelNo, String name,
        String picClause) {
    super();
    super.debugInfo = debugInfo;
    super.levelNo = levelNo;
    super.type = CobolFieldType.STRING;
    super.name = name;
    String fieldType = "string";
    if (picClause.contains("(")) {
        String[] s = picClause.split("\\(|\\)|\\.");
        if (s.length == 2) {
            try {
                super.length = Integer.parseInt(s[1]);
            } catch (NumberFormatException e) {
                throw e;
            }
        } else {
            throw new RuntimeException(
                "Alphanumeric Picture clause has more brackets:"
                    + this.debugInfo);
        }
    } else {
        if (picClause.trim().toLowerCase().matches("[x|a]+\\."))
            super.length = picClause.length() - 1;
        else if (picClause.trim().toLowerCase().matches("[x|a]+"))
            super.length = picClause.length();
        else {
            throw new RuntimeException(
                "Alphanumeric Picture clause incorrect '"
                    + this.debugInfo);
        }
    }
    if (super.length < 65355) {
        fieldType = "varchar(" + this.length + ")";
    }
    super.typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(fieldType);
    this.oi = TypeInfoUtils
        .getStandardJavaObjectInspectorFromTypeInfo(this.typeInfo);
}
Example 7: initColumnFields
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; //import the package/class the method depends on
/**
 * Inits the column fields.
 *
 * @param metadata the metadata
 */
private void initColumnFields(LensResultSetMetadata metadata) {
    StringBuilder typesSb = new StringBuilder();
    StringBuilder headerTypes = new StringBuilder();
    if ((metadata != null) && (metadata.getColumns() != null) && (!metadata.getColumns().isEmpty())) {
        for (int pos = 0; pos < metadata.getColumns().size(); pos++) {
            if (pos != 0) {
                typesSb.append(",");
                headerTypes.append(",");
            }
            String name = metadata.getColumns().get(pos).getName();
            String type = LensResultSetMetadata.getQualifiedTypeName(metadata.getColumns().get(pos).getTypeDescriptor());
            typesSb.append(type);
            columnNames.add(name);
            escapedColumnNames.add(StringEscapeUtils.escapeCsv(name));
            TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(type);
            columnTypes.add(typeInfo);
            columnOIs.add(TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo));
            columnHeaderOIs.add(TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(TypeInfoUtils
                .getTypeInfoFromTypeString(HEADER_TYPE)));
            headerTypes.append(HEADER_TYPE);
        }
    }
    types = typesSb.toString();
    htypes = headerTypes.toString();
}
Example 8: getPartitionValue
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; //import the package/class the method depends on
private static PartitionValue getPartitionValue(FieldSchema partitionCol, String value) {
    final TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(partitionCol.getType());
    PartitionValue out = new PartitionValue();
    out.setColumn(partitionCol.getName());
    if ("__HIVE_DEFAULT_PARTITION__".equals(value)) {
        return out;
    }
    switch (typeInfo.getCategory()) {
    case PRIMITIVE:
        final PrimitiveTypeInfo primitiveTypeInfo = (PrimitiveTypeInfo) typeInfo;
        switch (((PrimitiveTypeInfo) typeInfo).getPrimitiveCategory()) {
        case BINARY:
            return out.setBinaryValue(ByteString.copyFrom(value.getBytes()));
        case BOOLEAN:
            return out.setBitValue(Boolean.parseBoolean(value));
        case DOUBLE:
            return out.setDoubleValue(Double.parseDouble(value));
        case FLOAT:
            return out.setFloatValue(Float.parseFloat(value));
        case BYTE:
        case SHORT:
        case INT:
            return out.setIntValue(Integer.parseInt(value));
        case LONG:
            return out.setLongValue(Long.parseLong(value));
        case STRING:
        case VARCHAR:
            return out.setStringValue(value);
        case CHAR:
            return out.setStringValue(value.trim());
        case TIMESTAMP:
            return out.setLongValue(DateTimes.toMillisFromJdbcTimestamp(value));
        case DATE:
            return out.setLongValue(DateTimes.toMillisFromJdbcDate(value));
        case DECIMAL:
            DecimalTypeInfo decimalTypeInfo = (DecimalTypeInfo) typeInfo;
            if (decimalTypeInfo.getPrecision() > 38) {
                throw UserException.unsupportedError()
                    .message("Dremio only supports decimals up to 38 digits in precision. This Hive table has a partition value with scale of %d digits.", decimalTypeInfo.getPrecision())
                    .build(logger);
            }
            HiveDecimal decimal = HiveDecimalUtils.enforcePrecisionScale(HiveDecimal.create(value), decimalTypeInfo.precision(), decimalTypeInfo.scale());
            final BigDecimal original = decimal.bigDecimalValue();
            // we can't just use unscaledValue() since BigDecimal doesn't store trailing zeroes and we need to ensure decoding includes the correct scale.
            final BigInteger unscaled = original.movePointRight(decimalTypeInfo.scale()).unscaledValue();
            return out.setBinaryValue(ByteString.copyFrom(DecimalTools.signExtend16(unscaled.toByteArray())));
        default:
            HiveUtilities.throwUnsupportedHiveDataTypeError(((PrimitiveTypeInfo) typeInfo).getPrimitiveCategory().toString());
        }
    default:
        HiveUtilities.throwUnsupportedHiveDataTypeError(typeInfo.getCategory().toString());
    }
    return null; // unreachable
}
Example 9: getTypeInfo
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; //import the package/class the method depends on
/**
 * Gets the StructTypeInfo that declares the columns to be read from the configuration
 */
static StructTypeInfo getTypeInfo(Configuration conf) {
    StructTypeInfo inputTypeInfo = (StructTypeInfo) TypeInfoUtils.getTypeInfoFromTypeString(conf.get(INPUT_TYPE_INFO));
    LOG.debug("Got input typeInfo from conf: {}", inputTypeInfo);
    return inputTypeInfo;
}
Example 10: setFieldTypeInfo
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; //import the package/class the method depends on
private void setFieldTypeInfo() {
    this.fieldTypeInfo = TypeInfoUtils
        .getTypeInfoFromTypeString(this.fieldType);
}
Example 11: createJavaObjectInspectorFromFieldSchema
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; //import the package/class the method depends on
public static ObjectInspector createJavaObjectInspectorFromFieldSchema(String columnTypeString) {
    TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(columnTypeString);
    return TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
}
Example 12: testInspectorFromTypeInfo
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; //import the package/class the method depends on
@Test
public void testInspectorFromTypeInfo() throws Exception {
    TypeInfo typeInfo =
        TypeInfoUtils.getTypeInfoFromTypeString("struct<c1:boolean,c2:tinyint" +
            ",c3:smallint,c4:int,c5:bigint,c6:float,c7:double,c8:binary," +
            "c9:string,c10:struct<c1:int>,c11:map<int,int>,c12:uniontype<int>" +
            ",c13:array<timestamp>>");
    StructObjectInspector inspector = (StructObjectInspector)
        OrcLazyObjectInspectorUtils.createWritableObjectInspector(typeInfo);
    assertEquals("struct<c1:boolean,c2:tinyint,c3:smallint,c4:int,c5:" +
        "bigint,c6:float,c7:double,c8:binary,c9:string,c10:struct<" +
        "c1:int>,c11:map<int,int>,c12:uniontype<int>,c13:array<timestamp>>",
        inspector.getTypeName());
    assertEquals(null,
        inspector.getAllStructFieldRefs().get(0).getFieldComment());
    assertEquals(null, inspector.getStructFieldRef("UNKNOWN"));
    final List<String> fieldNames = ImmutableList.of(
        "field0", "field1", "field2", "field3", "field4",
        "field5", "field6", "field7", "field8", "field9",
        "field10", "field11", "field12");
    OrcStruct s1 = new OrcStruct(fieldNames);
    for (int i = 0; i < 13; ++i) {
        s1.setFieldValue(i, i);
    }
    final List<Object> list = ImmutableList.of(
        (Object) 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12);
    assertEquals(list, inspector.getStructFieldsDataAsList(s1));
    ListObjectInspector listOI = (ListObjectInspector)
        inspector.getAllStructFieldRefs().get(12).getFieldObjectInspector();
    assertEquals(ObjectInspector.Category.LIST, listOI.getCategory());
    assertEquals(10, listOI.getListElement(list, 10));
    assertEquals(13, listOI.getListLength(list));
    final Map<Integer, Integer> map = ImmutableMap.of(1, 2,
        2, 4,
        3, 6);
    MapObjectInspector mapOI = (MapObjectInspector)
        inspector.getAllStructFieldRefs().get(10).getFieldObjectInspector();
    assertEquals(3, mapOI.getMapSize(map));
    assertEquals(4, mapOI.getMapValueElement(map, 2));
}
Example 13: schema
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; //import the package/class the method depends on
/**
 * Specify the schema of the file. All Hive types are supported. If the {@link #declaredFields(Fields)} and
 * {@link #columns(StructTypeInfo) columns(...)} options were not specified, the declared {@link Fields} will be
 * derived from the supplied schema {@link StructTypeInfo}.
 * <p/>
 * Should the respective schema {@link TypeInfo} for a column be different from the {@link TypeInfo} in the actual
 * ORC File then a {@link ClassCastException} will be thrown when reading the records.
 */
public SourceBuilder schema(String typeInfoString) {
    checkExistingSchema();
    schemaTypeInfo = (StructTypeInfo) TypeInfoUtils.getTypeInfoFromTypeString(typeInfoString);
    return this;
}