

Java TypeInfoUtils.getTypeInfoFromTypeString Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getTypeInfoFromTypeString. If you are wondering how exactly to use TypeInfoUtils.getTypeInfoFromTypeString, or what it looks like in practice, the curated examples below may help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.


Below are 13 code examples of TypeInfoUtils.getTypeInfoFromTypeString, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
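
Before the examples, here is a minimal, self-contained sketch of what the method does: it parses a Hive type string into a TypeInfo tree, which is then typically converted into an ObjectInspector, a pattern that recurs throughout the examples below. (This snippet is illustrative only and assumes a Hive serde2 library on the classpath.)

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class TypeInfoDemo {
  public static void main(String[] args) {
    // Parse a Hive type string into a TypeInfo tree.
    TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString("map<string,array<int>>");
    System.out.println(typeInfo.getTypeName());  // map<string,array<int>>
    System.out.println(typeInfo.getCategory());  // MAP

    // The TypeInfo is usually turned into an ObjectInspector for reading/writing rows.
    ObjectInspector oi = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
    System.out.println(oi.getCategory());        // MAP
  }
}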

Example 1: toMetacatType

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
@Override
public Type toMetacatType(final String type) {
    // Hack to fix presto "varchar" type coming in with no length which is required by Hive.
    final TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(
        "varchar".equals(type.toLowerCase()) ? serdeConstants.STRING_TYPE_NAME : type);
    ObjectInspector oi = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
    // The standard struct object inspector forces field names to lower case. Metacat needs to
    // preserve the original case of the struct fields, so we wrap the inspector to keep the
    // field names as-is.
    if (typeInfo.getCategory().equals(ObjectInspector.Category.STRUCT)) {
        final StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
        final StandardStructObjectInspector objectInspector = (StandardStructObjectInspector) oi;
        oi = new HiveTypeConverter.SameCaseStandardStructObjectInspector(
            structTypeInfo.getAllStructFieldNames(), objectInspector);
    }
    return getCanonicalType(oi);
}
 
Developer: Netflix, Project: metacat, Lines: 18, Source: HiveTypeConverter.java
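
The "hack" comment in example 1 exists because Hive's type parser rejects a bare varchar that carries no length, while Presto can emit exactly that. A minimal sketch of the two cases (assuming stock Hive behavior; the failing call is left commented out):

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class VarcharParseDemo {
  public static void main(String[] args) {
    // A parameterized varchar parses fine:
    System.out.println(TypeInfoUtils.getTypeInfoFromTypeString("varchar(10)").getTypeName());
    // A bare "varchar" has no length and is rejected at parse time, which is
    // why example 1 maps it to serdeConstants.STRING_TYPE_NAME instead:
    // TypeInfoUtils.getTypeInfoFromTypeString("varchar"); // throws
  }
}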

Example 2: typical

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
@Test
public void typical() throws IOException {
  TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString("struct<a:string>");
  ObjectInspector inspector = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
  WriterOptions options = OrcFile.writerOptions(conf).inspector(inspector);

  Path path = new Path(temporaryFolder.getRoot().getCanonicalPath(), "part-00000");

  Writer writer = OrcFile.createWriter(path, options);
  writer.addRow(Arrays.asList("hello"));
  writer.close();

  try (OrcReader reader = new OrcReader(conf, path)) {
    List<Object> next = reader.next();
    assertThat(next.size(), is(1));
    assertThat(next.get(0), is((Object) "hello"));
    assertThat(reader.hasNext(), is(false));
  }

}
 
Developer: HotelsDotCom, Project: corc, Lines: 21, Source: OrcReaderTest.java

Example 3: getVectorType

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
@Override
public TypeProtos.MajorType getVectorType(SchemaPath column, PlannerSettings plannerSettings) {
  HiveScan hiveScan = (HiveScan) scanRel.getGroupScan();
  String partitionName = column.getAsNamePart().getName();
  Map<String, String> partitionNameTypeMap = hiveScan.hiveReadEntry.table.getPartitionNameTypeMap();
  String hiveType = partitionNameTypeMap.get(partitionName);
  PrimitiveTypeInfo primitiveTypeInfo = (PrimitiveTypeInfo) TypeInfoUtils.getTypeInfoFromTypeString(hiveType);

  TypeProtos.MinorType partitionType = HiveUtilities.getMinorTypeFromHivePrimitiveTypeInfo(primitiveTypeInfo,
      plannerSettings.getOptions());
  return TypeProtos.MajorType.newBuilder().setMode(TypeProtos.DataMode.OPTIONAL).setMinorType(partitionType).build();
}
 
Developer: skhalifa, Project: QDrill, Lines: 13, Source: HivePartitionDescriptor.java

Example 4: OrcStreamWriter

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
public OrcStreamWriter( final Configuration config, final Path path, final String schema ) throws IOException{
  FileSystem fs = FileSystem.get(config);
  long stripeSize = HiveConf.getLongVar(config, HiveConf.ConfVars.HIVE_ORC_DEFAULT_STRIPE_SIZE);
  CompressionKind compress = CompressionKind.valueOf(HiveConf.getVar(config, HiveConf.ConfVars.HIVE_ORC_DEFAULT_COMPRESS));
  int bufferSize = HiveConf.getIntVar(config, HiveConf.ConfVars.HIVE_ORC_DEFAULT_BUFFER_SIZE);
  int rowIndexStride =  HiveConf.getIntVar(config, HiveConf.ConfVars.HIVE_ORC_DEFAULT_ROW_INDEX_STRIDE);

  TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString( schema );
  ObjectInspector inspector = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo( typeInfo );
  writer = OrcFile.createWriter( fs, path, config, inspector, stripeSize, compress, bufferSize, rowIndexStride );
  formatter = OrcFormatterFactory.get( typeInfo );
}
 
Developer: yahoojapan, Project: dataplatform-schema-lib, Lines: 13, Source: OrcStreamWriter.java
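
A hypothetical construction sketch for the writer above (the path, schema string, and demo class name are made up; the schema argument is an ordinary Hive type string handed straight to getTypeInfoFromTypeString):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class OrcStreamWriterDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path path = new Path("/tmp/example.orc"); // hypothetical output location
    // The schema is parsed internally via TypeInfoUtils.getTypeInfoFromTypeString.
    OrcStreamWriter writer = new OrcStreamWriter(conf, path, "struct<id:int,name:string>");
  }
}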

Example 5: getSchemaTypeInfo

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
/**
 * Gets the StructTypeInfo that declares the total schema of the file from the configuration
 */
static StructTypeInfo getSchemaTypeInfo(Configuration conf) {
  String schemaTypeInfo = conf.get(SCHEMA_TYPE_INFO);
  if (schemaTypeInfo != null && !schemaTypeInfo.isEmpty()) {
    LOG.debug("Got schema typeInfo from conf: {}", schemaTypeInfo);
    // Reuse the value already read from the conf instead of fetching it a second time.
    return (StructTypeInfo) TypeInfoUtils.getTypeInfoFromTypeString(schemaTypeInfo);
  }
  return null;
}
 
Developer: HotelsDotCom, Project: corc, Lines: 12, Source: CorcInputFormat.java

Example 6: CobolStringField

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
public CobolStringField(String debugInfo, int levelNo, String name,
		String picClause) {
	super();
	super.debugInfo = debugInfo;
	super.levelNo = levelNo;
	super.type = CobolFieldType.STRING;
	super.name = name;
	String fieldType = "string";
	if (picClause.contains("(")) {
		String[] s = picClause.split("\\(|\\)|\\.");
		if (s.length == 2) {
			super.length = Integer.parseInt(s[1]);
		} else {
			throw new RuntimeException(
					"Alphanumeric picture clause has too many brackets: "
							+ this.debugInfo);
		}
	} else {
		// A picture clause without an explicit length, e.g. "XXX." or "AA":
		// the field length is simply the number of X/A characters.
		if (picClause.trim().toLowerCase().matches("[xa]+\\."))
			super.length = picClause.length() - 1;
		else if (picClause.trim().toLowerCase().matches("[xa]+"))
			super.length = picClause.length();
		else {
			throw new RuntimeException(
					"Alphanumeric picture clause is malformed: "
							+ this.debugInfo);
		}
	}
	// Hive varchar holds at most 65535 characters; longer fields stay "string".
	if (super.length < 65535) {
		fieldType = "varchar(" + this.length + ")";
	}
	super.typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(fieldType);
	this.oi = TypeInfoUtils
			.getStandardJavaObjectInspectorFromTypeInfo(this.typeInfo);
}
 
Developer: rbheemana, Project: Cobol-to-Hive, Lines: 41, Source: CobolStringField.java

Example 7: initColumnFields

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
/**
 * Inits the column fields.
 *
 * @param metadata the metadata
 */
private void initColumnFields(LensResultSetMetadata metadata) {
  StringBuilder typesSb = new StringBuilder();
  StringBuilder headerTypes = new StringBuilder();

  if ((metadata != null) && (metadata.getColumns() != null) && (!metadata.getColumns().isEmpty())) {
    for (int pos = 0; pos < metadata.getColumns().size(); pos++) {
      if (pos != 0) {
        typesSb.append(",");
        headerTypes.append(",");
      }
      String name = metadata.getColumns().get(pos).getName();
      String type = LensResultSetMetadata.getQualifiedTypeName(metadata.getColumns().get(pos).getTypeDescriptor());
      typesSb.append(type);
      columnNames.add(name);
      escapedColumnNames.add(StringEscapeUtils.escapeCsv(name));
      TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(type);
      columnTypes.add(typeInfo);
      columnOIs.add(TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo));
      columnHeaderOIs.add(TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(TypeInfoUtils
        .getTypeInfoFromTypeString(HEADER_TYPE)));
      headerTypes.append(HEADER_TYPE);
    }
  }

  types = typesSb.toString();
  htypes = headerTypes.toString();
}
 
Developer: apache, Project: lens, Lines: 33, Source: AbstractOutputFormatter.java

Example 8: getPartitionValue

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
private static PartitionValue getPartitionValue(FieldSchema partitionCol, String value) {
  final TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(partitionCol.getType());
  PartitionValue out = new PartitionValue();
  out.setColumn(partitionCol.getName());

  if("__HIVE_DEFAULT_PARTITION__".equals(value)){
    return out;
  }

  switch (typeInfo.getCategory()) {
    case PRIMITIVE:
      final PrimitiveTypeInfo primitiveTypeInfo = (PrimitiveTypeInfo) typeInfo;
      switch (primitiveTypeInfo.getPrimitiveCategory()) {
        case BINARY:
          return out.setBinaryValue(ByteString.copyFrom(value.getBytes()));
        case BOOLEAN:
          return out.setBitValue(Boolean.parseBoolean(value));
        case DOUBLE:
          return out.setDoubleValue(Double.parseDouble(value));
        case FLOAT:
          return out.setFloatValue(Float.parseFloat(value));
        case BYTE:
        case SHORT:
        case INT:
          return out.setIntValue(Integer.parseInt(value));
        case LONG:
          return out.setLongValue(Long.parseLong(value));
        case STRING:
        case VARCHAR:
          return out.setStringValue(value);
        case CHAR:
          return out.setStringValue(value.trim());
        case TIMESTAMP:
          return out.setLongValue(DateTimes.toMillisFromJdbcTimestamp(value));
        case DATE:
          return out.setLongValue(DateTimes.toMillisFromJdbcDate(value));
        case DECIMAL:
          DecimalTypeInfo decimalTypeInfo = (DecimalTypeInfo) typeInfo;
          if(decimalTypeInfo.getPrecision() > 38){
            throw UserException.unsupportedError()
              .message("Dremio only supports decimals up to 38 digits in precision. This Hive table has a partition value with scale of %d digits.", decimalTypeInfo.getPrecision())
              .build(logger);
          }
          HiveDecimal decimal = HiveDecimalUtils.enforcePrecisionScale(HiveDecimal.create(value), decimalTypeInfo.precision(), decimalTypeInfo.scale());
          final BigDecimal original = decimal.bigDecimalValue();
          // we can't just use unscaledValue() since BigDecimal doesn't store trailing zeroes and we need to ensure decoding includes the correct scale.
          final BigInteger unscaled = original.movePointRight(decimalTypeInfo.scale()).unscaledValue();
          return out.setBinaryValue(ByteString.copyFrom(DecimalTools.signExtend16(unscaled.toByteArray())));
        default:
          HiveUtilities.throwUnsupportedHiveDataTypeError(primitiveTypeInfo.getPrimitiveCategory().toString());
      }
    default:
      HiveUtilities.throwUnsupportedHiveDataTypeError(typeInfo.getCategory().toString());
  }

  return null; // unreachable
}
 
Developer: dremio, Project: dremio-oss, Lines: 58, Source: DatasetBuilder.java
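
The trailing-zero comment in example 8 is worth a worked illustration: HiveDecimal drops trailing zeroes when normalizing, so the unscaled value alone can imply the wrong scale, and the code above therefore shifts to the declared scale with movePointRight before encoding. A standalone sketch of that arithmetic, assuming a column declared as decimal(5,2):

import java.math.BigDecimal;
import java.math.BigInteger;

public class DecimalScaleDemo {
  public static void main(String[] args) {
    int declaredScale = 2;                     // the column type's scale, e.g. decimal(5,2)
    BigDecimal value = new BigDecimal("12.3"); // trailing zero already dropped: scale 1

    // Naive encoding: 123 at scale 1; a reader expecting scale 2 decodes it as 1.23.
    BigInteger naive = value.unscaledValue();

    // Correct encoding: shift to the declared scale first, yielding 1230 (i.e. 12.30).
    BigInteger aligned = value.movePointRight(declaredScale).unscaledValue();

    System.out.println(naive + " vs " + aligned); // prints: 123 vs 1230
  }
}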

Example 9: getTypeInfo

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
/**
 * Gets the StructTypeInfo that declares the columns to be read from the configuration
 */
static StructTypeInfo getTypeInfo(Configuration conf) {
  StructTypeInfo inputTypeInfo = (StructTypeInfo) TypeInfoUtils.getTypeInfoFromTypeString(conf.get(INPUT_TYPE_INFO));
  LOG.debug("Got input typeInfo from conf: {}", inputTypeInfo);
  return inputTypeInfo;
}
 
Developer: HotelsDotCom, Project: corc, Lines: 9, Source: CorcInputFormat.java

Example 10: setFieldTypeInfo

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
private void setFieldTypeInfo() {
	this.fieldTypeInfo = TypeInfoUtils
			.getTypeInfoFromTypeString(this.fieldType);
}
 
Developer: rbheemana, Project: Cobol-to-Hive, Lines: 5, Source: CobolFieldDecl.java

Example 11: createJavaObjectInspectorFromFieldSchema

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
public static ObjectInspector createJavaObjectInspectorFromFieldSchema(String columnTypeString) {
  TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(columnTypeString);
  return TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
}
 
Developer: facebookarchive, Project: hive-dwrf, Lines: 5, Source: TestRecordReaderImpl.java

Example 12: testInspectorFromTypeInfo

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
@Test
public void testInspectorFromTypeInfo() throws Exception {
  TypeInfo typeInfo =
      TypeInfoUtils.getTypeInfoFromTypeString("struct<c1:boolean,c2:tinyint" +
          ",c3:smallint,c4:int,c5:bigint,c6:float,c7:double,c8:binary," +
          "c9:string,c10:struct<c1:int>,c11:map<int,int>,c12:uniontype<int>" +
          ",c13:array<timestamp>>");
  StructObjectInspector inspector = (StructObjectInspector)
      OrcLazyObjectInspectorUtils.createWritableObjectInspector(typeInfo);
  assertEquals("struct<c1:boolean,c2:tinyint,c3:smallint,c4:int,c5:" +
      "bigint,c6:float,c7:double,c8:binary,c9:string,c10:struct<" +
      "c1:int>,c11:map<int,int>,c12:uniontype<int>,c13:array<timestamp>>",
      inspector.getTypeName());
  assertEquals(null,
      inspector.getAllStructFieldRefs().get(0).getFieldComment());
  assertEquals(null, inspector.getStructFieldRef("UNKNOWN"));
  final List<String> fieldNames = ImmutableList.of(
      "field0", "field1", "field2", "field3", "field4",
      "field5", "field6", "field7", "field8", "field9",
      "field10", "field11", "field12");

  OrcStruct s1 = new OrcStruct(fieldNames);
  for(int i=0; i < 13; ++i) {
    s1.setFieldValue(i, i);
  }

  final List<Object> list = ImmutableList.of(
      (Object)0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12);
  assertEquals(list, inspector.getStructFieldsDataAsList(s1));
  ListObjectInspector listOI = (ListObjectInspector)
      inspector.getAllStructFieldRefs().get(12).getFieldObjectInspector();
  assertEquals(ObjectInspector.Category.LIST, listOI.getCategory());
  assertEquals(10, listOI.getListElement(list, 10));
  assertEquals(13, listOI.getListLength(list));

  final Map<Integer, Integer> map = ImmutableMap.of(1,2,
                                              2,4,
                                              3,6);
  MapObjectInspector mapOI = (MapObjectInspector)
      inspector.getAllStructFieldRefs().get(10).getFieldObjectInspector();
  assertEquals(3, mapOI.getMapSize(map));
  assertEquals(4, mapOI.getMapValueElement(map, 2));
}
 
Developer: facebookarchive, Project: hive-dwrf, Lines: 44, Source: TestOrcStruct.java

Example 13: schema

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
/**
 * Specify the schema of the file. All Hive types are supported. If the {@link #declaredFields(Fields)} and
 * {@link #columns(StructTypeInfo) columns(...)} options were not specified, the declared {@link Fields} will be
 * derived from the supplied schema {@link StructTypeInfo}.
 * <p/>
 * Should the respective schema {@link TypeInfo} for a column be different from the {@link TypeInfo} in the actual
 * ORC File then a {@link ClassCastException} will be thrown when reading the records.
 */
public SourceBuilder schema(String typeInfoString) {
  checkExistingSchema();
  schemaTypeInfo = (StructTypeInfo) TypeInfoUtils.getTypeInfoFromTypeString(typeInfoString);
  return this;
}
 
Developer: HotelsDotCom, Project: corc, Lines: 14, Source: OrcFile.java


Note: The org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getTypeInfoFromTypeString examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and the copyright of the source code belongs to the original authors. Please follow the corresponding project's license when distributing or using the code; do not reproduce without permission.