

Java TypeInfoUtils Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils. If you have been wondering what exactly TypeInfoUtils does, how to use it, or where to find working examples, the curated class examples below should help.


The TypeInfoUtils class belongs to the org.apache.hadoop.hive.serde2.typeinfo package. Fifteen code examples of the class are shown below, sorted by popularity.
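
Before the examples, here is a minimal, self-contained sketch of the two TypeInfoUtils calls that recur throughout this page: parsing a Hive type string into a TypeInfo and deriving a standard ObjectInspector from it. The class name TypeInfoUtilsDemo and the type string are ours, for illustration only.

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class TypeInfoUtilsDemo {
  public static void main(String[] args) {
    // Parse a Hive type string into a TypeInfo tree.
    TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString("struct<id:int,name:string>");

    // Struct types expose their field names and field types.
    StructTypeInfo structInfo = (StructTypeInfo) typeInfo;
    System.out.println("fields = " + structInfo.getAllStructFieldNames());

    // Derive a standard Java-object inspector for the same type.
    ObjectInspector oi = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
    System.out.println("category = " + oi.getCategory());
  }
}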

Example 1: getFromTypeInfo

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the required package/class
public static OrcSerde getFromTypeInfo( final Configuration config , final TypeInfo typeInfo ) throws IOException{
  // Validate the input before building an inspector from it.
  if( !( typeInfo instanceof StructTypeInfo ) ){
    throw new IOException( "Input type info is not StructTypeInfo : " + typeInfo.toString() );
  }
  ObjectInspector objectInspector = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo( typeInfo );
  String columnsName = "";
  String columnsType = "";
  List<TypeInfo> typeInfoList = ( (StructTypeInfo)typeInfo ).getAllStructFieldTypeInfos();
  List<StructField> structField = (List<StructField>)( ( (StructObjectInspector)objectInspector ).getAllStructFieldRefs() );
  for( int i = 0 ; i < structField.size() ; i++ ){
    if( ! columnsName.isEmpty() ){
      columnsName = columnsName.concat( "," );
      columnsType = columnsType.concat( "," );
    }
    columnsName = columnsName.concat( structField.get(i).getFieldName() );
    columnsType = columnsType.concat( typeInfoList.get(i).toString() );
  }

  OrcSerde serde = new OrcSerde();
  Properties table = new Properties();
  table.setProperty( serdeConstants.LIST_COLUMNS , columnsName );
  table.setProperty( serdeConstants.LIST_COLUMN_TYPES , columnsType );
  serde.initialize( config , table );

  return serde;
}
 
Developer: yahoojapan, Project: dataplatform-schema-lib, Lines: 27, Source: OrcSerdeFactory.java
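
As a usage note, a hypothetical call site for the factory above might look like the following sketch. The OrcSerdeFactoryUsage class and the type string are ours, and we assume OrcSerdeFactory (above) is on the classpath; any struct type string works, while a non-struct TypeInfo would make getFromTypeInfo throw IOException.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.io.orc.OrcSerde;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class OrcSerdeFactoryUsage {
  public static void main(String[] args) throws Exception {
    // Any struct type string works here; this one is purely illustrative.
    TypeInfo rowType = TypeInfoUtils.getTypeInfoFromTypeString("struct<id:int,name:string>");
    OrcSerde serde = OrcSerdeFactory.getFromTypeInfo(new Configuration(), rowType);
    System.out.println(serde.getObjectInspector().getTypeName());
  }
}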

Example 2: getAllReadTypeInfo

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the required package/class
private StructTypeInfo getAllReadTypeInfo( final String columnNameProperty , final String columnTypeProperty ){
  ArrayList<TypeInfo> fieldTypes = TypeInfoUtils.getTypeInfosFromTypeString( columnTypeProperty );
  ArrayList<String> columnNames = new ArrayList<String>();
  if ( columnNameProperty != null && 0 < columnNameProperty.length() ) {
    String[] columnNameArray = columnNameProperty.split(",");
    for( int i = 0 ; i < columnNameArray.length ; i++ ){
      columnNames.add( columnNameArray[i] );
      filedIndexMap.put( columnNameArray[i] , i );
    }
  }
  StructTypeInfo rootType = new StructTypeInfo();

  rootType.setAllStructFieldNames( columnNames );
  rootType.setAllStructFieldTypeInfos( fieldTypes );

  return rootType;
}
 
Developer: yahoojapan, Project: multiple-dimension-spread, Lines: 18, Source: MDSSerde.java

Example 3: getColumnProjectionTypeInfo

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the required package/class
private StructTypeInfo getColumnProjectionTypeInfo( final String columnNameProperty , final String columnTypeProperty , final String projectionColumnNames ){
  Set<String> columnNameSet = new HashSet<String>();
  for( String columnName : projectionColumnNames.split(",") ){
    columnNameSet.add( columnName );
  }

  ArrayList<TypeInfo> fieldTypes = TypeInfoUtils.getTypeInfosFromTypeString( columnTypeProperty );
  String[] splitNames = columnNameProperty.split(",");

  ArrayList<String> projectionColumnNameList = new ArrayList<String>();
  ArrayList<TypeInfo> projectionFieldTypeList = new ArrayList<TypeInfo>();
  for( int i = 0 ; i < fieldTypes.size() ; i++ ){
    if( columnNameSet.contains( splitNames[i] ) ){
      projectionColumnNameList.add( splitNames[i] );
      projectionFieldTypeList.add( fieldTypes.get(i) );
    }
    filedIndexMap.put( splitNames[i] , i );
  }
  StructTypeInfo rootType = new StructTypeInfo();

  rootType.setAllStructFieldNames( projectionColumnNameList );
  rootType.setAllStructFieldTypeInfos( projectionFieldTypeList );

  return rootType;
}
 
Developer: yahoojapan, Project: multiple-dimension-spread, Lines: 26, Source: MDSSerde.java

Example 4: getRowType

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the required package/class
@Override
public RelDataType getRowType(RelDataTypeFactory typeFactory) {
  List<RelDataType> typeList = Lists.newArrayList();
  List<String> fieldNameList = Lists.newArrayList();

  List<FieldSchema> hiveFields = hiveTable.getCols();
  for(FieldSchema hiveField : hiveFields) {
    fieldNameList.add(hiveField.getName());
    typeList.add(getNullableRelDataTypeFromHiveType(
        typeFactory, TypeInfoUtils.getTypeInfoFromTypeString(hiveField.getType())));
  }

  for (FieldSchema field : hiveTable.getPartitionKeys()) {
    fieldNameList.add(field.getName());
    typeList.add(getNullableRelDataTypeFromHiveType(
        typeFactory, TypeInfoUtils.getTypeInfoFromTypeString(field.getType())));
  }

  return typeFactory.createStructType(typeList, fieldNameList);
}
 
Developer: skhalifa, Project: QDrill, Lines: 21, Source: DrillHiveTable.java

Example 5: toMetacatType

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the required package/class
@Override
public Type toMetacatType(final String type) {
    // Hack to fix presto "varchar" type coming in with no length which is required by Hive.
    final TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(
        "varchar".equals(type.toLowerCase()) ? serdeConstants.STRING_TYPE_NAME : type);
    ObjectInspector oi = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
    // The standard struct object inspector forces field names to lower case, however in Metacat we need to preserve
    // the original case of the struct fields so we wrap it with our wrapper to force the fieldNames to keep
    // their original case
    if (typeInfo.getCategory().equals(ObjectInspector.Category.STRUCT)) {
        final StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
        final StandardStructObjectInspector objectInspector = (StandardStructObjectInspector) oi;
        oi = new HiveTypeConverter.SameCaseStandardStructObjectInspector(
            structTypeInfo.getAllStructFieldNames(), objectInspector);
    }
    return getCanonicalType(oi);
}
 
Developer: Netflix, Project: metacat, Lines: 18, Source: HiveTypeConverter.java
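
To see the lower-casing behavior the comment in this example works around, here is a small sketch (class name LowerCaseDemo is ours), assuming the standard struct inspector folds field names to lower case as the example's comment states:

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class LowerCaseDemo {
  public static void main(String[] args) {
    // The TypeInfo keeps the field name as written ...
    TypeInfo t = TypeInfoUtils.getTypeInfoFromTypeString("struct<MyField:int>");
    // ... but the standard struct inspector lower-cases it, which is what
    // Metacat's SameCaseStandardStructObjectInspector wrapper undoes.
    ObjectInspector oi = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(t);
    System.out.println(oi.getTypeName()); // expected: struct<myfield:int>
  }
}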

Example 6: DynamoDBObjectInspector

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the required package/class
public DynamoDBObjectInspector(List<String> columnNames, List<TypeInfo> columnTypes,
    Map<String, String> columnMappings) {
  this.columnNames = columnNames;
  this.hiveDynamoDBColumnMappings = columnMappings;

  if (columnNames == null) {
    throw new RuntimeException("Null columns names passed");
  }

  if (columnTypes == null) {
    throw new RuntimeException("Null columns types passed");
  }

  structFields = new ArrayList<>();
  columnNameStructFieldMap = new HashMap<>();

  // Constructing struct field list for each column
  for (int i = 0; i < columnNames.size(); i++) {
    DynamoDBField field = new DynamoDBField(i, columnNames.get(i).toLowerCase(), TypeInfoUtils
        .getStandardJavaObjectInspectorFromTypeInfo(columnTypes.get(i)), columnTypes.get(i)
        .getTypeName());
    structFields.add(field);
    columnNameStructFieldMap.put(columnNames.get(i), field);
  }
}
 
Developer: awslabs, Project: emr-dynamodb-connector, Lines: 26, Source: DynamoDBObjectInspector.java

Example 7: getConstStringArray

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the required package/class
@Nullable
public static String[] getConstStringArray(@Nonnull final ObjectInspector oi)
        throws UDFArgumentException {
    if (!ObjectInspectorUtils.isConstantObjectInspector(oi)) {
        throw new UDFArgumentException("argument must be a constant value: "
                + TypeInfoUtils.getTypeInfoFromObjectInspector(oi));
    }
    ConstantObjectInspector constOI = (ConstantObjectInspector) oi;
    if (constOI.getCategory() != Category.LIST) {
        throw new UDFArgumentException("argument must be an array: "
                + TypeInfoUtils.getTypeInfoFromObjectInspector(oi));
    }
    final List<?> lst = (List<?>) constOI.getWritableConstantValue();
    if (lst == null) {
        return null;
    }
    final int size = lst.size();
    final String[] ary = new String[size];
    for (int i = 0; i < size; i++) {
        Object o = lst.get(i);
        if (o != null) {
            ary[i] = o.toString();
        }
    }
    return ary;
}
 
Developer: apache, Project: incubator-hivemall, Lines: 27, Source: HiveUtils.java
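
For context, a hedged sketch of how this helper might be exercised outside a real UDF, assuming Hive's ObjectInspectorUtils.getConstantObjectInspector can fabricate the constant list argument (the ConstStringArrayDemo class and the values are ours):

import java.util.Arrays;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

public class ConstStringArrayDemo {
  public static void main(String[] args) throws Exception {
    // Build a constant array<string> ObjectInspector, as Hive does for literal arguments.
    ObjectInspector elemOI = PrimitiveObjectInspectorFactory.javaStringObjectInspector;
    ObjectInspector listOI = ObjectInspectorFactory.getStandardListObjectInspector(elemOI);
    ObjectInspector constOI =
        ObjectInspectorUtils.getConstantObjectInspector(listOI, Arrays.asList("a", "b"));

    // HiveUtils.getConstStringArray (above) unwraps it back into a String[].
    String[] values = HiveUtils.getConstStringArray(constOI);
    System.out.println(Arrays.toString(values)); // expected: [a, b]
  }
}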

Example 8: structObjectInspector

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the required package/class
static StandardStructObjectInspector structObjectInspector(Properties tableProperties) {
    // extract column info - don't use Hive constants as they were renamed in 0.9, breaking compatibility
    // the column names are saved because the inspector handed to #serialize doesn't preserve them (maybe because it's an external table)
    // use the class since StructType requires it ...
    List<String> columnNames = StringUtils.tokenize(tableProperties.getProperty(HiveConstants.COLUMNS), ",");
    List<TypeInfo> colTypes = TypeInfoUtils.getTypeInfosFromTypeString(tableProperties.getProperty(HiveConstants.COLUMNS_TYPES));

    // create a standard writable Object Inspector - used later on by serialization/deserialization
    List<ObjectInspector> inspectors = new ArrayList<ObjectInspector>();

    for (TypeInfo typeInfo : colTypes) {
        inspectors.add(TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(typeInfo));
    }

    return ObjectInspectorFactory.getStandardStructObjectInspector(columnNames, inspectors);
}
 
Developer: xushjie1987, Project: es-hadoop-v2.2.0, Lines: 17, Source: HiveUtils.java
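
The same pattern works without the table-properties plumbing. A minimal sketch (class name WritableStructOiDemo and the column lists are ours) that builds a writable struct inspector directly:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StandardStructObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class WritableStructOiDemo {
  public static void main(String[] args) {
    List<String> names = Arrays.asList("id", "name");
    List<TypeInfo> types = TypeInfoUtils.getTypeInfosFromTypeString("int,string");

    List<ObjectInspector> inspectors = new ArrayList<ObjectInspector>();
    for (TypeInfo t : types) {
      // Writable inspectors expect Hadoop Writable wrappers (IntWritable, Text, ...).
      inspectors.add(TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(t));
    }
    StandardStructObjectInspector oi =
        ObjectInspectorFactory.getStandardStructObjectInspector(names, inspectors);
    System.out.println(oi.getTypeName()); // struct<id:int,name:string>
  }
}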

Example 9: getRowType

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the required package/class
@Override
public RelDataType getRowType(RelDataTypeFactory typeFactory) {
  List<RelDataType> typeList = Lists.newArrayList();
  List<String> fieldNameList = Lists.newArrayList();

  List<FieldSchema> hiveFields = hiveTable.getColumnListsCache().getColumns(0);
  for(FieldSchema hiveField : hiveFields) {
    fieldNameList.add(hiveField.getName());
    typeList.add(getNullableRelDataTypeFromHiveType(
        typeFactory, TypeInfoUtils.getTypeInfoFromTypeString(hiveField.getType())));
  }

  for (FieldSchema field : hiveTable.getPartitionKeys()) {
    fieldNameList.add(field.getName());
    typeList.add(getNullableRelDataTypeFromHiveType(
        typeFactory, TypeInfoUtils.getTypeInfoFromTypeString(field.getType())));
  }

  return typeFactory.createStructType(typeList, fieldNameList);
}
 
Developer: axbaretto, Project: drill, Lines: 21, Source: DrillHiveTable.java

Example 10: typical

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the required package/class
@Test
public void typical() throws IOException {
  TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString("struct<a:string>");
  ObjectInspector inspector = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
  WriterOptions options = OrcFile.writerOptions(conf).inspector(inspector);

  Path path = new Path(temporaryFolder.getRoot().getCanonicalPath(), "part-00000");

  Writer writer = OrcFile.createWriter(path, options);
  writer.addRow(Arrays.asList("hello"));
  writer.close();

  try (OrcReader reader = new OrcReader(conf, path)) {
    List<Object> next = reader.next();
    assertThat(next.size(), is(1));
    assertThat(next.get(0), is((Object) "hello"));
    assertThat(reader.hasNext(), is(false));
  }

}
 
Developer: HotelsDotCom, Project: corc, Lines: 21, Source: OrcReaderTest.java

Example 11: initialize

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the required package/class
/**
 * An initialization function used to gather information about the table.
 * Typically, a SerDe implementation will be interested in the list of
 * column names and their types. That information will be used to help perform
 * actual serialization and deserialization of data.
 */
@Override
public void initialize(Configuration conf, Properties tbl)
        throws SerDeException {
    // Get a list of the table's column names.
    String colNamesStr = tbl.getProperty(serdeConstants.LIST_COLUMNS);
    colNames = Arrays.asList(colNamesStr.split(","));

    // Get a list of TypeInfos for the columns. This list lines up with
    // the list of column names.
    String colTypesStr = tbl.getProperty(serdeConstants.LIST_COLUMN_TYPES);
    List<TypeInfo> colTypes =
            TypeInfoUtils.getTypeInfosFromTypeString(colTypesStr);

    rowTypeInfo =
            (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(colNames, colTypes);
    rowOI =
            TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(rowTypeInfo);
}
 
Developer: scaleoutsoftware, Project: hServer, Lines: 25, Source: JsonSerDe.java
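
A hedged sketch of how such a SerDe gets wired up (class name SerDeInitDemo and the column values are ours; in a real deployment Hive supplies these properties from the metastore):

import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.serde.serdeConstants;

public class SerDeInitDemo {
  public static void main(String[] args) throws Exception {
    // Table properties in the shape initialize() above expects.
    Properties tbl = new Properties();
    tbl.setProperty(serdeConstants.LIST_COLUMNS, "id,name");
    tbl.setProperty(serdeConstants.LIST_COLUMN_TYPES, "int,string");

    JsonSerDe serde = new JsonSerDe(); // the SerDe defined in the example above
    serde.initialize(new Configuration(), tbl);
  }
}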

Example 12: initialize

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the required package/class
/**
 * An initialization function used to gather information about the table.
 * Typically, a SerDe implementation will be interested in the list of
 * column names and their types. That information will be used to help perform
 * actual serialization and deserialization of data.
 */
@Override
public void initialize(Configuration conf, Properties tbl)
		throws SerDeException {
	// Get a list of the table's column names.
	String colNamesStr = tbl.getProperty(serdeConstants.LIST_COLUMNS);
	colNames = Arrays.asList(colNamesStr.split(","));

	// Get a list of TypeInfos for the columns. This list lines up with
	// the list of column names.
	String colTypesStr = tbl.getProperty(serdeConstants.LIST_COLUMN_TYPES);
	List<TypeInfo> colTypes =
			TypeInfoUtils.getTypeInfosFromTypeString(colTypesStr);

	rowTypeInfo =
			(StructTypeInfo) TypeInfoFactory.getStructTypeInfo(colNames, colTypes);
	rowOI =
			TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(rowTypeInfo);
}
 
Developer: micmiu, Project: bigdata-tutorial, Lines: 25, Source: JSONCDHSerDe.java

Example 13: initialize

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the required package/class
@Override
public void initialize(final Configuration conf, final Properties tbl)
		throws SerDeException {
	log.debug("conf="+conf);
	log.debug("tblProperties="+tbl);
	final String facetType = tbl.getProperty(ConfigurationUtil.SOLR_FACET_MAPPING);
	final String columnString = tbl.getProperty(ConfigurationUtil.SOLR_COLUMN_MAPPING);
	if (StringUtils.isBlank(facetType)) {
		if (StringUtils.isBlank(columnString)) {
			throw new SerDeException("No facet mapping found, using "+ ConfigurationUtil.SOLR_COLUMN_MAPPING);
		}
		final String[] columnNamesArray = ConfigurationUtil.getAllColumns(columnString);
		colNames = Arrays.asList(columnNamesArray);
		log.debug(ConfigurationUtil.SOLR_COLUMN_MAPPING+" = " + colNames);
		row = new ArrayList<Object>(columnNamesArray.length);
	} else {
		row = new ArrayList<Object>(2);
		colNames = Arrays.asList(StringUtils.split(tbl.getProperty(Constants.LIST_COLUMNS),","));
	}
	
	colTypes = TypeInfoUtils.getTypeInfosFromTypeString(tbl.getProperty(Constants.LIST_COLUMN_TYPES));
	rowTypeInfo = (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(colNames, colTypes);
	rowOI = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(rowTypeInfo);
	log.debug("colNames="+colNames+" rowIO="+rowOI);
}
 
Developer: vroyer, Project: hive-solr-search, Lines: 26, Source: SolrSerDe.java

Example 14: getMetadata

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the required package/class
@Override
public synchronized LensResultSetMetadata getMetadata() throws LensException {
  if (lensResultMeta == null) {
    JDBCResultSetMetadata jdbcResultSetMetadata = new JDBCResultSetMetadata();
    jdbcResultSetMetadata.setFieldSchemas(new ArrayList<FieldSchemaData>());
    try {
      ResultSetMetaData rsmeta = getRsMetadata();

      for (int i = 1; i <= rsmeta.getColumnCount(); i++) {
        FieldSchemaData col = new FieldSchemaData(rsmeta.getColumnName(i),
          TypeInfoUtils.getTypeInfoFromTypeString(getHiveTypeForSQLType(i, rsmeta)).getTypeName(),
          rsmeta.getColumnTypeName(i));
        jdbcResultSetMetadata.getFieldSchemas().add(col);
      }
    } catch (Exception e) {
      log.error("Error getting JDBC type information: {}", e.getMessage(), e);
      jdbcResultSetMetadata.setFieldSchemas(null);
    }
    lensResultMeta = jdbcResultSetMetadata;
  }
  return lensResultMeta;
}
 
Developer: apache, Project: lens, Lines: 23, Source: JDBCResultSet.java
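
The getTypeInfoFromTypeString(...).getTypeName() round trip in this example both validates the mapped type string and normalizes it to Hive's canonical spelling. A tiny sketch (class name TypeNameDemo is ours):

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class TypeNameDemo {
  public static void main(String[] args) {
    // An invalid type string would throw here; a valid one comes back canonicalized.
    System.out.println(TypeInfoUtils.getTypeInfoFromTypeString("decimal(10,2)").getTypeName());
  }
}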

Example 15: initialize

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the required package/class
@Override
public void initialize(Configuration conf, Properties tbl) throws SerDeException {
  String columnNameProperty = tbl.getProperty(LIST_COLUMNS);
  String columnTypeProperty = tbl.getProperty(LIST_COLUMN_TYPES);
  List<String> columnNames = Arrays.asList(columnNameProperty.split(","));
  List<TypeInfo> columnTypes = TypeInfoUtils.getTypeInfosFromTypeString(columnTypeProperty);

  List<ObjectInspector> columnObjectInspectors = new ArrayList<ObjectInspector>(columnNames.size());
  ObjectInspector colObjectInspector;
  for (int col = 0; col < columnNames.size(); col++) {
    colObjectInspector = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(columnTypes.get(col));
    columnObjectInspectors.add(colObjectInspector);
  }

  cachedObjectInspector = ObjectInspectorFactory
    .getColumnarStructObjectInspector(columnNames, columnObjectInspectors);
}
 
Developer: apache, Project: lens, Lines: 18, Source: DBSerde.java


Note: the org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their authors; copyright remains with the original authors, and distribution and use are subject to each project's License. Do not reproduce without permission.