Java TypeInfo Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hive.serde2.typeinfo.TypeInfo. If you are wondering what the TypeInfo class does, how to use it, or what it looks like in practice, the curated examples below should help.


The TypeInfo class belongs to the org.apache.hadoop.hive.serde2.typeinfo package. A total of 15 code examples of the TypeInfo class are shown below, sorted by popularity by default.
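
Before the examples, here is a minimal standalone sketch (not taken from any of the projects below) of the two most common ways to obtain a TypeInfo: parsing a Hive type string with TypeInfoUtils, or building the type programmatically with TypeInfoFactory. The type string is illustrative.

import java.util.Arrays;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class TypeInfoDemo {
  public static void main( final String[] args ){
    // Parse a Hive type string into a TypeInfo tree.
    TypeInfo parsed = TypeInfoUtils.getTypeInfoFromTypeString( "struct<id:int,name:string>" );
    System.out.println( parsed.getCategory() ); // STRUCT
    System.out.println( parsed.getTypeName() ); // struct<id:int,name:string>

    // The same type built programmatically; TypeInfo instances compare structurally.
    TypeInfo built = TypeInfoFactory.getStructTypeInfo(
        Arrays.asList( "id" , "name" ),
        Arrays.<TypeInfo>asList( TypeInfoFactory.intTypeInfo , TypeInfoFactory.stringTypeInfo ) );
    System.out.println( parsed.equals( built ) ); // true
  }
}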

Example 1: getFromTypeInfo

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; // import the required package/class
public static OrcSerde getFromTypeInfo( final Configuration config , final TypeInfo typeInfo )throws IOException{
  ObjectInspector objectInspector = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo( typeInfo );
  if( !( typeInfo instanceof StructTypeInfo ) ){
    throw new IOException( "Input type info is not StructTypeInfo : " + typeInfo.toString() );
  }
  String columnsName = "";
  String columnsType = "";
  List<TypeInfo> typeInfoList = ( (StructTypeInfo)typeInfo ).getAllStructFieldTypeInfos();
  List<StructField> structField = (List<StructField>)( ( (StructObjectInspector)objectInspector ).getAllStructFieldRefs() );
  for( int i = 0 ; i < structField.size() ; i++ ){
    if( ! columnsName.isEmpty() ){
      columnsName = columnsName.concat( "," );
      columnsType = columnsType.concat( "," );
    }
    columnsName = columnsName.concat( structField.get(i).getFieldName() );
    columnsType = columnsType.concat( typeInfoList.get(i).toString() );
  }

  OrcSerde serde = new OrcSerde();
  Properties table = new Properties();
  table.setProperty( serdeConstants.LIST_COLUMNS , columnsName );
  table.setProperty( serdeConstants.LIST_COLUMN_TYPES , columnsType );
  serde.initialize( config , table );

  return serde;
}
 
Developer: yahoojapan, Project: dataplatform-schema-lib, Lines of code: 27, Source: OrcSerdeFactory.java
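
A hedged usage sketch for the factory above; OrcSerdeFactory.getFromTypeInfo and its signature come from the example, while the type string and surrounding code are illustrative.

// Inside a method that may throw IOException:
Configuration config = new Configuration();
TypeInfo schema = TypeInfoUtils.getTypeInfoFromTypeString( "struct<id:int,name:string>" );
OrcSerde serde = OrcSerdeFactory.getFromTypeInfo( config , schema );
// Passing a non-struct TypeInfo such as "int" would make getFromTypeInfo throw IOException.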

Example 2: getAllReadTypeInfo

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; // import the required package/class
private StructTypeInfo getAllReadTypeInfo( final String columnNameProperty , final String columnTypeProperty ){
  ArrayList<TypeInfo> fieldTypes = TypeInfoUtils.getTypeInfosFromTypeString( columnTypeProperty );
  ArrayList<String> columnNames = new ArrayList<String>();
  if ( columnNameProperty != null && 0 < columnNameProperty.length() ) {
    String[] columnNameArray = columnNameProperty.split(",");
    for( int i = 0 ; i < columnNameArray.length ; i++ ){
      columnNames.add( columnNameArray[i] );
      filedIndexMap.put( columnNameArray[i] , i );
    }
  }
  StructTypeInfo rootType = new StructTypeInfo();

  rootType.setAllStructFieldNames( columnNames );
  rootType.setAllStructFieldTypeInfos( fieldTypes );

  return rootType;
}
 
Developer: yahoojapan, Project: multiple-dimension-spread, Lines of code: 18, Source: MDSSerde.java

Example 3: getColumnProjectionTypeInfo

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; // import the required package/class
private StructTypeInfo getColumnProjectionTypeInfo( final String columnNameProperty , final String columnTypeProperty , final String projectionColumnNames ){
  Set<String> columnNameSet = new HashSet<String>();
  for( String columnName : projectionColumnNames.split(",") ){
    columnNameSet.add( columnName );
  }

  ArrayList<TypeInfo> fieldTypes = TypeInfoUtils.getTypeInfosFromTypeString( columnTypeProperty );
  String[] splitNames = columnNameProperty.split(",");

  ArrayList<String> projectionColumnNameList = new ArrayList<String>();
  ArrayList<TypeInfo> projectionFieldTypeList = new ArrayList<TypeInfo>();
  for( int i = 0 ; i < fieldTypes.size() ; i++ ){
    if( columnNameSet.contains( splitNames[i] ) ){
      projectionColumnNameList.add( splitNames[i] );
      projectionFieldTypeList.add( fieldTypes.get(i) );
    }
    filedIndexMap.put( splitNames[i] , i );
  }
  StructTypeInfo rootType = new StructTypeInfo();

  rootType.setAllStructFieldNames( projectionColumnNameList );
  rootType.setAllStructFieldTypeInfos( projectionFieldTypeList );

  return rootType;
}
 
Developer: yahoojapan, Project: multiple-dimension-spread, Lines of code: 26, Source: MDSSerde.java
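
The method above keeps only the columns named in projectionColumnNames (while still recording every column's index in filedIndexMap). Below is a standalone sketch of the same projection idea using only Hive's public TypeInfo API; the column names and types are illustrative, and the java.util imports plus the Hive typeinfo imports shown earlier are assumed.

ArrayList<TypeInfo> fieldTypes = TypeInfoUtils.getTypeInfosFromTypeString( "int,string,double" );
String[] names = "a,b,c".split( "," );
Set<String> wanted = new HashSet<String>( Arrays.asList( "a" , "c" ) );

ArrayList<String> keepNames = new ArrayList<String>();
ArrayList<TypeInfo> keepTypes = new ArrayList<TypeInfo>();
for( int i = 0 ; i < names.length ; i++ ){
  if( wanted.contains( names[i] ) ){
    keepNames.add( names[i] );
    keepTypes.add( fieldTypes.get(i) );
  }
}
StructTypeInfo projected = new StructTypeInfo();
projected.setAllStructFieldNames( keepNames );
projected.setAllStructFieldTypeInfos( keepTypes );
System.out.println( projected.getTypeName() ); // struct<a:int,c:double>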

Example 4: MDSMapObjectInspector

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; // import the required package/class
public MDSMapObjectInspector( final MapTypeInfo typeInfo ){
  TypeInfo keyTypeInfo = typeInfo.getMapKeyTypeInfo();
  if( keyTypeInfo.getCategory() == ObjectInspector.Category.PRIMITIVE && ( (PrimitiveTypeInfo)keyTypeInfo ).getPrimitiveCategory() == PrimitiveCategory.STRING ){
    keyObjectInspector = PrimitiveObjectInspectorFactory.javaStringObjectInspector;
  }
  else{
    throw new RuntimeException( "Map key type is string only." );
  }

  valueObjectInspector = MDSObjectInspectorFactory.craeteObjectInspectorFromTypeInfo( typeInfo.getMapValueTypeInfo() ); 

  if( valueObjectInspector.getCategory() == ObjectInspector.Category.PRIMITIVE ){
    getField = new PrimitiveGetField( (PrimitiveObjectInspector)valueObjectInspector );
  }
  else if( valueObjectInspector.getCategory() == ObjectInspector.Category.UNION ){
    getField = new UnionGetField( (UnionTypeInfo)( typeInfo.getMapValueTypeInfo() ) );
  }
  else{
    getField = new NestedGetField();
  }
}
 
Developer: yahoojapan, Project: multiple-dimension-spread, Lines of code: 22, Source: MDSMapObjectInspector.java

Example 5: HiveVectorizedReaderSetting

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; // import the required package/class
public HiveVectorizedReaderSetting( final FileSplit split , final JobConf job , final HiveReaderSetting hiveReaderConfig ) throws IOException{
  this.hiveReaderConfig = hiveReaderConfig;

  rbCtx = Utilities.getVectorizedRowBatchCtx( job );
  partitionValues = new Object[rbCtx.getPartitionColumnCount()];
  if( 0 < partitionValues.length ){
    rbCtx.getPartitionValues( rbCtx, job, split, partitionValues );
  }

  TypeInfo[] typeInfos = rbCtx.getRowColumnTypeInfos();
  columnNames = rbCtx.getRowColumnNames();
  needColumnIds = createNeedColumnId( ColumnProjectionUtils.getReadColumnIDs( job ) );

  projectionColumn = new boolean[columnNames.length];
  assignors = new IColumnVectorAssignor[columnNames.length];
  for( int id : needColumnIds ){
    projectionColumn[id] = true;
    assignors[id] = ColumnVectorAssignorFactory.create( typeInfos[id] );
  }
}
 
Developer: yahoojapan, Project: multiple-dimension-spread, Lines of code: 21, Source: HiveVectorizedReaderSetting.java

Example 6: HiveStructSchema

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; // import the required package/class
public HiveStructSchema( final StructContainerField schema ) throws IOException{
  this.schema = schema;

  StructTypeInfo structSchema = new StructTypeInfo();
  ArrayList<String> childKey = new ArrayList<String>();
  ArrayList<TypeInfo> childTypeInfo = new ArrayList<TypeInfo>();
  for( String key : schema.getKeys() ){
    TypeInfo typeInfo = HiveSchemaFactory.getHiveSchema( schema.get( key ) );
    childKey.add( key );
    childTypeInfo.add( typeInfo );
  }
  structSchema.setAllStructFieldNames( childKey );
  structSchema.setAllStructFieldTypeInfos( childTypeInfo );

  hiveSchema = structSchema;
}
 
Developer: yahoojapan, Project: dataplatform-schema-lib, Lines of code: 17, Source: HiveStructSchema.java

Example 7: getGeneralSchemaFromHCatFieldSchema

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; // import the required package/class
public static IField getGeneralSchemaFromHCatFieldSchema( final HCatFieldSchema hCatFieldSchema ) throws IOException{
  if( hCatFieldSchema.getCategory() == HCatFieldSchema.Category.ARRAY ){
    HCatSchema arrayElementSchema = hCatFieldSchema.getArrayElementSchema();
    return new ArrayContainerField( hCatFieldSchema.getName() , getGeneralSchemaFromHCatFieldSchema( arrayElementSchema.get(0) ) );
  }
  else if( hCatFieldSchema.getCategory() == HCatFieldSchema.Category.MAP ){
    HCatSchema mapValueElementSchema = hCatFieldSchema.getMapValueSchema();
    return new MapContainerField( hCatFieldSchema.getName() , getGeneralSchemaFromHCatFieldSchema( mapValueElementSchema.get(0) ) );
  }
  else if( hCatFieldSchema.getCategory() == HCatFieldSchema.Category.STRUCT ){
    HCatSchema structSchema = hCatFieldSchema.getStructSubSchema();
    StructContainerField field = new StructContainerField( hCatFieldSchema.getName() );
    for( int i = 0 ; i < structSchema.size() ; i++ ){
      field.set( getGeneralSchemaFromHCatFieldSchema( structSchema.get(i) ) );
    }
    return field;
  }
  else if( hCatFieldSchema.getCategory() == HCatFieldSchema.Category.PRIMITIVE ){
    TypeInfo typeInfo = hCatFieldSchema.getTypeInfo();
    return HiveSchemaFactory.getGeneralSchema( hCatFieldSchema.getName() , typeInfo );
  }
  else{
    throw new IOException( "Unknown HCatalog field type : " + hCatFieldSchema.toString() );
  }
}
 
Developer: yahoojapan, Project: dataplatform-schema-lib, Lines of code: 26, Source: HCatalogSchemaFactory.java

Example 8: checkArgumentTypes

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; // import the required package/class
private static void checkArgumentTypes(TypeInfo[] parameters) throws UDFArgumentTypeException {
  if (parameters.length != 2) {
    throw new UDFArgumentTypeException(parameters.length - 1,
        "Exactly two arguments are expected.");
  }

  if (parameters[0].getCategory() != ObjectInspector.Category.PRIMITIVE) {
    throw new UDFArgumentTypeException(0, "Only primitive type arguments are accepted but "
        + parameters[0].getTypeName() + " is passed.");
  }

  if (parameters[1].getCategory() != ObjectInspector.Category.PRIMITIVE) {
    throw new UDFArgumentTypeException(1, "Only primitive type arguments are accepted but "
        + parameters[1].getTypeName() + " is passed.");
  }

  if (!acceptedPrimitiveCategory(((PrimitiveTypeInfo) parameters[0]).getPrimitiveCategory())) {
    throw new UDFArgumentTypeException(0, "Only numeric type arguments are accepted but "
        + parameters[0].getTypeName() + " is passed.");

  }
  if (!acceptedPrimitiveCategory(((PrimitiveTypeInfo) parameters[1]).getPrimitiveCategory())) {
    throw new UDFArgumentTypeException(1, "Only numeric type arguments are accepted but "
        + parameters[1].getTypeName() + " is passed.");
  }
}
 
Developer: myui, Project: hive-udf-backports, Lines of code: 27, Source: GenericUDAFBinarySetFunctions.java
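
For context, a short sketch of the TypeInfo[] such a validator receives; TypeInfoFactory supplies the primitive singletons, and the example's acceptedPrimitiveCategory helper is defined elsewhere in its class.

// Two numeric primitive parameters, as passed for a two-argument aggregate:
TypeInfo[] ok = new TypeInfo[]{ TypeInfoFactory.doubleTypeInfo , TypeInfoFactory.longTypeInfo };
// A struct parameter would fail the PRIMITIVE category check with a UDFArgumentTypeException:
TypeInfo[] bad = new TypeInfo[]{
    TypeInfoUtils.getTypeInfoFromTypeString( "struct<a:int>" ),
    TypeInfoFactory.doubleTypeInfo };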

Example 9: getIndexPredicateAnalyzer

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; // import the required package/class
/**
 * Provide the {@link IndexPredicateAnalyzer} that will analyze the queries and determine
 * what predicates and what column types are supported by Monarch.
 *
 * @param deserializer the deserializer
 * @return the index predicate analyzer
 */
protected static IndexPredicateAnalyzer getIndexPredicateAnalyzer(final MonarchSerDe deserializer) {
  IndexPredicateAnalyzer ipa = new IndexPredicateAnalyzer();

  /** Allow predicate push-down only for the columns whose Hive type
   *   maps to a supported Monarch column type. **/
  List<String> columnNameList = deserializer.getColumnNames();
  List<TypeInfo> columnTypeList = deserializer.getColumnTypes();
  int size = columnNameList.size();
  if (size != columnTypeList.size()) {
    logger.warn("Column names and types do not match. Skipping predicate push-down.");
  } else {
    for (int i = 0; i < size; i++) {
      if (TYPE_HIVE_TO_MONARCH_MAP.get(columnTypeList.get(i).toString()) != null) {
        ipa.allowColumnName(columnNameList.get(i));
      }
    }
  }

  /** Support the following comparison operations for push-down. **/
  FUNC_HIVE_TO_MONARC_MAP.keySet().forEach(ipa::addComparisonOp);
  return ipa;
}
 
Developer: ampool, Project: monarch, Lines of code: 30, Source: MonarchPredicateHandler.java
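
For reference, IndexPredicateAnalyzer is driven entirely by allowed column names and comparison-operator UDF class names. A minimal hedged sketch follows; the column name is illustrative, and the operator class is Hive's standard equality UDF.

IndexPredicateAnalyzer ipa = new IndexPredicateAnalyzer();
ipa.allowColumnName( "id" );
ipa.addComparisonOp( "org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual" );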

Example 10: toMetacatType

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; // import the required package/class
@Override
public Type toMetacatType(final String type) {
    // Hack to fix presto "varchar" type coming in with no length which is required by Hive.
    final TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(
        "varchar".equals(type.toLowerCase()) ? serdeConstants.STRING_TYPE_NAME : type);
    ObjectInspector oi = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
    // The standard struct object inspector forces field names to lower case, however in Metacat we need to preserve
    // the original case of the struct fields so we wrap it with our wrapper to force the fieldNames to keep
    // their original case
    if (typeInfo.getCategory().equals(ObjectInspector.Category.STRUCT)) {
        final StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
        final StandardStructObjectInspector objectInspector = (StandardStructObjectInspector) oi;
        oi = new HiveTypeConverter.SameCaseStandardStructObjectInspector(
            structTypeInfo.getAllStructFieldNames(), objectInspector);
    }
    return getCanonicalType(oi);
}
 
Developer: Netflix, Project: metacat, Lines of code: 18, Source: HiveTypeConverter.java
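
The "varchar" hack above is needed because Hive's type-string parser requires an explicit length for varchar. A short sketch of the distinction, assuming the same TypeInfoUtils API:

TypeInfo sized = TypeInfoUtils.getTypeInfoFromTypeString( "varchar(10)" ); // parses fine
// Bare "varchar" carries no length, so the method above rewrites it to "string" first:
TypeInfo asString = TypeInfoUtils.getTypeInfoFromTypeString( serdeConstants.STRING_TYPE_NAME );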

Example 11: createStructObjectInspector

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; // import the required package/class
public static LazySimpleStructObjectInspector createStructObjectInspector(TypeInfo type,
    LazySerDeParameters serdeParams) {
  StructTypeInfo structTypeInfo = (StructTypeInfo) type;
  List<String> fieldNames = structTypeInfo.getAllStructFieldNames();
  List<TypeInfo> fieldTypeInfos = structTypeInfo.getAllStructFieldTypeInfos();
  List<ObjectInspector> fieldObjectInspectors = new ArrayList<ObjectInspector>(fieldTypeInfos.size());

  for (int i = 0; i < fieldTypeInfos.size(); i++) {
    fieldObjectInspectors.add(createObjectInspector(fieldTypeInfos.get(i), serdeParams));
  }

  return LazyObjectInspectorFactory.getLazySimpleStructObjectInspector(
      fieldNames, fieldObjectInspectors, null,
      serdeParams.getSeparators()[1],
      serdeParams, ObjectInspectorOptions.JAVA);
}
 
Developer: mini666, Project: hive-phoenix-handler, Lines of code: 19, Source: PhoenixObjectInspectorFactory.java

Example 12: deserialize

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; // import the required package/class
@Override
public Object deserialize(Writable writable)
        throws SerDeException {
    Row row = (Row) writable;

    // Since this implementation uses a StructObjectInspector return a list of deserialized values in the same
    // order as the original properties.

    int i = 0;
    for (Map.Entry<String, TypeInfo> column : _columns) {
        String columnName = column.getKey();
        TypeInfo type = column.getValue();

        // Get the raw value from traversing the JSON map
        Object rawValue = getRawValue(columnName, row);
        // Deserialize the value to the expected type
        Object value = deserialize(type, rawValue);

        _values.set(i++, value);
    }

    return _values;
}
 
Developer: bazaarvoice, Project: emodb, Lines of code: 24, Source: EmoSerDe.java

Example 13: deserializeStruct

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; // import the required package/class
private Object deserializeStruct(StructTypeInfo type, Object data)
        throws SerDeException {
    if (!(data instanceof Map)) {
        throw new SerDeException("Value not of type map");
    }
    //noinspection unchecked
    Map<String, Object> map = (Map<String, Object>) data;

    List<String> fieldNames = type.getAllStructFieldNames();
    List<TypeInfo> fieldTypes = type.getAllStructFieldTypeInfos();

    // When deserializing a struct the returned value is a list of values in the same order as the field names.

    List<Object> values = Lists.newArrayListWithCapacity(fieldNames.size());
    for (int i=0; i < fieldNames.size(); i++) {
        Object rawValue = getRawValueOrNullIfAbsent(fieldNames.get(i), map);
        Object value = deserialize(fieldTypes.get(i), rawValue);
        values.add(value);
    }

    return values;
}
 
Developer: bazaarvoice, Project: emodb, Lines of code: 23, Source: EmoSerDe.java

Example 14: DynamoDBObjectInspector

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; // import the required package/class
public DynamoDBObjectInspector(List<String> columnNames, List<TypeInfo> columnTypes,
    Map<String, String> columnMappings) {
  this.columnNames = columnNames;
  this.hiveDynamoDBColumnMappings = columnMappings;

  if (columnNames == null) {
    throw new RuntimeException("Null columns names passed");
  }

  if (columnTypes == null) {
    throw new RuntimeException("Null columns types passed");
  }

  structFields = new ArrayList<>();
  columnNameStructFieldMap = new HashMap<>();

  // Constructing struct field list for each column
  for (int i = 0; i < columnNames.size(); i++) {
    DynamoDBField field = new DynamoDBField(i, columnNames.get(i).toLowerCase(), TypeInfoUtils
        .getStandardJavaObjectInspectorFromTypeInfo(columnTypes.get(i)), columnTypes.get(i)
        .getTypeName());
    structFields.add(field);
    columnNameStructFieldMap.put(columnNames.get(i), field);
  }
}
 
Developer: awslabs, Project: emr-dynamodb-connector, Lines of code: 26, Source: DynamoDBObjectInspector.java

Example 15: initialize

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; // import the required package/class
@Override
public void initialize(Configuration conf, Properties tbl) throws SerDeException {
  serdeParams = ShimsLoader.getHiveShims()
      .getSerDeParametersShim(conf, tbl, getClass().getName());
  String specifiedColumnMapping = tbl.getProperty(DynamoDBConstants.DYNAMODB_COLUMN_MAPPING);

  for (TypeInfo type : serdeParams.getColumnTypes()) {
    if (HiveDynamoDBTypeFactory.getTypeObjectFromHiveType(type.getTypeName()) == null) {
      throw new SerDeException("Unsupported type: " + type.getTypeName());
    }
  }

  log.info("Provided column mapping: " + specifiedColumnMapping);
  columnMappings = Maps.newHashMap();
  if (!Strings.isNullOrEmpty(specifiedColumnMapping)) {
    columnMappings = HiveDynamoDBUtil.getHiveToDynamoDBSchemaMapping(specifiedColumnMapping);
  }
  addDefaultColumnMappings(serdeParams.getColumnNames());

  log.info("Final column mapping: " + columnMappings);
  objectInspector = new DynamoDBObjectInspector(serdeParams.getColumnNames(), serdeParams
      .getColumnTypes(), columnMappings);
}
 
Developer: awslabs, Project: emr-dynamodb-connector, Lines of code: 24, Source: DynamoDBExportSerDe.java


Note: The org.apache.hadoop.hive.serde2.typeinfo.TypeInfo class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their developers; the source code is copyrighted by its original authors, and its distribution and use are subject to each project's License. Do not reproduce without permission.