

Java TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo Method Code Examples

This article collects typical code examples of the Java method org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo, compiled from open-source projects. If you are wondering what TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo does, how to call it, or how it is used in practice, the curated examples below should help. You can also explore other usage examples of the containing class, org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.


Below are 15 code examples of TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
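Before the examples, here is a minimal self-contained sketch of what the method does: given a Hive TypeInfo (typically parsed from a type string), it returns a standard Java ObjectInspector describing that type. The struct type string below is purely illustrative.

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class InspectorDemo {
  public static void main(String[] args) {
    // Parse a Hive type string into a TypeInfo tree.
    TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString("struct<id:int,name:string>");
    // Obtain the standard Java ObjectInspector for that type.
    ObjectInspector oi = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
    // For struct types the result is a StructObjectInspector exposing each field.
    StructObjectInspector soi = (StructObjectInspector) oi;
    for (StructField field : soi.getAllStructFieldRefs()) {
      System.out.println(field.getFieldName() + " : " + field.getFieldObjectInspector().getTypeName());
    }
  }
}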

Example 1: getFromTypeInfo

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
public static OrcSerde getFromTypeInfo( final Configuration config , final TypeInfo typeInfo ) throws IOException{
  if( !( typeInfo instanceof StructTypeInfo ) ){
    throw new IOException( "Input type info is not StructTypeInfo : " + typeInfo.toString() );
  }
  ObjectInspector objectInspector = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo( typeInfo );
  String columnsName = "";
  String columnsType = "";
  List<TypeInfo> typeInfoList = ( (StructTypeInfo)typeInfo ).getAllStructFieldTypeInfos();
  List<StructField> structField = (List<StructField>)( ( (StructObjectInspector)objectInspector ).getAllStructFieldRefs() );
  for( int i = 0 ; i < structField.size() ; i++ ){
    if( ! columnsName.isEmpty() ){
      columnsName = columnsName.concat( "," );
      columnsType = columnsType.concat( "," );
    }
    columnsName = columnsName.concat( structField.get(i).getFieldName() );
    columnsType = columnsType.concat( typeInfoList.get(i).toString() );
  }

  OrcSerde serde = new OrcSerde();
  Properties table = new Properties();
  table.setProperty( serdeConstants.LIST_COLUMNS , columnsName );
  table.setProperty( serdeConstants.LIST_COLUMN_TYPES , columnsType );
  serde.initialize( config , table );

  return serde;
}
 
Developer: yahoojapan, Project: dataplatform-schema-lib, Lines: 27, Source: OrcSerdeFactory.java
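For reference, a hypothetical call site for this factory method might look like the following; the struct type string and Configuration are assumptions for illustration.

Configuration config = new Configuration();
// Hypothetical schema; any StructTypeInfo works here.
TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString("struct<id:int,name:string>");
OrcSerde serde = OrcSerdeFactory.getFromTypeInfo(config, typeInfo);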

Example 2: toMetacatType

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
@Override
public Type toMetacatType(final String type) {
    // Hack to fix presto "varchar" type coming in with no length which is required by Hive.
    final TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(
        "varchar".equals(type.toLowerCase()) ? serdeConstants.STRING_TYPE_NAME : type);
    ObjectInspector oi = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
    // The standard struct object inspector forces field names to lower case, however in Metacat we need to preserve
    // the original case of the struct fields so we wrap it with our wrapper to force the fieldNames to keep
    // their original case
    if (typeInfo.getCategory().equals(ObjectInspector.Category.STRUCT)) {
        final StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
        final StandardStructObjectInspector objectInspector = (StandardStructObjectInspector) oi;
        oi = new HiveTypeConverter.SameCaseStandardStructObjectInspector(
            structTypeInfo.getAllStructFieldNames(), objectInspector);
    }
    return getCanonicalType(oi);
}
 
Developer: Netflix, Project: metacat, Lines: 18, Source: HiveTypeConverter.java
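A hypothetical invocation of this converter, assuming a plain HiveTypeConverter instance and the Type return shown above (the mixed-case type string is illustrative):

HiveTypeConverter converter = new HiveTypeConverter();
// Struct field names keep their original case thanks to the wrapper above.
Type structType = converter.toMetacatType("struct<Id:int,Name:string>");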

Example 3: DynamoDBObjectInspector

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
public DynamoDBObjectInspector(List<String> columnNames, List<TypeInfo> columnTypes,
    Map<String, String> columnMappings) {
  this.columnNames = columnNames;
  this.hiveDynamoDBColumnMappings = columnMappings;

  if (columnNames == null) {
    throw new RuntimeException("Null columns names passed");
  }

  if (columnTypes == null) {
    throw new RuntimeException("Null columns types passed");
  }

  structFields = new ArrayList<>();
  columnNameStructFieldMap = new HashMap<>();

  // Constructing struct field list for each column
  for (int i = 0; i < columnNames.size(); i++) {
    DynamoDBField field = new DynamoDBField(i, columnNames.get(i).toLowerCase(), TypeInfoUtils
        .getStandardJavaObjectInspectorFromTypeInfo(columnTypes.get(i)), columnTypes.get(i)
        .getTypeName());
    structFields.add(field);
    columnNameStructFieldMap.put(columnNames.get(i), field);
  }
}
 
Developer: awslabs, Project: emr-dynamodb-connector, Lines: 26, Source: DynamoDBObjectInspector.java

Example 4: typical

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
@Test
public void typical() throws IOException {
  TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString("struct<a:string>");
  ObjectInspector inspector = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
  WriterOptions options = OrcFile.writerOptions(conf).inspector(inspector);

  Path path = new Path(temporaryFolder.getRoot().getCanonicalPath(), "part-00000");

  Writer writer = OrcFile.createWriter(path, options);
  writer.addRow(Arrays.asList("hello"));
  writer.close();

  try (OrcReader reader = new OrcReader(conf, path)) {
    List<Object> next = reader.next();
    assertThat(next.size(), is(1));
    assertThat(next.get(0), is((Object) "hello"));
    assertThat(reader.hasNext(), is(false));
  }

}
 
Developer: HotelsDotCom, Project: corc, Lines: 21, Source: OrcReaderTest.java

Example 5: initialize

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
/**
 * An initialization function used to gather information about the table.
 * Typically, a SerDe implementation will be interested in the list of
 * column names and their types. That information will be used to help perform
 * actual serialization and deserialization of data.
 */
@Override
public void initialize(Configuration conf, Properties tbl)
        throws SerDeException {
    // Get a list of the table's column names.
    String colNamesStr = tbl.getProperty(serdeConstants.LIST_COLUMNS);
    colNames = Arrays.asList(colNamesStr.split(","));

    // Get a list of TypeInfos for the columns. This list lines up with
    // the list of column names.
    String colTypesStr = tbl.getProperty(serdeConstants.LIST_COLUMN_TYPES);
    List<TypeInfo> colTypes =
            TypeInfoUtils.getTypeInfosFromTypeString(colTypesStr);

    rowTypeInfo =
            (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(colNames, colTypes);
    rowOI =
            TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(rowTypeInfo);
}
 
Developer: scaleoutsoftware, Project: hServer, Lines: 25, Source: JsonSerDe.java
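To see how this initialize method is driven, here is a hedged sketch of exercising it directly with hand-built table properties; the column names and types are made up for illustration.

Properties tbl = new Properties();
tbl.setProperty(serdeConstants.LIST_COLUMNS, "id,name,score");
tbl.setProperty(serdeConstants.LIST_COLUMN_TYPES, "int,string,double");
JsonSerDe serde = new JsonSerDe();
serde.initialize(new Configuration(), tbl);
// rowOI now inspects rows of struct<id:int,name:string,score:double>.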

Example 6: initialize

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
/**
 * An initialization function used to gather information about the table.
 * Typically, a SerDe implementation will be interested in the list of
 * column names and their types. That information will be used to help perform
 * actual serialization and deserialization of data.
 */
@Override
public void initialize(Configuration conf, Properties tbl)
		throws SerDeException {
	// Get a list of the table's column names.
	String colNamesStr = tbl.getProperty(serdeConstants.LIST_COLUMNS);
	colNames = Arrays.asList(colNamesStr.split(","));

	// Get a list of TypeInfos for the columns. This list lines up with
	// the list of column names.
	String colTypesStr = tbl.getProperty(serdeConstants.LIST_COLUMN_TYPES);
	List<TypeInfo> colTypes =
			TypeInfoUtils.getTypeInfosFromTypeString(colTypesStr);

	rowTypeInfo =
			(StructTypeInfo) TypeInfoFactory.getStructTypeInfo(colNames, colTypes);
	rowOI =
			TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(rowTypeInfo);
}
 
Developer: micmiu, Project: bigdata-tutorial, Lines: 25, Source: JSONCDHSerDe.java

Example 7: initialize

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
@Override
public void initialize(final Configuration conf, final Properties tbl)
		throws SerDeException {
	log.debug("conf="+conf);
	log.debug("tblProperties="+tbl);
	final String facetType = tbl.getProperty(ConfigurationUtil.SOLR_FACET_MAPPING);
	final String columnString = tbl.getProperty(ConfigurationUtil.SOLR_COLUMN_MAPPING);
	if (StringUtils.isBlank(facetType)) {
		if (StringUtils.isBlank(columnString)) {
			throw new SerDeException("No facet mapping found, using "+ ConfigurationUtil.SOLR_COLUMN_MAPPING);
		}
		final String[] columnNamesArray = ConfigurationUtil.getAllColumns(columnString);
		colNames = Arrays.asList(columnNamesArray);
		log.debug(ConfigurationUtil.SOLR_COLUMN_MAPPING+" = " + colNames);
		row = new ArrayList<Object>(columnNamesArray.length);
	} else {
		row = new ArrayList<Object>(2);
		colNames = Arrays.asList(StringUtils.split(tbl.getProperty(Constants.LIST_COLUMNS),","));
	}
	
	colTypes = TypeInfoUtils.getTypeInfosFromTypeString(tbl.getProperty(Constants.LIST_COLUMN_TYPES));
	rowTypeInfo = (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(colNames, colTypes);
	rowOI = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(rowTypeInfo);
	log.debug("colNames="+colNames+" rowIO="+rowOI);
}
 
Developer: vroyer, Project: hive-solr-search, Lines: 26, Source: SolrSerDe.java

Example 8: initialize

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
@Override
public void initialize(Configuration conf, Properties tbl) throws SerDeException {
  String columnNameProperty = tbl.getProperty(LIST_COLUMNS);
  String columnTypeProperty = tbl.getProperty(LIST_COLUMN_TYPES);
  List<String> columnNames = Arrays.asList(columnNameProperty.split(","));
  List<TypeInfo> columnTypes = TypeInfoUtils.getTypeInfosFromTypeString(columnTypeProperty);

  List<ObjectInspector> columnObjectInspectors = new ArrayList<ObjectInspector>(columnNames.size());
  ObjectInspector colObjectInspector;
  for (int col = 0; col < columnNames.size(); col++) {
    colObjectInspector = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(columnTypes.get(col));
    columnObjectInspectors.add(colObjectInspector);
  }

  cachedObjectInspector = ObjectInspectorFactory
    .getColumnarStructObjectInspector(columnNames, columnObjectInspectors);
}
 
Developer: apache, Project: lens, Lines: 18, Source: DBSerde.java

Example 9: initialize

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
/**
 * An initialization function used to gather information about the table.
 * Typically, a SerDe implementation will be interested in the list of
 * column names and their types. That information will be used to help
 * perform actual serialization and deserialization of data.
 */
@Override
public void initialize(final Configuration conf, final Properties tbl)
		throws SerDeException {
	// Get a list of the table's column names.
	final String colNamesStr = tbl.getProperty(serdeConstants.LIST_COLUMNS);
	// Jai...change column names to lower case.
	colNames = Arrays.asList(colNamesStr.toLowerCase().split(","));
	// Get a list of TypeInfos for the columns. This list lines up with
	// the list of column names.
	final String colTypesStr = tbl
			.getProperty(serdeConstants.LIST_COLUMN_TYPES);
	final List<TypeInfo> colTypes = TypeInfoUtils
			.getTypeInfosFromTypeString(colTypesStr);
	rowTypeInfo = (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(
			colNames, colTypes);
	rowOI = TypeInfoUtils
			.getStandardJavaObjectInspectorFromTypeInfo(rowTypeInfo);
}
 
Developer: jaibeermalik, Project: searchanalytics-bigdata, Lines: 25, Source: JSONSerDe.java

Example 10: OrcStreamWriter

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
public OrcStreamWriter( final Configuration config, final Path path, final String schema ) throws IOException{
  FileSystem fs = FileSystem.get(config);
  long stripeSize = HiveConf.getLongVar(config, HiveConf.ConfVars.HIVE_ORC_DEFAULT_STRIPE_SIZE);
  CompressionKind compress = CompressionKind.valueOf(HiveConf.getVar(config, HiveConf.ConfVars.HIVE_ORC_DEFAULT_COMPRESS));
  int bufferSize = HiveConf.getIntVar(config, HiveConf.ConfVars.HIVE_ORC_DEFAULT_BUFFER_SIZE);
  int rowIndexStride =  HiveConf.getIntVar(config, HiveConf.ConfVars.HIVE_ORC_DEFAULT_ROW_INDEX_STRIDE);

  TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString( schema );
  ObjectInspector inspector = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo( typeInfo );
  writer = OrcFile.createWriter( fs, path, config, inspector, stripeSize, compress, bufferSize, rowIndexStride );
  formatter = OrcFormatterFactory.get( typeInfo );
}
 
Developer: yahoojapan, Project: dataplatform-schema-lib, Lines: 13, Source: OrcStreamWriter.java
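A hypothetical way to construct this writer; the output path and schema string are illustrative assumptions.

Configuration conf = new Configuration();
// Hypothetical output location and schema.
Path path = new Path("/tmp/example.orc");
OrcStreamWriter writer = new OrcStreamWriter(conf, path, "struct<id:int,name:string>");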

Example 11: initialize

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
public void initialize(String columnNameProperty, String columnTypeProperty) {
  List<String> columnNames = Arrays.asList(columnNameProperty.split(","));
  List<TypeInfo> columnTypes = TypeInfoUtils.getTypeInfosFromTypeString(columnTypeProperty);
  StructTypeInfo rowTypeInfo =
      (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(columnNames, columnTypes);
  rowOI = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(rowTypeInfo);
}
 
Developer: litao-buptsse, Project: flume-hive-batch-sink, Lines: 8, Source: TextDeserializer.java

Example 12: getEvaluator

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
@Override
public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticException {
    ObjectInspector oi = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(parameters[0]);
    if (!ObjectInspectorUtils.compareSupported(oi)) {
        throw new UDFArgumentTypeException(0,
            "Cannot support comparison of map<> type or complex type containing map<>.");
    }
    return new GenericUDAFMaxRowEvaluator();
}
 
Developer: apache, Project: incubator-hivemall, Lines: 10, Source: MaxRowUDAF.java

Example 13: initialize

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
@Override
public void initialize(Configuration conf, Properties tblProperties) throws SerDeException {
  colNames = Arrays.asList(tblProperties.getProperty(Constants.LIST_COLUMNS).split(","));
  colTypes = TypeInfoUtils.getTypeInfosFromTypeString(tblProperties.getProperty(Constants.LIST_COLUMN_TYPES));
  typeInfo = (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(colNames, colTypes);
  inspector = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
  row = new ArrayList<>();
  enableFieldMapping = Boolean.valueOf(tblProperties.getProperty(ENABLE_FIELD_MAPPING, "false"));
}
 
Developer: lucidworks, Project: hive-solr, Lines: 10, Source: LWSerDe.java

Example 14: initialize

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
@Override
public void initialize(Configuration conf, Properties tblProperties) throws SerDeException {

  colNames = Arrays.asList(tblProperties.getProperty(Constants.LIST_COLUMNS).split(","));
  colTypes = TypeInfoUtils
      .getTypeInfosFromTypeString(tblProperties.getProperty(Constants.LIST_COLUMN_TYPES));
  typeInfo = (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(colNames, colTypes);
  inspector = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
  row = new ArrayList<>();
}
 
Developer: lucidworks, Project: hive-solr, Lines: 11, Source: LWSerDe.java

Example 15: OrcWriter

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class this method depends on
public OrcWriter(Configuration conf, Path path, StructTypeInfo typeInfo) throws IOException {
  ObjectInspector inspector = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
  WriterOptions writerOptions = OrcFile.writerOptions(conf).inspector(inspector);
  writer = OrcFile.createWriter(path, writerOptions);
}
 
Developer: HotelsDotCom, Project: corc, Lines: 6, Source: OrcWriter.java


Note: The org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.