This article collects typical usage examples of the Java method org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getTypeInfosFromTypeString. If you are wondering what TypeInfoUtils.getTypeInfosFromTypeString does and how to use it, the curated code examples below may help. You can also explore the enclosing class, org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils, for further usage examples.
Below are 15 code examples of TypeInfoUtils.getTypeInfosFromTypeString, sorted by popularity by default.
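Before the examples, here is a minimal standalone sketch (the type string and class name are made up for illustration) of what the method does: it parses a colon-separated Hive type string into a list of TypeInfo objects, one per column.

import java.util.List;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class TypeStringDemo {
    public static void main(String[] args) {
        // A Hive type string holds one type per column, separated by colons.
        List<TypeInfo> types = TypeInfoUtils.getTypeInfosFromTypeString("int:string:array<double>");
        for (TypeInfo type : types) {
            System.out.println(type.getTypeName()); // prints int, string, array<double>
        }
    }
}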
Example 1: getAllReadTypeInfo
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class the method depends on
private StructTypeInfo getAllReadTypeInfo( final String columnNameProperty , final String columnTypeProperty ){
ArrayList<TypeInfo> fieldTypes = TypeInfoUtils.getTypeInfosFromTypeString( columnTypeProperty );
ArrayList<String> columnNames = new ArrayList<String>();
if ( columnNameProperty != null && 0 < columnNameProperty.length() ) {
String[] columnNameArray = columnNameProperty.split(",");
for( int i = 0 ; i < columnNameArray.length ; i++ ){
columnNames.add( columnNameArray[i] );
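// record the column's original position; filedIndexMap is a field of the enclosing class (the spelling follows the source project)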
filedIndexMap.put( columnNameArray[i] , i );
}
}
StructTypeInfo rootType = new StructTypeInfo();
rootType.setAllStructFieldNames( columnNames );
rootType.setAllStructFieldTypeInfos( fieldTypes );
return rootType;
}
Example 2: getColumnProjectionTypeInfo
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class the method depends on
private StructTypeInfo getColumnProjectionTypeInfo( final String columnNameProperty , final String columnTypeProperty , final String projectionColumnNames ){
Set<String> columnNameSet = new HashSet<String>();
for( String columnName : projectionColumnNames.split(",") ){
columnNameSet.add( columnName );
}
ArrayList<TypeInfo> fieldTypes = TypeInfoUtils.getTypeInfosFromTypeString( columnTypeProperty );
String[] splitNames = columnNameProperty.split(",");
ArrayList<String> projectionColumnNameList = new ArrayList<String>();
ArrayList<TypeInfo> projectionFieldTypeList = new ArrayList<TypeInfo>();
for( int i = 0 ; i < fieldTypes.size() ; i++ ){
if( columnNameSet.contains( splitNames[i] ) ){
projectionColumnNameList.add( splitNames[i] );
projectionFieldTypeList.add( fieldTypes.get(i) );
}
filedIndexMap.put( splitNames[i] , i );
}
StructTypeInfo rootType = new StructTypeInfo();
rootType.setAllStructFieldNames( projectionColumnNameList );
rootType.setAllStructFieldTypeInfos( projectionFieldTypeList );
return rootType;
}
Example 3: structObjectInspector
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class the method depends on
static StandardStructObjectInspector structObjectInspector(Properties tableProperties) {
// extract column info - don't use Hive constants as they were renamed in 0.9 breaking compatibility
// the column names are saved because the inspector given to #serialize doesn't preserve them (maybe because it's an external table)
// use the class since StructType requires it ...
List<String> columnNames = StringUtils.tokenize(tableProperties.getProperty(HiveConstants.COLUMNS), ",");
List<TypeInfo> colTypes = TypeInfoUtils.getTypeInfosFromTypeString(tableProperties.getProperty(HiveConstants.COLUMNS_TYPES));
// create a standard writable Object Inspector - used later on by serialization/deserialization
List<ObjectInspector> inspectors = new ArrayList<ObjectInspector>();
for (TypeInfo typeInfo : colTypes) {
inspectors.add(TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(typeInfo));
}
return ObjectInspectorFactory.getStandardStructObjectInspector(columnNames, inspectors);
}
Example 4: initialize
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class the method depends on
/**
* An initialization function used to gather information about the table.
* Typically, a SerDe implementation will be interested in the list of
* column names and their types. That information will be used to help perform
* actual serialization and deserialization of data.
*/
@Override
public void initialize(Configuration conf, Properties tbl)
throws SerDeException {
// Get a list of the table's column names.
String colNamesStr = tbl.getProperty(serdeConstants.LIST_COLUMNS);
colNames = Arrays.asList(colNamesStr.split(","));
// Get a list of TypeInfos for the columns. This list lines up with
// the list of column names.
String colTypesStr = tbl.getProperty(serdeConstants.LIST_COLUMN_TYPES);
List<TypeInfo> colTypes =
TypeInfoUtils.getTypeInfosFromTypeString(colTypesStr);
rowTypeInfo =
(StructTypeInfo) TypeInfoFactory.getStructTypeInfo(colNames, colTypes);
rowOI =
TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(rowTypeInfo);
}
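The initialize implementations above and below all consume the same two table properties. As a quick illustration (the column names and types here are made up), the following sketch shows the property format Hive passes in, with names comma-separated and types colon-separated, and the struct ObjectInspector that results:

import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class InitializeSketch {
    public static void main(String[] args) {
        // Table properties as Hive supplies them to a SerDe.
        Properties tbl = new Properties();
        tbl.setProperty(serdeConstants.LIST_COLUMNS, "id,name,score");
        tbl.setProperty(serdeConstants.LIST_COLUMN_TYPES, "bigint:string:double");

        List<String> colNames = Arrays.asList(tbl.getProperty(serdeConstants.LIST_COLUMNS).split(","));
        List<TypeInfo> colTypes = TypeInfoUtils.getTypeInfosFromTypeString(tbl.getProperty(serdeConstants.LIST_COLUMN_TYPES));
        StructTypeInfo rowTypeInfo = (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(colNames, colTypes);
        ObjectInspector rowOI = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(rowTypeInfo);

        System.out.println(rowTypeInfo.getTypeName()); // struct<id:bigint,name:string,score:double>
        System.out.println(rowOI.getCategory());       // STRUCT
    }
}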
Example 5: initialize
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class the method depends on
/**
* An initialization function used to gather information about the table.
* Typically, a SerDe implementation will be interested in the list of
* column names and their types. That information will be used to help perform
* actual serialization and deserialization of data.
*/
@Override
public void initialize(Configuration conf, Properties tbl)
throws SerDeException {
// Get a list of the table's column names.
String colNamesStr = tbl.getProperty(serdeConstants.LIST_COLUMNS);
colNames = Arrays.asList(colNamesStr.split(","));
// Get a list of TypeInfos for the columns. This list lines up with
// the list of column names.
String colTypesStr = tbl.getProperty(serdeConstants.LIST_COLUMN_TYPES);
List<TypeInfo> colTypes =
TypeInfoUtils.getTypeInfosFromTypeString(colTypesStr);
rowTypeInfo =
(StructTypeInfo) TypeInfoFactory.getStructTypeInfo(colNames, colTypes);
rowOI =
TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(rowTypeInfo);
}
Example 6: initialize
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class the method depends on
@Override
public void initialize(final Configuration conf, final Properties tbl)
throws SerDeException {
log.debug("conf="+conf);
log.debug("tblProperties="+tbl);
final String facetType = tbl.getProperty(ConfigurationUtil.SOLR_FACET_MAPPING);
final String columnString = tbl.getProperty(ConfigurationUtil.SOLR_COLUMN_MAPPING);
if (StringUtils.isBlank(facetType)) {
if (StringUtils.isBlank(columnString)) {
throw new SerDeException("No facet mapping found and no column mapping found under " + ConfigurationUtil.SOLR_COLUMN_MAPPING);
}
final String[] columnNamesArray = ConfigurationUtil.getAllColumns(columnString);
colNames = Arrays.asList(columnNamesArray);
log.debug(ConfigurationUtil.SOLR_COLUMN_MAPPING+" = " + colNames);
row = new ArrayList<Object>(columnNamesArray.length);
} else {
row = new ArrayList<Object>(2);
colNames = Arrays.asList(StringUtils.split(tbl.getProperty(Constants.LIST_COLUMNS),","));
}
colTypes = TypeInfoUtils.getTypeInfosFromTypeString(tbl.getProperty(Constants.LIST_COLUMN_TYPES));
rowTypeInfo = (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(colNames, colTypes);
rowOI = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(rowTypeInfo);
log.debug("colNames="+colNames+" rowIO="+rowOI);
}
Example 7: initialize
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class the method depends on
@Override
public void initialize(Configuration conf, Properties tbl) throws SerDeException {
String columnNameProperty = tbl.getProperty(LIST_COLUMNS);
String columnTypeProperty = tbl.getProperty(LIST_COLUMN_TYPES);
List<String> columnNames = Arrays.asList(columnNameProperty.split(","));
List<TypeInfo> columnTypes = TypeInfoUtils.getTypeInfosFromTypeString(columnTypeProperty);
List<ObjectInspector> columnObjectInspectors = new ArrayList<ObjectInspector>(columnNames.size());
ObjectInspector colObjectInspector;
for (int col = 0; col < columnNames.size(); col++) {
colObjectInspector = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(columnTypes.get(col));
columnObjectInspectors.add(colObjectInspector);
}
cachedObjectInspector = ObjectInspectorFactory
.getColumnarStructObjectInspector(columnNames, columnObjectInspectors);
}
Example 8: initialize
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class the method depends on
/**
* An initialization function used to gather information about the table.
* Typically, a SerDe implementation will be interested in the list of
* column names and their types. That information will be used to help
* perform actual serialization and deserialization of data.
*/
@Override
public void initialize(final Configuration conf, final Properties tbl)
throws SerDeException {
// Get a list of the table's column names.
final String colNamesStr = tbl.getProperty(serdeConstants.LIST_COLUMNS);
// Change column names to lower case.
colNames = Arrays.asList(colNamesStr.toLowerCase().split(","));
// Get a list of TypeInfos for the columns. This list lines up with
// the list of column names.
final String colTypesStr = tbl
.getProperty(serdeConstants.LIST_COLUMN_TYPES);
final List<TypeInfo> colTypes = TypeInfoUtils
.getTypeInfosFromTypeString(colTypesStr);
rowTypeInfo = (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(
colNames, colTypes);
rowOI = TypeInfoUtils
.getStandardJavaObjectInspectorFromTypeInfo(rowTypeInfo);
}
Example 9: initialize
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class the method depends on
public void initialize(String columnNameProperty, String columnTypeProperty) {
List<String> columnNames = Arrays.asList(columnNameProperty.split(","));
List<TypeInfo> columnTypes = TypeInfoUtils.getTypeInfosFromTypeString(columnTypeProperty);
StructTypeInfo rowTypeInfo =
(StructTypeInfo) TypeInfoFactory.getStructTypeInfo(columnNames, columnTypes);
rowOI = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(rowTypeInfo);
}
Example 10: createLazyPhoenixInspector
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class the method depends on
private ObjectInspector createLazyPhoenixInspector(Configuration conf, Properties tbl) throws SerDeException {
List<String> columnNameList = Arrays.asList(tbl.getProperty(serdeConstants.LIST_COLUMNS).split(PhoenixStorageHandlerConstants.COMMA));
List<TypeInfo> columnTypeList = TypeInfoUtils.getTypeInfosFromTypeString(tbl.getProperty(serdeConstants.LIST_COLUMN_TYPES));
List<ObjectInspector> columnObjectInspectors = Lists.newArrayListWithExpectedSize(columnTypeList.size());
for (TypeInfo typeInfo : columnTypeList) {
columnObjectInspectors.add(PhoenixObjectInspectorFactory.createObjectInspector(typeInfo, serdeParams));
}
return LazyObjectInspectorFactory.getLazySimpleStructObjectInspector(columnNameList,
columnObjectInspectors, null, serdeParams.getSeparators()[0], serdeParams, ObjectInspectorOptions.JAVA);
}
Example 11: initialize
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class the method depends on
@Override
public void initialize(Configuration conf, Properties tbl) throws SerDeException {
String columnNameProperty = tbl.getProperty(IOConstants.COLUMNS);
String columnTypeProperty = tbl.getProperty(IOConstants.COLUMNS_TYPES);
if (Strings.isEmpty(columnNameProperty)) {
columnNames = new ArrayList<String>();
} else {
columnNames = Arrays.asList(columnNameProperty.split(","));
}
if (Strings.isEmpty(columnTypeProperty)) {
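// no types given: default every column to "string" by building a type string like "string:string:string"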
columnTypes = TypeInfoUtils.getTypeInfosFromTypeString(StringUtils.repeat("string", ":", columnNames.size()));
} else {
columnTypes = TypeInfoUtils.getTypeInfosFromTypeString(columnTypeProperty);
}
if (columnNames.size() != columnTypes.size()) {
throw new IllegalArgumentException("IndexRHiveSerde initialization failed. Number of column " +
"names and column types differs. columnNames = " + columnNames + ", columnTypes = " +
columnTypes);
}
TypeInfo rowTypeInfo = TypeInfoFactory.getStructTypeInfo(columnNames, columnTypes);
this.objInspector = new ArrayWritableObjectInspector((StructTypeInfo) rowTypeInfo);
stats = new SerDeStats();
serdeSize = 0;
}
Example 12: initialize
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class the method depends on
@Override
public void initialize(Configuration conf, Properties tblProperties) throws SerDeException {
colNames = Arrays.asList(tblProperties.getProperty(Constants.LIST_COLUMNS).split(","));
colTypes = TypeInfoUtils.getTypeInfosFromTypeString(tblProperties.getProperty(Constants.LIST_COLUMN_TYPES));
typeInfo = (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(colNames, colTypes);
inspector = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
row = new ArrayList<>();
enableFieldMapping = Boolean.valueOf(tblProperties.getProperty(ENABLE_FIELD_MAPPING, "false"));
}
Example 13: initialize
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class the method depends on
@Override
public void initialize(Configuration conf, Properties tblProperties) throws SerDeException {
colNames = Arrays.asList(tblProperties.getProperty(Constants.LIST_COLUMNS).split(","));
colTypes = TypeInfoUtils
.getTypeInfosFromTypeString(tblProperties.getProperty(Constants.LIST_COLUMN_TYPES));
typeInfo = (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(colNames, colTypes);
inspector = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
row = new ArrayList<>();
}
Example 14: initialize
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class the method depends on
/**
* Initializes the RecordServiceSerde based on the table schema.
*/
@Override
public final void initialize(final Configuration conf, final Properties tbl)
throws SerDeException {
final List<TypeInfo> columnTypes;
final String columnNameProperty = tbl.getProperty(IOConstants.COLUMNS);
final String columnTypeProperty = tbl.getProperty(IOConstants.COLUMNS_TYPES);
if (columnNameProperty.length() == 0) {
columnNames_ = new ArrayList<String>();
} else {
columnNames_ = Arrays.asList(columnNameProperty.split(","));
}
if (columnTypeProperty.length() == 0) {
columnTypes = new ArrayList<TypeInfo>();
} else {
columnTypes = TypeInfoUtils.getTypeInfosFromTypeString(columnTypeProperty);
}
if (columnNames_.size() != columnTypes.size()) {
throw new IllegalArgumentException("Initialization failed. Number of column " +
"names and column types differs. columnNames = " + columnNames_ +
", columnTypes = " + columnTypes);
}
// Initialize the ObjectInspector based on the column type information in the table.
final TypeInfo rowTypeInfo =
TypeInfoFactory.getStructTypeInfo(columnNames_, columnTypes);
objInspector_ = new RecordServiceObjectInspector((StructTypeInfo) rowTypeInfo);
}
Example 15: SolrReader
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; // import the package/class the method depends on
public SolrReader(JobConf conf, SolrSplit split) throws IOException {
log.debug("jobConf=" + conf);
List<Integer> readColIDs = getReadColumnIDs(conf);
facetMapping = conf.get(ConfigurationUtil.SOLR_FACET_MAPPING);
if (StringUtils.isBlank(facetMapping)) {
String columnString = conf.get(ConfigurationUtil.SOLR_COLUMN_MAPPING);
if (StringUtils.isBlank(columnString)) {
throw new IOException("no column mapping found!");
}
solrColumns = ConfigurationUtil.getAllColumns(columnString);
if (readColIDs.size() > solrColumns.length) {
throw new IOException("read column count larger than that in column mapping string!");
}
} else {
if (readColIDs.size() != 2) {
throw new IOException("read column should be 2 with facet mapping");
}
solrColumns = conf.get(Constants.LIST_COLUMNS).split(",");
}
if (conf.get(Constants.LIST_COLUMNS) != null) {
hiveColNames = Arrays.asList(StringUtils.split(conf.get(Constants.LIST_COLUMNS), ","));
}
if (conf.get(Constants.LIST_COLUMN_TYPES) != null) {
hiveColTypes = TypeInfoUtils.getTypeInfosFromTypeString(conf.get(Constants.LIST_COLUMN_TYPES));
for (TypeInfo ti : hiveColTypes) {
rowOI.add(TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(ti));
}
}
this.split = split;
SolrTable table = new SolrTable(conf);
cursor = table.getCursor((int) split.getStart(), (int) split.getLength());
}