

Java Constants Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hive.serde.Constants, drawn from open-source projects. If you are wondering what the Constants class is for or how to use it, the curated examples below should help.


The Constants class belongs to the org.apache.hadoop.hive.serde package. Fifteen code examples are presented below, ordered by popularity.
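Nearly every example below follows the same pattern: during SerDe initialization, the Hive table's column names and column types are read from the table properties under the keys Constants.LIST_COLUMNS and Constants.LIST_COLUMN_TYPES. (In later Hive releases this class was deprecated in favor of org.apache.hadoop.hive.serde.serdeConstants, which keeps the same keys.) A minimal, self-contained sketch of that pattern; the column names and types used here are hypothetical:

import java.util.Arrays;
import java.util.List;
import java.util.Properties;

import org.apache.hadoop.hive.serde.Constants;

public class ConstantsPatternSketch {
    public static void main(String[] args) {
        // A Hive table's properties carry the column layout under well-known keys.
        Properties tbl = new Properties();
        tbl.setProperty(Constants.LIST_COLUMNS, "id,msg");          // hypothetical column names
        tbl.setProperty(Constants.LIST_COLUMN_TYPES, "int,string"); // hypothetical column types

        // A SerDe recovers names and types from those keys during initialize().
        List<String> colNames = Arrays.asList(
                tbl.getProperty(Constants.LIST_COLUMNS).split(","));
        String colTypes = tbl.getProperty(Constants.LIST_COLUMN_TYPES);
        System.out.println(colNames + " : " + colTypes); // [id, msg] : int,string
    }
}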

Example 1: getNumberObjectList

import org.apache.hadoop.hive.serde.Constants; // import the required package/class
public Object getNumberObjectList(List<String> data, String hiveType) {
  List<Object> doubleValues = new ArrayList<Object>();
  if (data == null) {
    return null;
  }
  String hiveSubType;
  if (hiveType.equals(DerivedHiveTypeConstants.DOUBLE_ARRAY_TYPE_NAME)) {
    hiveSubType = Constants.DOUBLE_TYPE_NAME;
  } else {
    hiveSubType = Constants.BIGINT_TYPE_NAME;
  }
  for (String dataElement : data) {
    doubleValues.add(getNumberObject(dataElement, hiveSubType));
  }
  return doubleValues;
}
 
Author: awslabs | Project: emr-dynamodb-connector | Lines: 17 | Source: DynamoDBDataParser.java
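For illustration, a hypothetical call to the method above (it assumes DynamoDBDataParser has a no-argument constructor; DerivedHiveTypeConstants comes from the same project):

DynamoDBDataParser parser = new DynamoDBDataParser();
List<String> raw = Arrays.asList("1.5", "2");
// With the double-array type name, each element is parsed as a double:
Object parsed = parser.getNumberObjectList(raw, DerivedHiveTypeConstants.DOUBLE_ARRAY_TYPE_NAME);
// parsed is a List<Object> holding the Doubles 1.5 and 2.0; a null input list yields null.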

Example 2: initialize

import org.apache.hadoop.hive.serde.Constants; // import the required package/class
@Override
public void initialize(final Configuration conf, final Properties tbl)
		throws SerDeException {
	log.debug("conf="+conf);
	log.debug("tblProperties="+tbl);
	final String facetType = tbl.getProperty(ConfigurationUtil.SOLR_FACET_MAPPING);
	final String columnString = tbl.getProperty(ConfigurationUtil.SOLR_COLUMN_MAPPING);
	if (StringUtils.isBlank(facetType)) {
		if (StringUtils.isBlank(columnString)) {
			throw new SerDeException("No facet mapping found, using "+ ConfigurationUtil.SOLR_COLUMN_MAPPING);
		}
		final String[] columnNamesArray = ConfigurationUtil.getAllColumns(columnString);
		colNames = Arrays.asList(columnNamesArray);
		log.debug(ConfigurationUtil.SOLR_COLUMN_MAPPING+" = " + colNames);
		row = new ArrayList<Object>(columnNamesArray.length);
	} else {
		row = new ArrayList<Object>(2);
		colNames = Arrays.asList(StringUtils.split(tbl.getProperty(Constants.LIST_COLUMNS),","));
	}
	
	colTypes = TypeInfoUtils.getTypeInfosFromTypeString(tbl.getProperty(Constants.LIST_COLUMN_TYPES));
	rowTypeInfo = (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(colNames, colTypes);
	rowOI = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(rowTypeInfo);
	log.debug("colNames="+colNames+" rowIO="+rowOI);
}
 
Author: vroyer | Project: hive-solr-search | Lines: 26 | Source: SolrSerDe.java
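Reading the two branches above: when no facet mapping is configured, the Hive column list comes from the Solr column mapping string; when a facet mapping is present, the SerDe takes its columns from Constants.LIST_COLUMNS and sizes the row for two fields (example 12 below shows SolrReader enforcing that count). A hypothetical property set for the facet path, with made-up field and column names and assuming the SerDe's default constructor:

final SolrSerDe serde = new SolrSerDe();
final Properties tbl = new Properties();
tbl.setProperty(ConfigurationUtil.SOLR_FACET_MAPPING, "category");  // hypothetical facet field
tbl.setProperty(Constants.LIST_COLUMNS, "facet_value,facet_count"); // two Hive columns
tbl.setProperty(Constants.LIST_COLUMN_TYPES, "string,bigint");
serde.initialize(new Configuration(), tbl);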

Example 3: readColumnarStruct

import org.apache.hadoop.hive.serde.Constants; // import the required package/class
private ColumnarStruct readColumnarStruct(BytesRefArrayWritable buff, String schema) throws SerDeException {
     Pattern pcols = Pattern.compile("[a-zA-Z_0-9]*[ ]");
     List<String> types = HiveRCSchemaUtil.parseSchemaTypes(schema);
     List<String> cols = HiveRCSchemaUtil.parseSchema(pcols, schema);

     List<FieldSchema> fieldSchemaList = new ArrayList<FieldSchema>(
         cols.size());

     for (int i = 0; i < cols.size(); i++) {
         fieldSchemaList.add(new FieldSchema(cols.get(i), HiveRCSchemaUtil
             .findPigDataType(types.get(i))));
     }

     Properties props = new Properties();

     props.setProperty(Constants.LIST_COLUMNS,
         HiveRCSchemaUtil.listToString(cols));
     props.setProperty(Constants.LIST_COLUMN_TYPES,
         HiveRCSchemaUtil.listToString(types));

     Configuration hiveConf = new HiveConf(conf, SessionState.class);
     ColumnarSerDe serde = new ColumnarSerDe();
     serde.initialize(hiveConf, props);

     return (ColumnarStruct) serde.deserialize(buff);
}
 
Author: sigmoidanalytics | Project: spork-streaming | Lines: 27 | Source: TestHiveColumnarStorage.java

Example 4: parseOrCreateColumnMapping

import org.apache.hadoop.hive.serde.Constants; // import the required package/class
/**
 * Parse the column mapping from table properties. If
 * cassandra.columns.mapping is defined in the property, use it to create
 * the mapping. Otherwise, create the mapping from table columns using the
 * default mapping.
 *
 * @param tbl table properties
 * @return A list of column names
 * @throws org.apache.hadoop.hive.serde2.SerDeException
 *
 */
protected List<String> parseOrCreateColumnMapping(Properties tbl) throws SerDeException {
    String prop = tbl.getProperty(AbstractCassandraSerDe.CASSANDRA_COL_MAPPING);

    if (prop != null) {
        return parseColumnMapping(prop);
    } else {
        String tblColumnStr = tbl.getProperty(Constants.LIST_COLUMNS);

        if (tblColumnStr != null) {
            //auto-create
            String mappingStr = createColumnMappingString(tblColumnStr);

            if (LOG.isDebugEnabled()) {
                LOG.debug("table column string: " + tblColumnStr);
                LOG.debug("Auto-created mapping string: " + mappingStr);
            }

            return Arrays.asList(mappingStr.split(","));

        } else {
            throw new SerDeException("Can't find table column definitions");
        }
    }
}
 
Author: 2013Commons | Project: hive-cassandra | Lines: 36 | Source: CqlSerDe.java
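The logic above prefers an explicit cassandra.columns.mapping and otherwise derives a mapping from the Hive column list via createColumnMappingString. A self-contained sketch of that fallback decision; it simplifies createColumnMappingString to a pass-through and writes the property keys out literally (Constants.LIST_COLUMNS resolves to "columns"):

import java.util.Arrays;
import java.util.List;
import java.util.Properties;

public class MappingFallbackSketch {
    public static void main(String[] args) {
        Properties tbl = new Properties();
        tbl.setProperty("columns", "row_key,value"); // stand-in for Constants.LIST_COLUMNS

        String prop = tbl.getProperty("cassandra.columns.mapping"); // not set in this sketch
        List<String> mapping = (prop != null)
                ? Arrays.asList(prop.split(","))                        // explicit mapping wins
                : Arrays.asList(tbl.getProperty("columns").split(",")); // derived from Hive columns
        System.out.println(mapping); // [row_key, value]
    }
}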

Example 5: parseOrCreateColumnMapping

import org.apache.hadoop.hive.serde.Constants; // import the required package/class
/**
 * Parse the column mapping from table properties. If
 * cassandra.columns.mapping is defined in the property, use it to create
 * the mapping. Otherwise, create the mapping from table columns using the
 * default mapping.
 *
 * @param tbl table properties
 * @return A list of column names
 * @throws SerDeException
 */
protected List<String> parseOrCreateColumnMapping(Properties tbl) throws SerDeException {
    String prop = tbl.getProperty(AbstractCassandraSerDe.CASSANDRA_COL_MAPPING);

    if (prop != null) {
        return parseColumnMapping(prop);
    } else {
        String tblColumnStr = tbl.getProperty(Constants.LIST_COLUMNS);

        if (tblColumnStr != null) {
            //auto-create
            String mappingStr = createColumnMappingString(tblColumnStr);

            if (LOG.isDebugEnabled()) {
                LOG.debug("table column string: " + tblColumnStr);
                LOG.debug("Auto-created mapping string: " + mappingStr);
            }

            return Arrays.asList(mappingStr.split(","));

        } else {
            throw new SerDeException("Can't find table column definitions");
        }
    }
}
 
Author: 2013Commons | Project: hive-cassandra | Lines: 35 | Source: CassandraColumnSerDe.java

Example 6: getCassandraColumnFamily

import org.apache.hadoop.hive.serde.Constants; // import the required package/class
/**
 * Parse cassandra column family name from table properties.
 *
 * @param tbl table properties
 * @return cassandra column family name
 * @throws SerDeException error parsing column family name
 */
protected String getCassandraColumnFamily(Properties tbl) throws SerDeException {
  String result = tbl.getProperty(CASSANDRA_CF_NAME);

  if (result == null) {

    result = tbl
        .getProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_NAME);

    if (result == null) {
      throw new SerDeException("CassandraColumnFamily not defined" + tbl.toString());
    }

    if (result.indexOf(".") != -1) {
      result = result.substring(result.indexOf(".") + 1);
    }
  }

  return result;
}
 
Author: dvasilen | Project: Hive-Cassandra | Lines: 27 | Source: AbstractColumnSerDe.java

Example 7: parseOrCreateColumnMapping

import org.apache.hadoop.hive.serde.Constants; // import the required package/class
/**
 * Parse the column mapping from table properties. If cassandra.columns.mapping
 * is defined in the property, use it to create the mapping. Otherwise, create the mapping from table
 * columns using the default mapping.
 *
 * @param tbl table properties
 * @return A list of column names
 * @throws SerDeException
 */
protected List<String> parseOrCreateColumnMapping(Properties tbl) throws SerDeException {
  String prop = tbl.getProperty(CASSANDRA_COL_MAPPING);

  if (prop != null) {
    return parseColumnMapping(prop);
  } else {
    String tblColumnStr = tbl.getProperty(Constants.LIST_COLUMNS);

    if (tblColumnStr != null) {
      //auto-create
      String mappingStr = createColumnMappingString(tblColumnStr);

      if (LOG.isDebugEnabled()) {
        LOG.debug("table column string: " + tblColumnStr);
        LOG.debug("Auto-created mapping string: " + mappingStr);
      }

      return Arrays.asList(mappingStr.split(","));

    } else {
      throw new SerDeException("Can't find table column definitions");
    }
  }
}
 
Author: dvasilen | Project: Hive-Cassandra | Lines: 34 | Source: AbstractColumnSerDe.java

Example 8: getNumberObject

import org.apache.hadoop.hive.serde.Constants; // import the required package/class
public Object getNumberObject(String data, String hiveType) {
  if (data != null) {
    if (hiveType.equals(Constants.BIGINT_TYPE_NAME)) {
      return Long.parseLong(data);
    } else {
      return Double.parseDouble(data);
    }
  } else {
    return null;
  }
}
 
Author: awslabs | Project: emr-dynamodb-connector | Lines: 12 | Source: DynamoDBDataParser.java

Example 9: initialize

import org.apache.hadoop.hive.serde.Constants; // import the required package/class
@Override
public void initialize(Configuration conf, Properties tblProperties) throws SerDeException {
  colNames = Arrays.asList(tblProperties.getProperty(Constants.LIST_COLUMNS).split(","));
  colTypes = TypeInfoUtils.getTypeInfosFromTypeString(tblProperties.getProperty(Constants.LIST_COLUMN_TYPES));
  typeInfo = (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(colNames, colTypes);
  inspector = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
  row = new ArrayList<>();
  enableFieldMapping = Boolean.valueOf(tblProperties.getProperty(ENABLE_FIELD_MAPPING, "false"));
}
 
Author: lucidworks | Project: hive-solr | Lines: 10 | Source: LWSerDe.java

Example 10: initialize

import org.apache.hadoop.hive.serde.Constants; // import the required package/class
@Override
public void initialize(Configuration conf, Properties tblProperties) throws SerDeException {

  colNames = Arrays.asList(tblProperties.getProperty(Constants.LIST_COLUMNS).split(","));
  colTypes = TypeInfoUtils
      .getTypeInfosFromTypeString(tblProperties.getProperty(Constants.LIST_COLUMN_TYPES));
  typeInfo = (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(colNames, colTypes);
  inspector = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo);
  row = new ArrayList<>();
}
 
Author: lucidworks | Project: hive-solr | Lines: 11 | Source: LWSerDe.java

Example 11: createTable

import org.apache.hadoop.hive.serde.Constants; // import the required package/class
public void createTable() throws TException {
    HiveMetaStoreClient hiveClient = new HiveMetaStoreClient(hiveLocalMetaStore.getHiveConf());

    LOG.info("HIVE: Dropping hive table: " + propertyParser.getProperty(ConfigVars.HIVE_BOLT_TABLE_KEY));
    hiveClient.dropTable(propertyParser.getProperty(ConfigVars.HIVE_BOLT_DATABASE_KEY),
            propertyParser.getProperty(ConfigVars.HIVE_BOLT_TABLE_KEY),
            true, true);

    // Define the cols
    List<FieldSchema> cols = new ArrayList<FieldSchema>();
    cols.add(new FieldSchema("id", Constants.INT_TYPE_NAME, ""));
    cols.add(new FieldSchema("msg", Constants.STRING_TYPE_NAME, ""));

    // Values for the StorageDescriptor
    String location = new File(propertyParser.getProperty(
            ConfigVars.HIVE_TEST_TABLE_LOCATION_KEY)).getAbsolutePath();
    String inputFormat = "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat";
    String outputFormat = "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat";
    int numBuckets = 16;
    Map<String,String> orcProps = new HashMap<String, String>();
    orcProps.put("orc.compress", "NONE");
    SerDeInfo serDeInfo = new SerDeInfo(OrcSerde.class.getSimpleName(), OrcSerde.class.getName(), orcProps);
    List<String> bucketCols = new ArrayList<String>();
    bucketCols.add("id");

    // Build the StorageDescriptor
    StorageDescriptor sd = new StorageDescriptor();
    sd.setCols(cols);
    sd.setLocation(location);
    sd.setInputFormat(inputFormat);
    sd.setOutputFormat(outputFormat);
    sd.setNumBuckets(numBuckets);
    sd.setSerdeInfo(serDeInfo);
    sd.setBucketCols(bucketCols);
    sd.setSortCols(new ArrayList<Order>());
    sd.setParameters(new HashMap<String, String>());

    // Define the table
    Table tbl = new Table();
    tbl.setDbName(propertyParser.getProperty(ConfigVars.HIVE_BOLT_DATABASE_KEY));
    tbl.setTableName(propertyParser.getProperty(ConfigVars.HIVE_BOLT_TABLE_KEY));
    tbl.setSd(sd);
    tbl.setOwner(System.getProperty("user.name"));
    tbl.setParameters(new HashMap<String, String>());
    tbl.setViewOriginalText("");
    tbl.setViewExpandedText("");
    tbl.setTableType(TableType.MANAGED_TABLE.name());
    List<FieldSchema> partitions = new ArrayList<FieldSchema>();
    partitions.add(new FieldSchema("dt", Constants.STRING_TYPE_NAME, ""));
    tbl.setPartitionKeys(partitions);

    // Create the table
    hiveClient.createTable(tbl);

    // Describe the table
    Table createdTable = hiveClient.getTable(propertyParser.getProperty(ConfigVars.HIVE_BOLT_DATABASE_KEY),
            propertyParser.getProperty(ConfigVars.HIVE_BOLT_TABLE_KEY));
    LOG.info("HIVE: Created Table: " + createdTable.toString());
}
 
Author: sakserv | Project: storm-topology-examples | Lines: 60 | Source: KafkaHiveHdfsTopologyTest.java

Example 12: SolrReader

import org.apache.hadoop.hive.serde.Constants; // import the required package/class
public SolrReader(JobConf conf, SolrSplit split) throws IOException {
	log.debug("jobConf=" + conf);

	List<Integer> readColIDs = getReadColumnIDs(conf);
	facetMapping = conf.get(ConfigurationUtil.SOLR_FACET_MAPPING);
	if (StringUtils.isBlank(facetMapping)) {
		String columnString = conf.get(ConfigurationUtil.SOLR_COLUMN_MAPPING);
		if (StringUtils.isBlank(columnString)) {
			throw new IOException("no column mapping found!");
		}
		solrColumns = ConfigurationUtil.getAllColumns(columnString);
		if (readColIDs.size() > solrColumns.length) {
			throw new IOException("read column count larger than that in column mapping string!");
		}
	} else {
		if (readColIDs.size() != 2) {
			throw new IOException("read column should be 2 with facet mapping");
		}
		solrColumns = conf.get(Constants.LIST_COLUMNS).split(",");
	}

	if (conf.get(Constants.LIST_COLUMNS) != null) {
		hiveColNames = Arrays.asList(StringUtils.split(conf.get(Constants.LIST_COLUMNS), ","));
	}

	if (conf.get(Constants.LIST_COLUMN_TYPES) != null) {
		hiveColTypes = TypeInfoUtils.getTypeInfosFromTypeString(conf.get(Constants.LIST_COLUMN_TYPES));
		for (TypeInfo ti : hiveColTypes) {
			rowOI.add(TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(ti));
		}
	}

	this.split = split;
	SolrTable table = new SolrTable(conf);
	cursor = table.getCursor((int) split.getStart(), (int) split.getLength());
}
 
Author: vroyer | Project: hive-solr-search | Lines: 37 | Source: SolrReader.java

Example 13: initialize

import org.apache.hadoop.hive.serde.Constants; // import the required package/class
@Override
public void initialize(final Configuration conf, final Properties tbl) throws SerDeException {
    final String columnString = HiveCassandraUtils.getPropertyValue(tbl, HiveCassandraUtils.COLUMN_MAPPINGS, Constants.LIST_COLUMNS);
    if (!Strings.isNullOrEmpty(columnString))
        columnNames = Arrays.asList(columnString.split("[,:;]"));
    LOG.debug("column names in hive table: {}", columnNames);
    fieldCount = columnNames.size();

    String columnTypeProperty = tbl.getProperty(Constants.LIST_COLUMN_TYPES);
    if (!Strings.isNullOrEmpty(columnTypeProperty))
        columnTypesArray = Arrays.asList(columnTypeProperty.split("[,:;]"));
    LOG.debug("column types in hive table: {}", columnTypesArray);

    final List<ObjectInspector> fieldOIs = new ArrayList<ObjectInspector>(fieldCount);
    for (int i = 0; i < fieldCount; i++) {
        if (HIVE_TYPE_INT.equalsIgnoreCase(columnTypesArray.get(i))) {
            fieldOIs.add(PrimitiveObjectInspectorFactory.javaIntObjectInspector);
        } else if (Cql3SerDe.HIVE_TYPE_SMALLINT.equalsIgnoreCase(columnTypesArray.get(i))) {
            fieldOIs.add(PrimitiveObjectInspectorFactory.javaShortObjectInspector);
        } else if (Cql3SerDe.HIVE_TYPE_TINYINT.equalsIgnoreCase(columnTypesArray.get(i))) {
            fieldOIs.add(PrimitiveObjectInspectorFactory.javaByteObjectInspector);
        } else if (Cql3SerDe.HIVE_TYPE_BIGINT.equalsIgnoreCase(columnTypesArray.get(i))) {
            fieldOIs.add(PrimitiveObjectInspectorFactory.javaLongObjectInspector);
        } else if (Cql3SerDe.HIVE_TYPE_BOOLEAN.equalsIgnoreCase(columnTypesArray.get(i))) {
            fieldOIs.add(PrimitiveObjectInspectorFactory.javaBooleanObjectInspector);
        } else if (Cql3SerDe.HIVE_TYPE_FLOAT.equalsIgnoreCase(columnTypesArray.get(i))) {
            fieldOIs.add(PrimitiveObjectInspectorFactory.javaFloatObjectInspector);
        } else if (Cql3SerDe.HIVE_TYPE_DOUBLE.equalsIgnoreCase(columnTypesArray.get(i))) {
            fieldOIs.add(PrimitiveObjectInspectorFactory.javaDoubleObjectInspector);
        } else {
            // treat as string
            fieldOIs.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector);
        }
    }
    objectInspector = ObjectInspectorFactory.getStandardStructObjectInspector(columnNames, fieldOIs);
    row = new ArrayList<Object>(fieldCount);
}
 
Author: kernel164 | Project: hive-cassandra-dsc | Lines: 38 | Source: Cql3SerDe.java
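The if/else ladder above maps Hive type names to primitive ObjectInspectors, defaulting to string for anything unrecognized. The same behavior can be expressed more compactly with a lookup map; this is an alternative sketch, not the project's code, and the literal type names stand in for the Cql3SerDe HIVE_TYPE_* constants:

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

public class TypeToInspectorSketch {
    private static final Map<String, ObjectInspector> BY_TYPE = new HashMap<String, ObjectInspector>();
    static {
        BY_TYPE.put("int", PrimitiveObjectInspectorFactory.javaIntObjectInspector);
        BY_TYPE.put("smallint", PrimitiveObjectInspectorFactory.javaShortObjectInspector);
        BY_TYPE.put("tinyint", PrimitiveObjectInspectorFactory.javaByteObjectInspector);
        BY_TYPE.put("bigint", PrimitiveObjectInspectorFactory.javaLongObjectInspector);
        BY_TYPE.put("boolean", PrimitiveObjectInspectorFactory.javaBooleanObjectInspector);
        BY_TYPE.put("float", PrimitiveObjectInspectorFactory.javaFloatObjectInspector);
        BY_TYPE.put("double", PrimitiveObjectInspectorFactory.javaDoubleObjectInspector);
    }

    static ObjectInspector inspectorFor(String hiveType) {
        ObjectInspector oi = BY_TYPE.get(hiveType.toLowerCase());
        return oi != null ? oi : PrimitiveObjectInspectorFactory.javaStringObjectInspector; // default: string
    }
}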

Example 14: setup

import org.apache.hadoop.hive.serde.Constants; // import the required package/class
/**
 * Does the configuration setup and schema parsing and setup.
 *
 * @param table_schema
 *            String
 */
private void setup(String table_schema) {

    if (table_schema == null)
        throw new RuntimeException(
                "The table schema must be defined as colname type, colname type. All types are hive types");

    // create basic configuration for hdfs and hive
    conf = new Configuration();
    hiveConf = new HiveConf(conf, SessionState.class);

    // parse the table_schema string
    List<String> types = HiveRCSchemaUtil.parseSchemaTypes(table_schema);
    List<String> cols = HiveRCSchemaUtil.parseSchema(pcols, table_schema);

    List<FieldSchema> fieldSchemaList = new ArrayList<FieldSchema>(
            cols.size());

    for (int i = 0; i < cols.size(); i++) {
        fieldSchemaList.add(new FieldSchema(cols.get(i), HiveRCSchemaUtil
                .findPigDataType(types.get(i))));
    }

    pigSchema = new ResourceSchema(new Schema(fieldSchemaList));

    props = new Properties();

    // setting table schema properties for ColumnarSerDe
    // these properties are never changed by the columns-to-read filter,
    // because the columnar serde needs to know the
    // complete format of each record.
    props.setProperty(Constants.LIST_COLUMNS,
            HiveRCSchemaUtil.listToString(cols));
    props.setProperty(Constants.LIST_COLUMN_TYPES,
            HiveRCSchemaUtil.listToString(types));
}
 
Author: sigmoidanalytics | Project: spork-streaming | Lines: 45 | Source: HiveColumnarLoader.java

Example 15: setup

import org.apache.hadoop.hive.serde.Constants; // import the required package/class
/**
 * Does the configuration setup and schema parsing and setup.
 *
 * @param table_schema
 *            String
 */
private void setup(String table_schema) {

    if (table_schema == null)
        throw new RuntimeException(
                "The table schema must be defined as colname type, colname type. All types are hive types");

    // create basic configuration for hdfs and hive
    conf = new Configuration();
    hiveConf = new HiveConf(conf, SessionState.class);

    // parse the table_schema string
    List<String> types = HiveRCSchemaUtil.parseSchemaTypes(table_schema);
    List<String> cols = HiveRCSchemaUtil.parseSchema(pcols, table_schema);

    List<FieldSchema> fieldSchemaList = new ArrayList<FieldSchema>(
            cols.size());

    for (int i = 0; i < cols.size(); i++) {
        fieldSchemaList.add(new FieldSchema(cols.get(i), HiveRCSchemaUtil
                .findPigDataType(types.get(i))));
    }

    pigSchema = new ResourceSchema(new Schema(fieldSchemaList));

    props = new Properties();

    // setting table schema properties for ColumnarSerDe
    // these properties are never changed by the columns-to-read filter,
    // because the columnar serde needs to know the
    // complete format of each record.
    props.setProperty(Constants.LIST_COLUMNS,
            HiveRCSchemaUtil.listToString(cols));
    props.setProperty(Constants.LIST_COLUMN_TYPES,
            HiveRCSchemaUtil.listToString(types));
}
 
Author: kaituo | Project: sedge | Lines: 45 | Source: HiveColumnarLoader.java


Note: The org.apache.hadoop.hive.serde.Constants examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce this article without permission.