This article collects typical usage examples of the Java class org.apache.hadoop.hive.serde2.Deserializer. If you are wondering what the Deserializer class is for and how it is used in practice, the curated code examples below should help.
The Deserializer class belongs to the org.apache.hadoop.hive.serde2 package. 15 code examples are shown below, sorted by popularity by default.
Example 1: decomposePredicate
import org.apache.hadoop.hive.serde2.Deserializer; // import the dependent package/class
@Override
public DecomposedPredicate decomposePredicate(JobConf jobConf, Deserializer deserializer, ExprNodeDesc predicate) {
//        PhoenixSerDe phoenixSerDe = (PhoenixSerDe) deserializer;
//        String tableName = phoenixSerDe.getTableProperties().getProperty(PhoenixStorageHandlerConstants.PHOENIX_TABLE_NAME);
//        String predicateKey = PhoenixStorageHandlerUtil.getTableKeyOfSession(jobConf, tableName);
//
//        if (LOG.isDebugEnabled()) {
//            LOG.debug("<<<<<<<<<< predicateKey : " + predicateKey + " >>>>>>>>>>");
//        }
//
//        List<String> columnNameList = phoenixSerDe.getSerdeParams().getColumnNames();
//        PhoenixPredicateDecomposer predicateDecomposer = PhoenixPredicateDecomposerManager.createPredicateDecomposer(predicateKey, columnNameList);
//
//        return predicateDecomposer.decomposePredicate(predicate);

    // 2016-04-04, JeongMin Ju: predicate push-down processing was moved to the Tez path; see PhoenixInputFormat.getSplits.
    return null;
}
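decomposePredicate is Hive's HiveStoragePredicateHandler hook: returning null tells the planner that nothing can be pushed down, so Hive evaluates the whole predicate itself. For contrast, a minimal sketch of a non-trivial decomposition is shown below; pushablePart and residualPart are placeholders for the two halves of the incoming predicate (see Examples 2 and 12 for real splitting logic):

// Minimal sketch: push part of the predicate down, keep the rest for Hive.
DecomposedPredicate dp = new DecomposedPredicate();
dp.pushedPredicate = pushablePart;   // evaluated by the storage layer
dp.residualPredicate = residualPart; // re-evaluated by Hive on returned rows
return dp;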
Example 2: decomposePredicate
import org.apache.hadoop.hive.serde2.Deserializer; // import the dependent package/class
@Override
public DecomposedPredicate decomposePredicate(JobConf jobConf, Deserializer deserializer, ExprNodeDesc predicate) {
    SolrSerDe serDe = (SolrSerDe) deserializer;
    log.debug(ConfigurationUtil.SOLR_COLUMN_MAPPING + "=" + serDe.colNames + " predicate columns=" + predicate.getCols());
    boolean found = false;
    for (String s : predicate.getCols()) {
        if (serDe.colNames.contains(s)) {
            found = true;
        }
    }
    if (!found) {
        return null;
    }
    log.debug(" predicate=" + predicate.getExprString());
    DecomposedPredicate dp = new DecomposedPredicate();
    if (pushDownFilter(serDe.colNames, predicate, dp)) {
        log.debug("decomposed pushed: " + dp.pushedPredicate.getExprString());
        log.debug("decomposed residual: " + ((dp.residualPredicate == null) ? null : dp.residualPredicate.getExprString()));
        return dp;
    }
    return null;
}
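The pushDownFilter helper is not shown in this excerpt. A hedged sketch of how such a split is commonly implemented with Hive's IndexPredicateAnalyzer, the same analyzer the Cassandra handlers use in Examples 12 and 13; the allowed comparison operator is an assumption, and the exact field types of DecomposedPredicate vary by Hive version:

// Hedged sketch of a pushDownFilter-style helper built on IndexPredicateAnalyzer.
private boolean pushDownFilter(List<String> colNames, ExprNodeDesc predicate, DecomposedPredicate dp) {
    IndexPredicateAnalyzer analyzer = new IndexPredicateAnalyzer();
    // Only allow equality comparisons on columns that are mapped to Solr fields.
    analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual");
    for (String col : colNames) {
        analyzer.allowColumnName(col);
    }
    List<IndexSearchCondition> conditions = new ArrayList<IndexSearchCondition>();
    ExprNodeDesc residual = analyzer.analyzePredicate(predicate, conditions);
    if (conditions.isEmpty()) {
        return false; // nothing pushable; Hive evaluates the whole predicate
    }
    dp.pushedPredicate = analyzer.translateSearchConditions(conditions);
    dp.residualPredicate = residual; // whatever Solr cannot evaluate
    return true;
}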
Example 3: getFieldSchemas
import org.apache.hadoop.hive.serde2.Deserializer; // import the dependent package/class
/**
 * First tries getting the {@code FieldSchema}s from the {@code HiveRegistrationUnit}'s columns, if set.
 * Otherwise, gets the {@code FieldSchema}s from the deserializer.
 */
private static List<FieldSchema> getFieldSchemas(HiveRegistrationUnit unit) {
    List<Column> columns = unit.getColumns();
    List<FieldSchema> fieldSchemas = new ArrayList<>();
    if (columns != null && columns.size() > 0) {
        fieldSchemas = getFieldSchemas(columns);
    } else {
        Deserializer deserializer = getDeserializer(unit);
        if (deserializer != null) {
            try {
                fieldSchemas = MetaStoreUtils.getFieldsFromDeserializer(unit.getTableName(), deserializer);
            } catch (SerDeException | MetaException e) {
                LOG.warn("Encountered exception while getting fields from deserializer.", e);
            }
        }
    }
    return fieldSchemas;
}
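The columns-based overload getFieldSchemas(columns) is not shown above; a hedged sketch, assuming HiveRegistrationUnit.Column exposes getName(), getType(), and getComment():

// Hedged sketch of the overload that maps registration-unit columns to metastore FieldSchemas.
private static List<FieldSchema> getFieldSchemas(List<Column> columns) {
    List<FieldSchema> fieldSchemas = new ArrayList<>(columns.size());
    for (Column column : columns) {
        fieldSchemas.add(new FieldSchema(column.getName(), column.getType(), column.getComment()));
    }
    return fieldSchemas;
}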
Example 4: createDeserializer
import org.apache.hadoop.hive.serde2.Deserializer; // import the dependent package/class
private static Deserializer createDeserializer(final Class<? extends Deserializer> clazz) {
    try {
        return clazz.getConstructor().newInstance();
    } catch (ReflectiveOperationException e) {
        throw new RuntimeException("error creating deserializer: " + clazz.getName(), e);
    }
}
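A quick usage sketch; LazySimpleSerDe is one of Hive's built-in Deserializer implementations and has the required no-argument constructor:

// Usage sketch: instantiate a built-in SerDe reflectively via the helper above.
Deserializer deserializer =
        createDeserializer(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class);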
Example 5: HiveBatchWriter
import org.apache.hadoop.hive.serde2.Deserializer; // import the dependent package/class
public HiveBatchWriter(Configuration conf, Deserializer deserializer, String file)
        throws IOException, SerDeException {
    this.deserializer = deserializer;
    this.file = file;
    OrcFile.WriterOptions writerOptions = OrcFile.writerOptions(conf);
    // The SerDe's ObjectInspector defines the schema of the ORC file being written.
    writerOptions.inspector(deserializer.getObjectInspector());
    writer = OrcFile.createWriter(new Path(file), writerOptions);
    // initCallbacks is a field of the enclosing class, not shown in this excerpt.
    if (initCallbacks != null) {
        initCallbacks.forEach(Callback::run);
    }
}
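A hedged usage sketch, assuming the deserializer was already initialized with the table properties (its ObjectInspector becomes the ORC writer's schema); the file path is illustrative:

// Hedged usage sketch; 'schema' holds the table properties (column names/types).
Deserializer deserializer = getDeserializer(schema); // e.g. the helper from Example 9 below
HiveBatchWriter writer = new HiveBatchWriter(new Configuration(), deserializer, "/tmp/part-00000.orc");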
Example 6: decomposePredicate
import org.apache.hadoop.hive.serde2.Deserializer; // import the dependent package/class
@Override
public DecomposedPredicate decomposePredicate(JobConf jobConf, Deserializer deserializer,
        ExprNodeDesc predicate) {
    if (jobConf.getBoolean(DynamoDBConstants.DYNAMODB_FILTER_PUSHDOWN, true)) {
        return new DynamoDBFilterPushdown()
                .pushPredicate(HiveDynamoDBUtil.extractHiveTypeMapping(jobConf), predicate);
    } else {
        return null;
    }
}
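Since the handler reads DynamoDBConstants.DYNAMODB_FILTER_PUSHDOWN with a default of true, pushdown is on unless explicitly disabled per job:

// Disable DynamoDB filter pushdown; decomposePredicate will then return null
// and Hive evaluates the full predicate itself.
jobConf.setBoolean(DynamoDBConstants.DYNAMODB_FILTER_PUSHDOWN, false);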
Example 7: decomposePredicate
import org.apache.hadoop.hive.serde2.Deserializer; // import the dependent package/class
@Override
public DecomposedPredicate decomposePredicate(JobConf jobConf,
        Deserializer deserializer, ExprNodeDesc predicate) {
    // TODO: Implement push down to Kudu here.
    // Placeholder: an empty DecomposedPredicate is returned, so nothing is pushed down yet.
    DecomposedPredicate decomposedPredicate = new DecomposedPredicate();
    return decomposedPredicate;
}
Example 8: getTableObjectInspector
import org.apache.hadoop.hive.serde2.Deserializer; // import the dependent package/class
public static StructObjectInspector getTableObjectInspector(@SuppressWarnings("deprecation") Deserializer deserializer)
{
    try {
        ObjectInspector inspector = deserializer.getObjectInspector();
        checkArgument(inspector.getCategory() == Category.STRUCT, "expected STRUCT: %s", inspector.getCategory());
        return (StructObjectInspector) inspector;
    }
    catch (SerDeException e) {
        throw Throwables.propagate(e);
    }
}
Example 9: getDeserializer
import org.apache.hadoop.hive.serde2.Deserializer; // import the dependent package/class
@SuppressWarnings("deprecation")
public static Deserializer getDeserializer(Properties schema)
{
    String name = getDeserializerClassName(schema);
    Deserializer deserializer = createDeserializer(getDeserializerClass(name));
    initializeDeserializer(deserializer, schema);
    return deserializer;
}
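getDeserializerClassName and getDeserializerClass are not shown in this excerpt. A plausible sketch of the former, assuming the SerDe class name is stored under Hive's standard serialization.lib table property (serdeConstants.SERIALIZATION_LIB):

// Hedged sketch: read the SerDe implementation class from the table properties.
private static String getDeserializerClassName(Properties schema)
{
    String name = schema.getProperty(serdeConstants.SERIALIZATION_LIB);
    checkArgument(name != null, "missing property: %s", serdeConstants.SERIALIZATION_LIB);
    return name;
}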
Example 10: createDeserializer
import org.apache.hadoop.hive.serde2.Deserializer; // import the dependent package/class
@SuppressWarnings("deprecation")
private static Deserializer createDeserializer(Class<? extends Deserializer> clazz)
{
    try {
        return clazz.getConstructor().newInstance();
    }
    catch (ReflectiveOperationException e) {
        throw new RuntimeException("error creating deserializer: " + clazz.getName(), e);
    }
}
Example 11: initializeDeserializer
import org.apache.hadoop.hive.serde2.Deserializer; // import the dependent package/class
@SuppressWarnings("deprecation")
private static void initializeDeserializer(Deserializer deserializer, Properties schema)
{
    try {
        deserializer.initialize(new Configuration(false), schema);
    }
    catch (SerDeException e) {
        // Chain the cause so initialization failures keep their original stack trace.
        throw new RuntimeException("error initializing deserializer: " + deserializer.getClass().getName(), e);
    }
}
Example 12: decomposePredicate
import org.apache.hadoop.hive.serde2.Deserializer; // import the dependent package/class
/**
* Cassandra requires that an IndexClause must contain at least one
* IndexExpression with an EQ operator on a configured index column. Other
* IndexExpression structs may be added to the IndexClause for non-indexed
* columns to further refine the results of the EQ expression.
* <p/>
* In order to push down the predicate filtering, we first get a list of
* indexed columns. If there are no indexed columns, we can't push down the
* predicate. We then walk down the predicate, and see if there is any
* filtering that matches the indexed columns. If there is no matching, we
* can't push down the predicate. For any matching column that is found, we
* need to verify that there is at least one equal operator. If there is no
* equal operator, we can't push down the predicate.
*/
@Override
public DecomposedPredicate decomposePredicate(JobConf jobConf, Deserializer deserializer, ExprNodeDesc predicate) {
    try {
        CqlSerDe cassandraSerde = (CqlSerDe) deserializer;
        String host = jobConf.get(AbstractCassandraSerDe.CASSANDRA_HOST, AbstractCassandraSerDe.DEFAULT_CASSANDRA_HOST);
        int port = jobConf.getInt(AbstractCassandraSerDe.CASSANDRA_PORT, Integer.parseInt(AbstractCassandraSerDe.DEFAULT_CASSANDRA_PORT));
        String ksName = cassandraSerde.getCassandraKeyspace();
        String cfName = cassandraSerde.getCassandraColumnFamily();

        Set<ColumnDef> indexedColumns = CqlPushdownPredicate.getIndexedColumns(host, port, ksName, cfName);
        if (indexedColumns.isEmpty()) {
            return null;
        }

        IndexPredicateAnalyzer analyzer = CqlPushdownPredicate.newIndexPredicateAnalyzer(indexedColumns);
        List<IndexSearchCondition> searchConditions = new ArrayList<IndexSearchCondition>();
        ExprNodeDesc residualPredicate = analyzer.analyzePredicate(predicate, searchConditions);
        if (searchConditions.isEmpty()) {
            return null;
        }
        if (!CqlPushdownPredicate.verifySearchConditions(searchConditions)) {
            return null;
        }

        DecomposedPredicate decomposedPredicate = new DecomposedPredicate();
        decomposedPredicate.pushedPredicate = analyzer.translateSearchConditions(searchConditions);
        decomposedPredicate.residualPredicate = residualPredicate;
        return decomposedPredicate;
    } catch (CassandraException e) {
        // We couldn't get the indexed column names from Cassandra; return null and let Hive handle the filtering.
        logger.info("Error during predicate decomposition", e);
        return null;
    }
}
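The javadoc's "at least one equal operator" rule is what verifySearchConditions enforces. A hedged sketch of that check, noting that IndexSearchCondition.getComparisonOp() returns the comparison UDF's class name; the actual CqlPushdownPredicate implementation may differ:

// Hedged sketch of the "at least one EQ" check described in the javadoc above.
public static boolean verifySearchConditions(List<IndexSearchCondition> conditions) {
    for (IndexSearchCondition condition : conditions) {
        if ("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual".equals(condition.getComparisonOp())) {
            return true; // Cassandra's IndexClause requirement is satisfied
        }
    }
    return false;
}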
Example 13: decomposePredicate
import org.apache.hadoop.hive.serde2.Deserializer; // import the dependent package/class
/**
* Cassandra requires that an IndexClause must contain at least one
* IndexExpression with an EQ operator on a configured index column. Other
* IndexExpression structs may be added to the IndexClause for non-indexed
* columns to further refine the results of the EQ expression.
*
* In order to push down the predicate filtering, we first get a list of
* indexed columns. If there are no indexed columns, we can't push down the
* predicate. We then walk down the predicate, and see if there is any
* filtering that matches the indexed columns. If there is no matching, we
* can't push down the predicate. For any matching column that is found, we
* need to verify that there is at least one equal operator. If there is no
* equal operator, we can't push down the predicate.
*/
@Override
public DecomposedPredicate decomposePredicate(JobConf jobConf, Deserializer deserializer, ExprNodeDesc predicate) {
    try {
        CassandraColumnSerDe cassandraSerde = (CassandraColumnSerDe) deserializer;
        String host = jobConf.get(AbstractCassandraSerDe.CASSANDRA_HOST, AbstractCassandraSerDe.DEFAULT_CASSANDRA_HOST);
        int port = jobConf.getInt(AbstractCassandraSerDe.CASSANDRA_PORT, Integer.parseInt(AbstractCassandraSerDe.DEFAULT_CASSANDRA_PORT));
        String ksName = cassandraSerde.getCassandraKeyspace();
        String cfName = cassandraSerde.getCassandraColumnFamily();

        Set<ColumnDef> indexedColumns = CassandraPushdownPredicate.getIndexedColumns(host, port, ksName, cfName);
        if (indexedColumns.isEmpty()) {
            return null;
        }

        IndexPredicateAnalyzer analyzer = CassandraPushdownPredicate.newIndexPredicateAnalyzer(indexedColumns);
        List<IndexSearchCondition> searchConditions = new ArrayList<IndexSearchCondition>();
        ExprNodeDesc residualPredicate = analyzer.analyzePredicate(predicate, searchConditions);
        if (searchConditions.isEmpty()) {
            return null;
        }
        if (!CassandraPushdownPredicate.verifySearchConditions(searchConditions)) {
            return null;
        }

        DecomposedPredicate decomposedPredicate = new DecomposedPredicate();
        decomposedPredicate.pushedPredicate = analyzer.translateSearchConditions(searchConditions);
        decomposedPredicate.residualPredicate = residualPredicate;
        return decomposedPredicate;
    } catch (CassandraException e) {
        // We couldn't get the indexed column names from Cassandra; return null and let Hive handle the filtering.
        logger.info("Error during predicate decomposition", e);
        return null;
    }
}
Example 14: decomposePredicate
import org.apache.hadoop.hive.serde2.Deserializer; // import the dependent package/class
@Override
public DecomposedPredicate decomposePredicate(JobConf conf,
        Deserializer deserializer,
        ExprNodeDesc desc) {
    if (conf.get(AccumuloSerde.NO_ITERATOR_PUSHDOWN) == null) {
        return predicateHandler.decompose(conf, desc);
    } else {
        log.info("Set to ignore iterator. skipping predicate handler");
        return null;
    }
}
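Note that only the presence of the property is checked, so setting it to any value disables iterator pushdown:

// Any value, even "false", disables iterator pushdown, because only presence is checked.
conf.set(AccumuloSerde.NO_ITERATOR_PUSHDOWN, "true");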
Example 15: decomposePredicate
import org.apache.hadoop.hive.serde2.Deserializer; // import the dependent package/class
@Override
public DecomposedPredicate decomposePredicate(final JobConf jobConf,
        final Deserializer deserializer,
        final ExprNodeDesc exprNodeDesc) {
    return MonarchPredicateHandler.decomposePredicate(jobConf, (MonarchSerDe) deserializer, exprNodeDesc);
}