

Java ExprNodeDesc Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hive.ql.plan.ExprNodeDesc. If you are unsure what ExprNodeDesc is for or how to use it, the curated examples below should help.


The ExprNodeDesc class belongs to the org.apache.hadoop.hive.ql.plan package. Fifteen code examples of the class are presented below, sorted by popularity by default.
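
Before the individual examples, here is a minimal, self-contained sketch of what an ExprNodeDesc tree looks like: a boolean function node over a column leaf and a constant leaf. It uses the same ExprNodeGenericFuncDesc constructor as Example 5 below; the column name, table alias, and literal are invented for illustration.

import java.util.Arrays;

import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class ExprNodeDescDemo {
  public static void main(String[] args) {
    // Leaf: a reference to an INT column "id" on table alias "t".
    ExprNodeDesc column =
        new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "id", "t", false);
    // Leaf: the integer literal 42.
    ExprNodeDesc constant =
        new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, 42);
    // Inner node: the boolean expression (id = 42).
    ExprNodeDesc predicate = new ExprNodeGenericFuncDesc(
        TypeInfoFactory.booleanTypeInfo,
        new GenericUDFOPEqual(),
        Arrays.asList(column, constant));
    System.out.println(predicate.getExprString());
  }
}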

Example 1: getPushDownFilterNode

import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; // import the required package/class
@Override
public IExpressionNode getPushDownFilterNode(){
  // IS NULL takes exactly one argument: the target column.
  if( nodeDescList.size() != 1 ){
    return null;
  }
  ExprNodeDesc columnDesc = nodeDescList.get( 0 );

  // Only plain column references can be pushed down.
  if( ! ( columnDesc instanceof ExprNodeColumnDesc ) ){
    return null;
  }

  IExtractNode extractNode = CreateExtractNodeUtil.getExtractNode( columnDesc );
  if( extractNode == null ){
    return null;
  }

  // Map the Hive type to the storage layer's column type and attach a null filter.
  ColumnType targetColumnType = MDSColumnTypeUtil.typeInfoToColumnType( columnDesc.getTypeInfo() );

  return new ExecuterNode( extractNode , new NullFilter( targetColumnType ) );
}
 
Developer: yahoojapan, Project: multiple-dimension-spread, Lines: 21, Source: NullHiveExpr.java

Example 2: getPushDownFilterNode

import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; // import the required package/class
@Override
public IExpressionNode getPushDownFilterNode(){
  // IS NOT NULL also takes exactly one argument: the target column.
  if( nodeDescList.size() != 1 ){
    return null;
  }
  ExprNodeDesc columnDesc = nodeDescList.get( 0 );

  // Only plain column references can be pushed down.
  if( ! ( columnDesc instanceof ExprNodeColumnDesc ) ){
    return null;
  }

  IExtractNode extractNode = CreateExtractNodeUtil.getExtractNode( columnDesc );
  if( extractNode == null ){
    return null;
  }

  // Identical to Example 1 except that a NotNullFilter is attached.
  ColumnType targetColumnType = MDSColumnTypeUtil.typeInfoToColumnType( columnDesc.getTypeInfo() );

  return new ExecuterNode( extractNode , new NotNullFilter( targetColumnType ) );
}
 
Developer: yahoojapan, Project: multiple-dimension-spread, Lines: 21, Source: NotNullHiveExpr.java
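
Examples 1 and 2 differ only in the filter they attach (NullFilter vs. NotNullFilter); the guard logic is identical. As a sketch, that shared guard could be factored out as follows (a hypothetical helper, not part of the project):

import java.util.List;

import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;

// Hypothetical helper: both examples push down only when the expression has
// exactly one argument and that argument is a plain column reference.
static boolean isSingleColumnReference( final List<ExprNodeDesc> nodeDescList ){
  return nodeDescList.size() == 1
      && nodeDescList.get( 0 ) instanceof ExprNodeColumnDesc;
}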

Example 3: decomposePredicate

import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; // import the required package/class
/**
 * Decompose the predicates (filter expressions) provided in the Hive query; if some
 * predicates can be pushed down to Monarch, use them at query time to reduce the
 * data fetched from Monarch (Geode). The residual predicates (the ones that cannot
 * be executed on Monarch/Geode) still need to be executed by the Hive query engine.
 * <p>
 * Which predicates are executed on Monarch is decided by the column type and the
 * predicate operation. The following are currently supported for execution
 * on the Monarch/Geode side (as of 2015-12-23):
 * - Predicate Operations:
 * -- EQUAL
 * -- LESS THAN
 * -- LESS THAN OR EQUAL
 * - Column Types:
 * -- INT
 * -- LONG
 * -- STRING
 *
 * @param jobConf      the job configuration
 * @param deserializer the deserializer
 * @param exprNodeDesc the Hive expression to be decomposed
 * @return the decomposed predicate indicating which predicates will be executed on Monarch
 *         and which (residual) predicates will be executed by the Hive query engine
 */
public static DecomposedPredicate decomposePredicate(final JobConf jobConf,
                                                     final MonarchSerDe deserializer,
                                                     final ExprNodeDesc exprNodeDesc) {
  List<IndexSearchCondition> indexSearchConditions = new ArrayList<>(5);
  IndexPredicateAnalyzer ipa = getIndexPredicateAnalyzer(deserializer);
  ExprNodeDesc residual = ipa.analyzePredicate(exprNodeDesc, indexSearchConditions);
  ipa.clearAllowedColumnNames();
  if (indexSearchConditions.isEmpty()) {
    if (logger.isDebugEnabled())
      logger.debug("nothing to decompose. Returning");
    return null;
  }

  DecomposedPredicate dp = new DecomposedPredicate();
  dp.pushedPredicate = ipa.translateSearchConditions(indexSearchConditions);
  dp.residualPredicate = (ExprNodeGenericFuncDesc) residual;
  dp.pushedPredicateObject = null;

  if (logger.isDebugEnabled()) {
    logger.debug("[To Monarch -->] PushedPredicate= {}", dp.pushedPredicate);
    logger.debug("[In Hive    -->] ResidualPredicate= {}", dp.residualPredicate);
  }
  return dp;
}
 
Developer: ampool, Project: monarch, Lines: 49, Source: MonarchPredicateHandler.java
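
Predicate decomposition in this example is driven by Hive's IndexPredicateAnalyzer. The same flow, reduced to a stand-alone sketch with an assumed pushable column "id" and equality as the only supported operator (the column name and operator set are illustrative; field types of DecomposedPredicate vary slightly across Hive versions, so the cast mirrors Example 3):

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hive.ql.index.IndexPredicateAnalyzer;
import org.apache.hadoop.hive.ql.index.IndexSearchCondition;
import org.apache.hadoop.hive.ql.metadata.HiveStoragePredicateHandler.DecomposedPredicate;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;

static DecomposedPredicate decomposeSketch(ExprNodeDesc predicate) {
  IndexPredicateAnalyzer analyzer = new IndexPredicateAnalyzer();
  analyzer.allowColumnName("id");                              // assumed pushable column
  analyzer.addComparisonOp(GenericUDFOPEqual.class.getName()); // only '=' is pushable here

  List<IndexSearchCondition> conditions = new ArrayList<>();
  // analyzePredicate returns whatever could NOT be converted (the residual).
  ExprNodeDesc residual = analyzer.analyzePredicate(predicate, conditions);
  if (conditions.isEmpty()) {
    return null; // nothing pushable; Hive evaluates the whole predicate
  }
  DecomposedPredicate dp = new DecomposedPredicate();
  dp.pushedPredicate = analyzer.translateSearchConditions(conditions);
  dp.residualPredicate = (ExprNodeGenericFuncDesc) residual;
  return dp;
}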

Example 4: testPredicate_Single

import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; // import the required package/class
/**
 * Test single-function predicates.
 *
 * @param exprNodeDesc  the node expression
 * @param checkPushed   the null check for the pushed predicate
 * @param checkResidual the null check for the residual predicate
 * @throws Exception if predicate decomposition or verification fails
 */
@Test(dataProvider = "getDataForSinglePredicate")
public void testPredicate_Single(final ExprNodeDesc exprNodeDesc,
                                 final NullCheck checkPushed, final NullCheck checkResidual,
                                 final Filter expectedPredicate) throws
  Exception {

  DecomposedPredicate dp = MonarchPredicateHandler.decomposePredicate(null, serDe, exprNodeDesc);
  // Assertions disabled in the upstream test:
  //assertNotNull(dp);
  //checkPushed.check(dp.pushedPredicate);
  //checkResidual.check(dp.residualPredicate);

  if (checkPushed.equals(NullCheck.NotNull)) {
    final String expression = Utilities.serializeExpression(dp.pushedPredicate);
    final String[] cols = Arrays.stream(properties.getProperty("columns").split(",")).toArray(String[]::new);
    MPredicateHolder[] phs1 = MonarchPredicateHandler.getPushDownPredicates(expression, cols);
    FilterList phs = MonarchPredicateHandler.getPushDownFilters(expression, cols);
    assertEquals(phs.getFilters().size(), 1);
    assertEquals(phs.getFilters().get(0).toString(), expectedPredicate.toString());
  }
}
 
Developer: ampool, Project: monarch, Lines: 29, Source: MonarchPredicateHandlerTest.java
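
The test relies on the pushed predicate surviving a serialize/deserialize round trip through the job configuration, which is how Hive hands pushed filters to an InputFormat (Examples 10 and 11 read the string back). A version-hedged sketch of that round trip; note that the home and arity of these helpers differ across Hive releases (Example 11 passes a Configuration to deserializeExpression, and newer Hive moved both methods to SerializationUtilities):

import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
import org.apache.hadoop.hive.ql.plan.TableScanDesc;
import org.apache.hadoop.mapred.JobConf;

static void shipAndRestore(JobConf jobConf, ExprNodeGenericFuncDesc pushed) {
  // Writer side: the storage handler serializes the pushed predicate into
  // the job configuration under Hive's standard filter-expression key.
  String serialized = Utilities.serializeExpression(pushed);
  jobConf.set(TableScanDesc.FILTER_EXPR_CONF_STR, serialized);

  // Reader side: the InputFormat restores the expression tree from the string.
  ExprNodeGenericFuncDesc restored =
      Utilities.deserializeExpression(jobConf.get(TableScanDesc.FILTER_EXPR_CONF_STR));
  System.out.println(restored.getExprString());
}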

Example 5: convertToExpression

import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; // import the required package/class
/**
 * Convert a generic Ampool filter (possibly a nested FilterList) to the
 * corresponding Hive generic-UDF expression tree.
 *
 * @param filter the Ampool filter
 * @param td the Ampool table descriptor
 * @return the generic ORC predicate expression, or null for unsupported filter types
 */
public static ExprNodeDesc convertToExpression(final Filter filter, final TableDescriptor td)
    throws IOException {
  if (filter instanceof FilterList) {
    FilterList fl = (FilterList) filter;
    ExprNodeDesc expression = new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo,
        fl.getOperator() == FilterList.Operator.MUST_PASS_ALL ? new GenericUDFOPAnd()
            : new GenericUDFOPOr(),
        new ArrayList<>());
    for (Filter f : fl.getFilters()) {
      expression.getChildren().add(convertToExpression(f, td));
    }
    return expression;
  } else if (filter instanceof SingleColumnValueFilter) {
    SingleColumnValueFilter cf = (SingleColumnValueFilter) filter;
    if (!UDF_CONVERT_MAP.containsKey(cf.getOperator())) {
      throw new IOException("Failed to convert ComparisonOperator: " + cf.getOperator());
    }
    return UDF_CONVERT_MAP.get(cf.getOperator()).apply(cf, td);
  } else {
    return null;
  }
}
 
Developer: ampool, Project: monarch, Lines: 30, Source: OrcUtils.java
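
The FilterList branch above builds a boolean function node with an empty child list and appends operands afterwards. The same pattern in isolation, as a small sketch (the helper name is made up):

import java.util.ArrayList;

import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

// Hypothetical helper: combine two boolean expressions with AND, the way
// convertToExpression combines MUST_PASS_ALL filter lists.
static ExprNodeDesc and(ExprNodeDesc left, ExprNodeDesc right) {
  ExprNodeDesc node = new ExprNodeGenericFuncDesc(
      TypeInfoFactory.booleanTypeInfo, new GenericUDFOPAnd(), new ArrayList<>());
  node.getChildren().add(left);
  node.getChildren().add(right);
  return node;
}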

Example 6: decomposePredicate

import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; // import the required package/class
@Override
public DecomposedPredicate decomposePredicate(JobConf jobConf, Deserializer deserializer, ExprNodeDesc predicate) {
	// 2016-04-04 modified by JeongMin Ju: predicate push-down now happens on the
	// Tez path instead (see PhoenixInputFormat.getSplits), so nothing is
	// decomposed here. The previous implementation was:
	//
	// PhoenixSerDe phoenixSerDe = (PhoenixSerDe) deserializer;
	// String tableName = phoenixSerDe.getTableProperties().getProperty(PhoenixStorageHandlerConstants.PHOENIX_TABLE_NAME);
	// String predicateKey = PhoenixStorageHandlerUtil.getTableKeyOfSession(jobConf, tableName);
	//
	// if (LOG.isDebugEnabled()) {
	// 	LOG.debug("<<<<<<<<<< predicateKey : " + predicateKey + " >>>>>>>>>>");
	// }
	//
	// List<String> columnNameList = phoenixSerDe.getSerdeParams().getColumnNames();
	// PhoenixPredicateDecomposer predicateDecomposer = PhoenixPredicateDecomposerManager.createPredicateDecomposer(predicateKey, columnNameList);
	// return predicateDecomposer.decomposePredicate(predicate);
	return null;
}
 
Developer: mini666, Project: hive-phoenix-handler, Lines: 19, Source: PhoenixStorageHandler.java

Example 7: getFilter

import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; // import the required package/class
public DynamoDBFilter getFilter(DynamoDBFilterOperator operator, String columnName, String
    columnType, IndexSearchCondition condition) {
  switch (operator.getType()) {
    case UNARY:
      return getFilter(operator, columnName, columnType);
    case BINARY:
      return getFilter(operator, columnName, columnType, condition.getConstantDesc().getValue()
          .toString());
    case NARY:
      List<ExprNodeDesc> children = ShimsLoader.getHiveShims().getIndexExpression(condition)
          .getChildren();
      String[] values = new String[children.size() - 1];
      // This currently supports IN clause only
      // The first element is column name and rest of the elements are
      // the values it can take
      for (int i = 1; i < children.size(); i++) {
        values[i - 1] = ((ExprNodeConstantDesc) children.get(i)).getValue().toString();
      }
      return getFilter(operator, columnName, columnType, values);
    default:
      throw new RuntimeException("Unknown operator type. Operator: " + operator + " "
          + "OperatorType: " + operator.getType());
  }
}
 
Developer: awslabs, Project: emr-dynamodb-connector, Lines: 25, Source: DynamoDBFilterFactory.java
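
The NARY branch above encodes Hive's convention for IN expressions (GenericUDFIn): child 0 is the column reference and children 1..n are the literal values. Extracted as a stand-alone sketch (the helper name is hypothetical):

import java.util.List;

import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;

// For `c IN (1, 2, 3)` the children are [c, 1, 2, 3]; skip the column and
// stringify the constants, as the DynamoDB filter factory does above.
static String[] inClauseValues(ExprNodeGenericFuncDesc inExpr) {
  List<ExprNodeDesc> children = inExpr.getChildren();
  String[] values = new String[children.size() - 1];
  for (int i = 1; i < children.size(); i++) {
    values[i - 1] = ((ExprNodeConstantDesc) children.get(i)).getValue().toString();
  }
  return values;
}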

Example 8: pushPredicate

import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; // import the required package/class
public DecomposedPredicate pushPredicate(Map<String, String> hiveTypeMapping, ExprNodeDesc
    predicate) {
  log.info("Checking predicates for pushdown in DynamoDB query");
  List<IndexSearchCondition> searchConditions = getGenericSearchConditions(hiveTypeMapping,
      predicate);
  log.info("Pushed predicates: " + searchConditions);
  if (searchConditions.isEmpty()) {
    return null;
  } else {
    List<IndexSearchCondition> finalSearchCondition =
        prioritizeSearchConditions(searchConditions);
    IndexPredicateAnalyzer analyzer = new IndexPredicateAnalyzer();
    DecomposedPredicate decomposedPredicate = new DecomposedPredicate();
    decomposedPredicate.pushedPredicate =
        analyzer.translateSearchConditions(finalSearchCondition);
    // The full original predicate is kept as the residual, so Hive re-applies
    // every condition; the DynamoDB-side filtering is purely an optimization.
    decomposedPredicate.residualPredicate = (ExprNodeGenericFuncDesc) predicate;
    return decomposedPredicate;
  }
}
 
Developer: awslabs, Project: emr-dynamodb-connector, Lines: 20, Source: DynamoDBFilterPushdown.java

Example 9: getGenericSearchConditions

import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; // import the required package/class
private List<IndexSearchCondition> getGenericSearchConditions(Map<String, String> hiveTypeMapping,
    ExprNodeDesc predicate) {

  IndexPredicateAnalyzer analyzer = new IndexPredicateAnalyzer();

  // DynamoDB does not support filters on set-type columns
  for (Entry<String, String> entry : hiveTypeMapping.entrySet()) {
    if (eligibleHiveTypes.contains(entry.getValue())) {
      analyzer.allowColumnName(entry.getKey());
    }
  }

  for (DynamoDBFilterOperator op : DynamoDBFilterOperator.values()) {
    if (op.getHiveClass() != null) {
      analyzer.addComparisonOp(op.getHiveClass());
    }
  }

  List<IndexSearchCondition> searchConditions = new ArrayList<>();
  analyzer.analyzePredicate(predicate, searchConditions);
  return searchConditions;
}
 
Developer: awslabs, Project: emr-dynamodb-connector, Lines: 23, Source: DynamoDBFilterPushdown.java

Example 10: getQueryFilter

import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; // import the required package/class
private DynamoDBQueryFilter getQueryFilter(JobConf conf, Map<String, String>
    hiveDynamoDBMapping, Map<String, String> hiveTypeMapping) throws IOException {
  if (hiveDynamoDBMapping == null) {
    /*
     * Column mapping may be null when user has mapped a DynamoDB item
     * onto a single hive map<string, string> column.
     */
    return new DynamoDBQueryFilter();
  }

  DynamoDBClient client = new DynamoDBClient(conf);
  String filterExprSerialized = conf.get(TableScanDesc.FILTER_EXPR_CONF_STR);
  if (filterExprSerialized == null) {
    return new DynamoDBQueryFilter();
  }
  ExprNodeDesc filterExpr =
      ShimsLoader.getHiveShims().deserializeExpression(filterExprSerialized);

  DynamoDBFilterPushdown pushdown = new DynamoDBFilterPushdown();
  List<KeySchemaElement> schema =
      client.describeTable(conf.get(DynamoDBConstants.TABLE_NAME)).getKeySchema();
  DynamoDBQueryFilter queryFilter = pushdown.predicateToDynamoDBFilter(
      schema, hiveDynamoDBMapping, hiveTypeMapping, filterExpr);
  return queryFilter;
}
 
Developer: awslabs, Project: emr-dynamodb-connector, Lines: 26, Source: HiveDynamoDBInputFormat.java
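
For context, the key schema fetched above is what lets the connector distinguish hash/range key conditions (servable by a DynamoDB Query) from conditions that force a filtered Scan. A short sketch of inspecting it with the AWS SDK type already used by the example:

import java.util.List;

import com.amazonaws.services.dynamodbv2.model.KeySchemaElement;

// Print each key attribute and its role; getKeyType() is "HASH" or "RANGE".
static void dumpKeySchema(List<KeySchemaElement> schema) {
  for (KeySchemaElement element : schema) {
    System.out.println(element.getAttributeName() + " -> " + element.getKeyType());
  }
}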

Example 11: SolrTable

import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; // import the required package/class
public SolrTable(JobConf conf) {
  String filterExprSerialized = conf.get(TableScanDesc.FILTER_EXPR_CONF_STR);
  if (filterExprSerialized != null) {
    ExprNodeDesc filterExpr = Utilities.deserializeExpression(filterExprSerialized, conf);
    log.debug("filterExpr=" + filterExpr.getExprString());
    SolrStorageHandler.buildQuery(filterExpr, fq, q);
  }

  this.url = ConfigurationUtil.getUrl(conf);
  this.qs = ConfigurationUtil.getQs(conf);
  this.fields = ConfigurationUtil.getAllColumns(conf.get(ConfigurationUtil.SOLR_COLUMN_MAPPING));
  this.facetType = conf.get(ConfigurationUtil.SOLR_FACET_MAPPING);
  log.info("solr.url=" + url + " solr.qs=" + qs + " fq=" + fq + " q=" + q);

  this.solrSplitSize = ConfigurationUtil.getSolrSplitSize(conf);
  this.outputBuffer = new ArrayList<SolrInputDocument>(solrSplitSize);
  this.server = new HttpSolrServer(url);
}
 
Developer: vroyer, Project: hive-solr-search, Lines: 19, Source: SolrTable.java

Example 12: decomposePredicate

import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; // import the required package/class
@Override
public DecomposedPredicate decomposePredicate(JobConf jobConf, Deserializer deserializer, ExprNodeDesc predicate) {
	SolrSerDe serDe = (SolrSerDe)deserializer;
	log.debug(ConfigurationUtil.SOLR_COLUMN_MAPPING+"="+serDe.colNames+" predicate columns="+predicate.getCols());
	boolean found = false;
	for(String s:predicate.getCols()) {
		if (serDe.colNames.contains(s)) found=true;
	}
	if (!found) return null;
	log.debug(" predicate="+predicate.getExprString());

	
	DecomposedPredicate dp = new DecomposedPredicate();
	if (pushDownFilter(serDe.colNames, predicate, dp)) {
		log.debug("decomposed pushed: "+dp.pushedPredicate.getExprString());
		log.debug("decomposed residual: "+((dp.residualPredicate == null) ? null : dp.residualPredicate.getExprString()));
		return dp;
	}
	return null;
}
 
Developer: vroyer, Project: hive-solr-search, Lines: 21, Source: SolrStorageHandler.java

Example 13: dumpFilterExpr

import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; // import the required package/class
protected static void dumpFilterExpr(ExprNodeDesc node) {
  if (node != null) {
    log.debug("dump: " + node.getClass().getName() + " name=" + node.getName() + " expr=" + node.getExprString() + "[ ");
    if (node instanceof ExprNodeGenericFuncDesc) {
      log.debug(" func=" + ((ExprNodeGenericFuncDesc) node).getGenericUDF());
    }
    List<ExprNodeDesc> children = node.getChildren();
    if (children != null) {
      for (ExprNodeDesc child : children) {
        if (child != null) dumpFilterExpr(child);
        log.debug(",");
      }
    }
    log.debug("]");
  }
}
 
Developer: vroyer, Project: hive-solr-search, Lines: 17, Source: SolrStorageHandler.java

Example 14: decompose

import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; // import the required package/class
/**
 * Decompose the predicate into search conditions that can be pushed to the
 * storage layer and a residual part that Hive must still evaluate.
 *
 * @param conf JobConf
 * @param desc predicate expression node
 * @return DecomposedPredicate containing translated search conditions the analyzer can support,
 *         or null if nothing can be pushed down
 */
public DecomposedPredicate decompose(JobConf conf, ExprNodeDesc desc) {
    IndexPredicateAnalyzer analyzer = newAnalyzer(conf);
    List<IndexSearchCondition> sConditions = new ArrayList<IndexSearchCondition>();
    ExprNodeDesc residualPredicate = analyzer.analyzePredicate(desc, sConditions);
    if (sConditions.isEmpty()) {
        if (log.isInfoEnabled())
            log.info("nothing to decompose. Returning");
        return null;
    }
    DecomposedPredicate decomposedPredicate = new DecomposedPredicate();
    decomposedPredicate.pushedPredicate = analyzer.translateSearchConditions(sConditions);
    decomposedPredicate.residualPredicate = residualPredicate;
    return decomposedPredicate;
}
 
Developer: bfemiano, Project: accumulo-hive-storage-manager, Lines: 24, Source: AccumuloPredicateHandler.java
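
The example calls a newAnalyzer(conf) helper that is not shown. Based on the setup in Example 9, a factory like it plausibly looks as follows; this is a hedged reconstruction, not the project's actual code, and the operator set is illustrative:

import java.util.Collection;

import org.apache.hadoop.hive.ql.index.IndexPredicateAnalyzer;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan;

// Hypothetical reconstruction: register the pushable columns and the
// comparison operators the storage layer can evaluate.
static IndexPredicateAnalyzer newAnalyzer(Collection<String> pushableColumns) {
  IndexPredicateAnalyzer analyzer = new IndexPredicateAnalyzer();
  for (String column : pushableColumns) {
    analyzer.allowColumnName(column);
  }
  analyzer.addComparisonOp(GenericUDFOPEqual.class.getName());
  analyzer.addComparisonOp(GenericUDFOPLessThan.class.getName());
  analyzer.addComparisonOp(GenericUDFOPGreaterThan.class.getName());
  return analyzer;
}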

Example 15: HiveExprOrNode

import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; // import the required package/class
public HiveExprOrNode( final List<ExprNodeDesc> childExprNodeDesc ){
  for( ExprNodeDesc nodeChild : childExprNodeDesc ){
    if( nodeChild instanceof ExprNodeGenericFuncDesc ){
      // Function nodes (comparisons, nested AND/OR, ...) become child expression nodes.
      addChildNode( (ExprNodeGenericFuncDesc)nodeChild );
    }
    else if( ( nodeChild instanceof ExprNodeColumnDesc ) || ( nodeChild instanceof ExprNodeFieldDesc ) ){
      // Bare boolean columns and struct fields act as boolean expressions.
      childNodeList.add( new BooleanHiveExpr( nodeChild ) );
    }
    else{
      // Anything else cannot be pushed down.
      childNodeList.add( new UnsupportHiveExpr() );
    }
  }
}
 
Developer: yahoojapan, Project: multiple-dimension-spread, Lines: 14, Source: HiveExprOrNode.java


Note: the org.apache.hadoop.hive.ql.plan.ExprNodeDesc examples in this article were compiled from open-source projects hosted on GitHub and similar platforms. Copyright in each snippet remains with its original authors; consult the corresponding project's license before using or redistributing the code. Do not republish without permission.