Java ASTNode.getChild Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hive.ql.parse.ASTNode.getChild. If you are wondering what ASTNode.getChild does, how to use it, or what it looks like in practice, the curated method examples below may help. You can also explore further usage examples of its declaring class, org.apache.hadoop.hive.ql.parse.ASTNode.


The 15 code examples of the ASTNode.getChild method shown below are ordered by popularity by default.
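
Before the individual examples, here is a minimal, self-contained sketch of the traversal pattern that nearly all of them share: parse a HiveQL statement into an ASTNode and walk it recursively with getChildCount and getChild. It assumes Hive's ParseDriver API (whose exact signature can differ between Hive versions), and the class name AstWalkSketch is made up for illustration; it is not taken from any of the projects listed below.

import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.ParseDriver;
import org.apache.hadoop.hive.ql.parse.ParseException;

public class AstWalkSketch {

  public static void main(String[] args) throws ParseException {
    // Parse a HiveQL statement into an AST (ParseDriver.parse(String) in Hive 1.x/2.x).
    ParseDriver pd = new ParseDriver();
    ASTNode root = pd.parse("SELECT t.col1, t.col2 FROM db.tab t WHERE t.col1 > 10");
    dump(root, 0);
  }

  // Recursively print every node; getChild(i) returns an ANTLR Tree, so it is cast back to ASTNode.
  private static void dump(ASTNode node, int depth) {
    StringBuilder indent = new StringBuilder();
    for (int i = 0; i < depth; i++) {
      indent.append("  ");
    }
    // getText() can be null for the synthetic root node the parser sometimes produces.
    System.out.println(indent + String.valueOf(node.getText()));
    for (int i = 0; i < node.getChildCount(); i++) {
      dump((ASTNode) node.getChild(i), depth + 1);
    }
  }
}

Running such a walk prints the token text of each node (TOK_QUERY, TOK_FROM, TOK_TABREF, and so on), which is exactly how the helpers below locate the subtrees they inspect or rewrite.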

Example 1: getTableFromTabRefNode

import org.apache.hadoop.hive.ql.parse.ASTNode; // import the required package/class
/**
 * Gets the table from tab ref node.
 *
 * @param tree the tree
 * @return the table from tab ref node
 */
public String getTableFromTabRefNode(ASTNode tree) {
  String table = "";
  ASTNode tabName = (ASTNode) tree.getChild(0);
  if (tabName.getChildCount() == 2) {
    table = tabName.getChild(0).getText() + "." + tabName.getChild(1).getText();
  } else {
    table = tabName.getChild(0).getText();
  }
  if (tree.getChildCount() > 1) {
    table = table + " " + tree.getChild(1).getText();
  }
  String[] tabSplit = table.split(" +");

  if (tabSplit.length == 2) {
    tableToAliasMap.put(tabSplit[0], tabSplit[1]);
  }
  return table;
}
 
Developer: apache, Project: lens, Lines: 25, Source: ColumnarSQLRewriter.java
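
As a hedged aside (not part of the lens project), getTableFromTabRefNode above expects a TOK_TABREF subtree: its first child is TOK_TABNAME with either one Identifier child (table) or two (db and table), and an optional second child holding the alias. A hypothetical helper like the following could locate that subtree in a parsed query before passing it in:

import org.apache.hadoop.hive.ql.parse.ASTNode;

public class TabRefFinder {

  // Hypothetical helper: depth-first search for the first node with the given token type.
  public static ASTNode findFirst(ASTNode node, int tokenType) {
    if (node.getToken() != null && node.getToken().getType() == tokenType) {
      return node;
    }
    for (int i = 0; i < node.getChildCount(); i++) {
      ASTNode found = findFirst((ASTNode) node.getChild(i), tokenType);
      if (found != null) {
        return found;
      }
    }
    return null;
  }

  // Usage sketch (assuming 'root' is an already parsed query and 'rewriter' is a ColumnarSQLRewriter):
  //   ASTNode tabRef = TabRefFinder.findFirst(root, HiveParser.TOK_TABREF);
  //   String table = rewriter.getTableFromTabRefNode(tabRef);  // e.g. "db.tab t" for "FROM db.tab t"
}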

Example 2: updateOuterASTDuplicateAliases

import org.apache.hadoop.hive.ql.parse.ASTNode; // import the required package/class
private void updateOuterASTDuplicateAliases(ASTNode node, Map<String, List<String>> aliasMap) {
  if (node.getToken().getType() == HiveParser.DOT) {
    String col = node.getChild(1).toString();
    for (Map.Entry<String, List<String>> entry : aliasMap.entrySet()) {
      if (entry.getValue().contains(col)) {
        try {
          node.setChild(1, HQLParser.parseExpr(entry.getKey()));
        } catch (LensException e) {
          log.error("Unable to parse select expression: {}.", entry.getKey());
        }
      }

    }
  }
  for (int i = 0; i < node.getChildCount(); i++) {
    ASTNode child = (ASTNode) node.getChild(i);
    updateOuterASTDuplicateAliases(child, aliasMap);
  }
}
 
Developer: apache, Project: lens, Lines: 20, Source: UnionQueryWriter.java

Example 3: getNumFactTableInExpressions

import org.apache.hadoop.hive.ql.parse.ASTNode; // import the required package/class
/**
 * Get the number of fact columns used in an expression
 *
 * @param node
 * @param count
 * @return Number of fact columns used in expression
 */
protected int getNumFactTableInExpressions(ASTNode node, MutableInt count) {

  if (node == null) {
    log.debug("ASTNode is null ");
    return 0;
  }
  if (node.getToken().getType() == HiveParser.TOK_TABLE_OR_COL) {
    String factAlias = getFactAlias();
    String table = node.getChild(0).getText();
    if (table.equals(factAlias)) {
      count.add(1);
    }
  }
  for (int i = 0; i < node.getChildCount(); i++) {
    ASTNode child = (ASTNode) node.getChild(i);
    getNumFactTableInExpressions(child, count);
  }

  return count.intValue();
}
 
Developer: apache, Project: lens, Lines: 28, Source: ColumnarSQLRewriter.java

Example 4: updateAliasFromAST

import org.apache.hadoop.hive.ql.parse.ASTNode; // import the required package/class
/**
 * Update the alias and map the old alias to the new one
 *
 * @param from
 */
protected void updateAliasFromAST(ASTNode from) {

  String newAlias;
  String table;
  String dbAndTable = "";
  if (TOK_TABREF == from.getToken().getType()) {
    ASTNode tabName = (ASTNode) from.getChild(0);
    if (tabName.getChildCount() == 2) {
      dbAndTable = tabName.getChild(0).getText() + "_" + tabName.getChild(1).getText();
      table = tabName.getChild(1).getText();
    } else {
      table = tabName.getChild(0).getText();
    }
    if (from.getChildCount() > 1) {
      ASTNode alias = (ASTNode) from.getChild(1);
      newAlias = dbAndTable + "_" + from.getChild(1).getText();
      mapAliases.put(alias.getText(), table + "__" + newAlias);
      alias.getToken().setText(table + "__" + newAlias);
    }
  }
  for (int i = 0; i < from.getChildCount(); i++) {
    updateAliasFromAST((ASTNode) from.getChild(i));
  }
}
 
Developer: apache, Project: lens, Lines: 31, Source: ColumnarSQLRewriter.java

Example 5: replaceColumnNames

import org.apache.hadoop.hive.ql.parse.ASTNode; // import the required package/class
void replaceColumnNames(ASTNode node) {
  if (node == null) {
    return;
  }
  int nodeType = node.getToken().getType();
  if (nodeType == HiveParser.DOT) {
    ASTNode tabident = HQLParser.findNodeByPath(node, TOK_TABLE_OR_COL, Identifier);
    ASTNode colIdent = (ASTNode) node.getChild(1);
    String column = colIdent.getText().toLowerCase();
    String alias = tabident.getText().toLowerCase();
    if (aliasToNativeTableInfo.get(alias) != null) {
      colIdent.getToken().setText(aliasToNativeTableInfo.get(alias).getNativeColumn(column));
    }
  } else {
    // recurse down
    for (int i = 0; i < node.getChildCount(); i++) {
      ASTNode child = (ASTNode) node.getChild(i);
      replaceColumnNames(child);
    }
  }
}
 
Developer: apache, Project: lens, Lines: 22, Source: ColumnarSQLRewriter.java

Example 6: generate

import org.apache.hadoop.hive.ql.parse.ASTNode; // import the required package/class
@Override
public boolean generate(ASTNode hiveRoot, CommonTree sqlRoot, ASTNode currentHiveNode,
    CommonTree currentSqlNode, TranslateContext context) throws SqlXlateException {

  ASTNode ret = SqlXlateUtil.newASTNode(HiveParser.Identifier, currentSqlNode.getText());
  super.attachHiveNode(hiveRoot, currentHiveNode, ret);

  CommonTree node = (CommonTree) (currentSqlNode.getChildCount() == 1 ? currentSqlNode
      .getChild(0) : currentSqlNode.getChild(1));
  if (node.getType() == PantheraParser_PLSQLParser.ASTERISK) {
    return true;
  }
  ASTNode hiveNode = new ASTNode();
  GeneratorFactory.getGenerator(node).generateHiveAST(null, null, hiveNode, node,
      context);
  super.attachHiveNode(hiveRoot, currentHiveNode, (ASTNode) hiveNode.getChild(0));
  return true;
}
 
Developer: adrian-wang, Project: project-panthera-skin, Lines: 19, Source: CountGenerator.java

Example 7: resolveUnionTok

import org.apache.hadoop.hive.ql.parse.ASTNode; // import the required package/class
static void resolveUnionTok(List<String> re, ASTNode src) {
    for (int i = 0; i < src.getChildCount(); i++) {
        ASTNode tmpast = (ASTNode) src.getChild(i);
        if (((ASTNode) tmpast.getChild(0)).getToken().getType() == HiveParser.TOK_FROM) {
            getSrcTablesReCur(re, (ASTNode) tmpast.getChild(0));
        } else if (tmpast.getToken().getType() == HiveParser.TOK_UNIONALL) {
            resolveUnionTok(re, tmpast);
        }
    }
}
 
Developer: thomas-young-2013, Project: wherehowsX, Lines: 11, Source: HiveSqlAnalyzer.java

Example 8: newObjectInspectorFromHiveType

import org.apache.hadoop.hive.ql.parse.ASTNode; // import the required package/class
private static ObjectInspector newObjectInspectorFromHiveType(final ASTNode type) {
	// matching by token names, because token IDs (which are static final) drastically change between versions.
	switch (type.getToken().getText()) {
		case "TOK_STRING":
			return PrimitiveObjectInspectorFactory.writableStringObjectInspector;
		case "TOK_INT":
			return PrimitiveObjectInspectorFactory.writableIntObjectInspector;
		case "TOK_DOUBLE":
			return PrimitiveObjectInspectorFactory.writableDoubleObjectInspector;
		case "TOK_FLOAT":
			return PrimitiveObjectInspectorFactory.writableFloatObjectInspector;
		case "TOK_BIGINT":
			return PrimitiveObjectInspectorFactory.writableLongObjectInspector;
		case "TOK_BOOLEAN": {
			return PrimitiveObjectInspectorFactory.writableBooleanObjectInspector;
		}
		case "TOK_STRUCT": {
			final ASTNode tabColList = (ASTNode) type.getChild(0);
			final List<String> names = new ArrayList<>();
			final List<ObjectInspector> ois = new ArrayList<>();
			for (final Node tabCol : tabColList.getChildren()) {
				final ASTNode a = (ASTNode) tabCol;
				names.add(a.getChild(0).toString());
				ois.add(newObjectInspectorFromHiveType((ASTNode) a.getChild(1)));
			}
			return ObjectInspectorFactory.getStandardStructObjectInspector(names, ois);
		}
		case "TOK_MAP": {
			final ObjectInspector keyType = newObjectInspectorFromHiveType((ASTNode) type.getChild(0));
			final ObjectInspector valueType = newObjectInspectorFromHiveType((ASTNode) type.getChild(1));
			return ObjectInspectorFactory.getStandardMapObjectInspector(keyType, valueType);
		}
		case "TOK_LIST": {
			final ObjectInspector itemType = newObjectInspectorFromHiveType((ASTNode) type.getChild(0));
			return ObjectInspectorFactory.getStandardListObjectInspector(itemType);
		}
		default:
			throw new IllegalArgumentException("unsupported type: " + type.toStringTree());
	}
}
 
Developer: CyberAgent, Project: hive-jq-udtf, Lines: 41, Source: ObjectInspectors.java

Example 9: generate

import org.apache.hadoop.hive.ql.parse.ASTNode; // import the required package/class
@Override
public boolean generate(ASTNode hiveRoot, CommonTree sqlRoot, ASTNode currentHiveNode,
    CommonTree currentSqlNode, TranslateContext context) throws SqlXlateException {
  ASTNode ret = SqlXlateUtil.newASTNode(HiveParser.TOK_ORDERBY, "TOK_ORDERBY");
  ASTNode tokQuery = (ASTNode) currentHiveNode.getFirstChildWithType(HiveParser.TOK_QUERY);
  if (tokQuery != null) {
    super.attachHiveNode(hiveRoot, (ASTNode) tokQuery.getChild(1), ret);
  } else { // for windowing function over clause, e.g. over(order by col1)
    super.attachHiveNode(hiveRoot,  currentHiveNode, ret);
  }
  return super.generateChildren(hiveRoot, sqlRoot, ret, currentSqlNode, context);
}
 
Developer: adrian-wang, Project: project-panthera-skin, Lines: 13, Source: OrderByElementsGenerator.java

Example 10: hasMeasure

import org.apache.hadoop.hive.ql.parse.ASTNode; // import the required package/class
boolean hasMeasure(ASTNode node, CubeQueryContext cubeql) {
  int nodeType = node.getToken().getType();
  if (nodeType == TOK_TABLE_OR_COL || nodeType == DOT) {
    String colname;
    String tabname = null;

    if (node.getToken().getType() == TOK_TABLE_OR_COL) {
      colname = ((ASTNode) node.getChild(0)).getText();
    } else {
      // node in 'alias.column' format
      ASTNode tabident = HQLParser.findNodeByPath(node, TOK_TABLE_OR_COL, Identifier);
      ASTNode colIdent = (ASTNode) node.getChild(1);

      colname = colIdent.getText();
      tabname = tabident.getText();
    }

    String msrname = StringUtils.isBlank(tabname) ? colname : tabname + "." + colname;
    if (cubeql.hasCubeInQuery() && cubeql.isCubeMeasure(msrname)) {
      return true;
    }
  } else {
    for (int i = 0; i < node.getChildCount(); i++) {
      if (hasMeasure((ASTNode) node.getChild(i), cubeql)) {
        return true;
      }
    }
  }
  return false;
}
 
Developer: apache, Project: lens, Lines: 31, Source: GroupbyResolver.java

Example 11: updateSelectPhrase

import org.apache.hadoop.hive.ql.parse.ASTNode; // import the required package/class
private void updateSelectPhrase(CubeQueryContext cubeql, int index, ASTNode selectExpr) {
  int exprInd = index;
  ASTNode selectExprChild = (ASTNode) selectExpr.getChild(0);
  Set<String> cols = new HashSet<>();
  SelectPhraseContext sel = new SelectPhraseContext(selectExpr);
  addColumnsForSelectExpr(sel, selectExpr, cubeql.getSelectAST(), cols);
  String alias = selectExpr.getChildCount() > 1 ? selectExpr.getChild(1).getText() : null;
  String selectAlias;
  String selectFinalAlias = null;
  if (alias != null) {
    selectFinalAlias = alias;
    selectAlias = SELECT_ALIAS_PREFIX + exprInd;
  } else if (cols.size() == 1 && (selectExprChild.getToken().getType() == TOK_TABLE_OR_COL
    || selectExprChild.getToken().getType() == DOT)) {
    // select expression is same as the column
    selectAlias = cols.iterator().next().toLowerCase();
  } else {
    selectAlias = SELECT_ALIAS_PREFIX + exprInd;
    selectFinalAlias = HQLParser.getString(selectExprChild);
  }
  cubeql.addColumnsQueried(sel.getTblAliasToColumns());
  sel.setSelectAlias(selectAlias);
  sel.setFinalAlias(!StringUtils.isBlank(selectFinalAlias) ? "`" + selectFinalAlias + "`" : selectAlias);
  sel.setActualAlias(alias != null ? alias.toLowerCase() : null);
  cubeql.getSelectPhrases().add(exprInd, sel);
  //cubeQueryContext.addSelectPhrase(sel);
}
 
Developer: apache, Project: lens, Lines: 28, Source: GroupbyResolver.java

Example 12: factFilterPushDown

import org.apache.hadoop.hive.ql.parse.ASTNode; // import the required package/class
/**
 * Get fact filters for pushdown
 *
 * @param node
 */
public void factFilterPushDown(ASTNode node) {
  if (node == null) {
    log.debug("Join AST is null ");
    return;
  }

  String filterCond = "";
  if (node.getToken().getType() == HiveParser.KW_AND) {

    ASTNode parentNode = (ASTNode) node.getChild(0).getParent();
    // Skip the join conditions used as "and" for fact filter pushdown.
    // eg. inner join fact.id1 = dim.id and fact.id2 = dim.id
    if (parentNode.getChild(0).getChild(0).getType() == HiveParser.DOT
      && parentNode.getChild(0).getChild(1).getType() == HiveParser.DOT
      && parentNode.getChild(1).getChild(0).getType() == HiveParser.DOT
      && parentNode.getChild(1).getChild(1).getType() == HiveParser.DOT) {
      return;
    }
    ASTNode right = (ASTNode) node.getChild(1);
    filterCond = HQLParser.getString(right);
  }
  String factAlias = getFactAlias();

  if (filterCond.matches("(.*)" + factAlias + "(.*)")) {
    factFilterPush.append(filterCond).append(" and ");
  }

  for (int i = 0; i < node.getChildCount(); i++) {
    ASTNode child = (ASTNode) node.getChild(i);
    factFilterPushDown(child);
  }
}
 
Developer: apache, Project: lens, Lines: 39, Source: ColumnarSQLRewriter.java

Example 13: getFactKeysFromNode

import org.apache.hadoop.hive.ql.parse.ASTNode; // import the required package/class
/**
 * Get fact keys used in the AST
 *
 * @param node
 */
public void getFactKeysFromNode(ASTNode node) {
  if (node == null) {
    log.debug("AST is null ");
    return;
  }
  if (HQLParser.isAggregateAST(node)) {
    return;
  } else {
    if (node.getToken().getType() == HiveParser.DOT
            && node.getParent().getChild(0).getType() != HiveParser.Identifier) {
      String table = HQLParser.findNodeByPath(node, TOK_TABLE_OR_COL, Identifier).toString();
      String column = node.getChild(1).toString().toLowerCase();

      String factAlias = getFactAlias();

      if (table.equals(factAlias)) {
        factKeys.add(factAlias + "." + column);
      }
    }
  }

  for (int i = 0; i < node.getChildCount(); i++) {
    ASTNode child = (ASTNode) node.getChild(i);
    getFactKeysFromNode(child);
  }
}
 
Developer: apache, Project: lens, Lines: 32, Source: ColumnarSQLRewriter.java

Example 14: getAllDimColumns

import org.apache.hadoop.hive.ql.parse.ASTNode; // import the required package/class
/**
 * Get all columns used for dimension tables
 * @param node
 */
public void getAllDimColumns(ASTNode node) {

  if (node == null) {
    log.debug("Input AST is null ");
    return;
  }
  // Assuming column is specified with table.column format
  if (node.getToken().getType() == HiveParser.DOT) {
    String table = HQLParser.findNodeByPath(node, TOK_TABLE_OR_COL, Identifier).toString();
    String column = node.getChild(1).toString();

    Iterator iterator = tableToAliasMap.keySet().iterator();
    while (iterator.hasNext()) {
      String tab = (String) iterator.next();
      String alias = tableToAliasMap.get(tab);

      if ((table.equals(tab) || table.equals(alias)) && column != null) {
        LinkedHashSet<String> cols;
        if (!tableToAccessedColMap.containsKey(tab)) {
          cols = new LinkedHashSet<String>();
          cols.add(column);
          tableToAccessedColMap.put(tab, cols);
        } else {
          cols = tableToAccessedColMap.get(tab);
          if (!cols.contains(column)) {
            cols.add(column);
          }
        }
      }
    }
  }
  for (int i = 0; i < node.getChildCount(); i++) {
    ASTNode child = (ASTNode) node.getChild(i);
    getAllDimColumns(child);
  }
}
 
Developer: apache, Project: lens, Lines: 41, Source: ColumnarSQLRewriter.java

Example 15: processSelectExpression

import org.apache.hadoop.hive.ql.parse.ASTNode; // import the required package/class
/**
 * Get the inner and outer AST with alias for each child of StorageCandidate
 *
 * @param sc
 * @param outerSelectAst
 * @param innerSelectAST
 * @param aliasDecider
 * @throws LensException
 */
private void processSelectExpression(StorageCandidateHQLContext sc, ASTNode outerSelectAst, ASTNode innerSelectAST,
    AliasDecider aliasDecider) throws LensException {
  //ASTNode selectAST = sc.getQueryAst().getSelectAST();
  ASTNode selectAST = storageCandidateToSelectAstMap.get(sc);
  if (selectAST == null) {
    return;
  }
  // iterate over all children of the ast and get outer ast corresponding to it.
  for (int i = 0; i < selectAST.getChildCount(); i++) {
    ASTNode child = (ASTNode) selectAST.getChild(i);
    ASTNode outerSelect = new ASTNode(child);
    ASTNode selectExprAST = (ASTNode) child.getChild(0);
    ASTNode outerAST = getOuterAST(selectExprAST, innerSelectAST, aliasDecider, sc, true,
        cubeql.getBaseCube().getDimAttributeNames());
    outerSelect.addChild(outerAST);
    // has an alias? add it
    if (child.getChildCount() > 1) {
      outerSelect.addChild(child.getChild(1));
    }
    if (outerSelectAst.getChildCount() <= selectAST.getChildCount()) {
      if (outerSelectAst.getChild(i) == null) {
        outerSelectAst.addChild(outerSelect);
      } else if (HQLParser.getString((ASTNode) outerSelectAst.getChild(i).getChild(0)).equals(DEFAULT_MEASURE)) {
        outerSelectAst.replaceChildren(i, i, outerSelect);
      }
    }
  }
  sc.getQueryAst().setSelectAST(innerSelectAST);
}
 
Developer: apache, Project: lens, Lines: 39, Source: UnionQueryWriter.java


Note: The org.apache.hadoop.hive.ql.parse.ASTNode.getChild examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult each project's License before redistributing or using the code; do not repost without permission.