This page collects typical usage examples of the Java method org.apache.hadoop.hive.ql.parse.ASTNode.getChildCount. If you are wondering what ASTNode.getChildCount does, how to call it, or want to see it used in context, the curated code samples below may help. You can also explore further usages of the enclosing class org.apache.hadoop.hive.ql.parse.ASTNode.
The 15 code examples of ASTNode.getChildCount below are sorted by popularity by default.
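Before the examples, here is a minimal, self-contained sketch (not taken from any of the examples below) of how an ASTNode is typically obtained and how getChildCount() drives a recursive walk over the parse tree. It assumes a Hive release in which ParseDriver.parse(String) returns an ASTNode directly (the parse entry point has changed across Hive versions) and uses String.repeat, which needs Java 11 or later.

import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.ParseDriver;
import org.apache.hadoop.hive.ql.parse.ParseException;

public class AstWalkDemo {
  public static void main(String[] args) throws ParseException {
    ASTNode root = new ParseDriver().parse("SELECT id, name FROM t WHERE id > 10");
    dump(root, 0);
  }

  // Print each node with its token text and the number of children it has.
  private static void dump(ASTNode node, int depth) {
    String label = node.getToken() == null ? "nil" : node.getText();
    System.out.println("  ".repeat(depth) + label + " (children=" + node.getChildCount() + ")");
    for (int i = 0; i < node.getChildCount(); i++) {
      dump((ASTNode) node.getChild(i), depth + 1);
    }
  }
}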
Example 1: getTableNamesForUpdateDelete
import org.apache.hadoop.hive.ql.parse.ASTNode; // import the package/class this method depends on
public static void getTableNamesForUpdateDelete(
List<String> isrcTableNames, ASTNode input) {
if (input == null) {
return;
}
if (input.getToken().getType() == HiveParser.TOK_TABNAME) {
if (input.getChildCount() == 1) {
isrcTableNames.add(input.getChild(0).getText());
return;
} else if (input.getChildCount() == 2) {
isrcTableNames.add(input.getChild(0).getText());
return;
} else if (input.getChildCount() == 3) {
isrcTableNames.add(input.getChild(0).getText());
return;
}
} else {
int childCount = input.getChildCount();
for (int i = 0; i < childCount; i++) {
getTableNamesForUpdateDelete(isrcTableNames,
(ASTNode) input.getChild(i));
}
}
}
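A hedged usage sketch for the helper above. The enclosing class is not shown in the snippet, so TableNameUtils below is only a placeholder for it, and the sketch again assumes ParseDriver.parse(String) returns an ASTNode.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.ParseDriver;
import org.apache.hadoop.hive.ql.parse.ParseException;

public class UpdateDeleteTableNamesDemo {
  public static void main(String[] args) throws ParseException {
    List<String> tableNames = new ArrayList<>();
    ASTNode root = new ParseDriver().parse("DELETE FROM orders WHERE id = 1");
    // TableNameUtils is a placeholder for whichever class hosts the static helper above.
    TableNameUtils.getTableNamesForUpdateDelete(tableNames, root);
    System.out.println(tableNames); // expected to print [orders]
  }
}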
Example 2: getColsForHavingAST
import org.apache.hadoop.hive.ql.parse.ASTNode; // import the package/class this method depends on
private void getColsForHavingAST(CubeQueryContext cubeql, ASTNode clause) throws LensException {
if (clause == null) {
return;
}
// Split having clause phrases to column level so that the having clause can be pushed to multiple facts if required.
if (HQLParser.isAggregateAST(clause) || clause.getType() == HiveParser.TOK_TABLE_OR_COL
|| clause.getType() == HiveParser.DOT || clause.getChildCount() == 0) {
QueriedPhraseContext qur = new QueriedPhraseContext(clause);
qur.setAggregate(true);
getColsForTree(cubeql, clause, qur, true);
cubeql.addColumnsQueried(qur.getTblAliasToColumns());
cubeql.addQueriedPhrase(qur);
} else {
for (Node child : clause.getChildren()) {
getColsForHavingAST(cubeql, (ASTNode)child);
}
}
}
Example 3: isExpressionsAnswerableFromFact
import org.apache.hadoop.hive.ql.parse.ASTNode; // import the package/class this method depends on
/**
* Check if the expression is answerable from the fact; if so, it can be pushed to the fact pushdown subquery
*
* @param node
* @return true if the expression is answerable from the fact
*/
public boolean isExpressionsAnswerableFromFact(ASTNode node) {
boolean isAnswerable = true;
for (int i = 0; i < node.getChildCount(); i++) {
if (node.getChild(i).getType() == HiveParser.TOK_SELEXPR) {
int cnt = getColumnCount((ASTNode) node.getChild(i));
if (cnt >= 2) {
if (cnt == getNumFactTableInExpressions((ASTNode) node.getChild(i), new MutableInt(0))) {
isAnswerable = true;
} else {
isAnswerable = false;
}
}
}
}
return isAnswerable;
}
Example 4: getNumFactTableInExpressions
import org.apache.hadoop.hive.ql.parse.ASTNode; // import the package/class this method depends on
/**
* Get the number of fact columns used in an expression
*
* @param node
* @param count
* @return Number of fact columns used in expression
*/
protected int getNumFactTableInExpressions(ASTNode node, MutableInt count) {
if (node == null) {
log.debug("ASTNode is null ");
return 0;
}
if (node.getToken().getType() == HiveParser.TOK_TABLE_OR_COL) {
String factAlias = getFactAlias();
String table = node.getChild(0).getText();
if (table.equals(factAlias)) {
count.add(1);
}
}
for (int i = 0; i < node.getChildCount(); i++) {
ASTNode child = (ASTNode) node.getChild(i);
getNumFactTableInExpressions(child, count);
}
return count.intValue();
}
Example 5: hasMeasuresInDistinctClause
import org.apache.hadoop.hive.ql.parse.ASTNode; // import the package/class this method depends on
private boolean hasMeasuresInDistinctClause(CubeQueryContext cubeql, ASTNode node, boolean hasDistinct) {
if (node == null) {
return false;
}
int exprTokenType = node.getToken().getType();
boolean isDistinct = hasDistinct;
if (exprTokenType == HiveParser.TOK_FUNCTIONDI || exprTokenType == HiveParser.TOK_SELECTDI) {
isDistinct = true;
} else if (cubeql.isCubeMeasure(node) && isDistinct) {
// Exit point for the recursion
return true;
}
for (int i = 0; i < node.getChildCount(); i++) {
if (hasMeasuresInDistinctClause(cubeql, (ASTNode) node.getChild(i), isDistinct)) {
// Return on the first measure in distinct clause
return true;
}
}
return false;
}
Example 6: updateOrderByWithFinalAlias
import org.apache.hadoop.hive.ql.parse.ASTNode; // import the package/class this method depends on
static void updateOrderByWithFinalAlias(ASTNode orderby, ASTNode select) throws LensException{
if (orderby == null) {
return;
}
for (Node orderbyNode : orderby.getChildren()) {
ASTNode orderBychild = (ASTNode) orderbyNode;
for (Node selectNode : select.getChildren()) {
ASTNode selectChild = (ASTNode) selectNode;
if (selectChild.getChildCount() == 2) {
if (HQLParser.getString((ASTNode) selectChild.getChild(0))
.equals(HQLParser.getString((ASTNode) orderBychild.getChild(0)))) {
ASTNode alias = new ASTNode((ASTNode) selectChild.getChild(1));
if (!alias.toString().matches("\\S+")) {
throw new LensException(LensCubeErrorCode.ORDERBY_ALIAS_CONTAINING_WHITESPACE.getLensErrorInfo(), alias);
}
orderBychild.replaceChildren(0, 0, alias);
break;
}
}
}
}
}
Example 7: getAllTablesfromFromAST
import org.apache.hadoop.hive.ql.parse.ASTNode; // import the package/class this method depends on
/**
* Gets all tables from the FROM clause AST.
*
* @param from the FROM clause AST node
* @param fromTables the list the table names are collected into
*/
protected void getAllTablesfromFromAST(ASTNode from, ArrayList<String> fromTables) {
String table;
if (TOK_TABREF == from.getToken().getType()) {
ASTNode tabName = (ASTNode) from.getChild(0);
if (tabName.getChildCount() == 2) {
table = tabName.getChild(0).getText() + "." + tabName.getChild(1).getText();
} else {
table = tabName.getChild(0).getText();
}
if (from.getChildCount() > 1) {
table = table + " " + from.getChild(1).getText();
}
fromTables.add(table);
}
for (int i = 0; i < from.getChildCount(); i++) {
ASTNode child = (ASTNode) from.getChild(i);
getAllTablesfromFromAST(child, fromTables);
}
}
Example 8: resolveUnionTok
import org.apache.hadoop.hive.ql.parse.ASTNode; // import the package/class this method depends on
static void resolveUnionTok(List<String> re, ASTNode src) {
for (int i = 0; i < src.getChildCount(); i++) {
ASTNode tmpast = (ASTNode) src.getChild(i);
if (((ASTNode) tmpast.getChild(0)).getToken().getType() == HiveParser.TOK_FROM) {
getSrcTablesReCur(re, (ASTNode) tmpast.getChild(0));
} else if (tmpast.getToken().getType() == HiveParser.TOK_UNIONALL) {
resolveUnionTok(re, tmpast);
}
}
}
Example 9: processGroupbyAST
import org.apache.hadoop.hive.ql.parse.ASTNode; // import the package/class this method depends on
void processGroupbyAST(ASTNode ast)
throws LensException {
if (ast == null) {
return;
}
// iterate over children
for (int i = 0; i < ast.getChildCount(); i++) {
ASTNode exprNode = (ASTNode) ast.getChild(i);
if (hasBridgeCol(exprNode, tableAlias)) {
ast.setChild(i, getDotASTForExprAST(exprNode));
}
}
}
Example 10: removeAggreagateFromDefaultColumns
import org.apache.hadoop.hive.ql.parse.ASTNode; // import the package/class this method depends on
private void removeAggreagateFromDefaultColumns(ASTNode node) throws LensException {
for (int i = 0; i < node.getChildCount(); i++) {
ASTNode selectExpr = (ASTNode) node.getChild(i);
if (selectExpr.getChildCount() == 2) {
ASTNode column = (ASTNode) selectExpr.getChild(0);
if (HQLParser.isAggregateAST(column)
&& column.getChildCount() == 2) {
if (HQLParser.getString((ASTNode) column.getChild(1)).equals(DEFAULT_MEASURE)) {
selectExpr.getParent().setChild(i, getSelectExpr(null, (ASTNode) selectExpr.getChild(1), true));
}
}
}
}
}
Example 11: getColsForSelectTree
import org.apache.hadoop.hive.ql.parse.ASTNode; // import the package/class this method depends on
private void getColsForSelectTree(final CubeQueryContext cubeql) throws LensException {
int exprInd = 1;
for (int i = 0; i < cubeql.getSelectAST().getChildCount(); i++) {
ASTNode selectExpr = (ASTNode) cubeql.getSelectAST().getChild(i);
ASTNode selectExprChild = (ASTNode)selectExpr.getChild(0);
Set<String> cols = new HashSet<>();
SelectPhraseContext sel = new SelectPhraseContext(selectExpr);
addColumnsForSelectExpr(sel, selectExpr, cubeql.getSelectAST(), cols);
String alias = selectExpr.getChildCount() > 1 ? selectExpr.getChild(1).getText() : null;
String selectAlias;
String selectFinalAlias = null;
if (alias != null) {
selectFinalAlias = alias;
selectAlias = SELECT_ALIAS_PREFIX + exprInd;
} else if (cols.size() == 1 && (selectExprChild.getToken().getType() == TOK_TABLE_OR_COL
|| selectExprChild.getToken().getType() == DOT)) {
// select expression is same as the column
selectAlias = cols.iterator().next().toLowerCase();
} else {
selectAlias = SELECT_ALIAS_PREFIX + exprInd;
selectFinalAlias = HQLParser.getString(selectExprChild);
}
exprInd++;
cubeql.addColumnsQueried(sel.getTblAliasToColumns());
sel.setSelectAlias(selectAlias);
sel.setFinalAlias(!StringUtils.isBlank(selectFinalAlias) ? "`" + selectFinalAlias + "`" : selectAlias);
sel.setActualAlias(alias != null ? alias.toLowerCase() : null);
cubeql.addSelectPhrase(sel);
}
}
Example 12: getGroupbyExpressions
import org.apache.hadoop.hive.ql.parse.ASTNode; // import the package/class this method depends on
private List<String> getGroupbyExpressions(ASTNode node) {
List<String> list = new ArrayList<>();
if (node == null) {
return list;
}
for (int i = 0; i < node.getChildCount(); i++) {
list.add(HQLParser.getString((ASTNode) node.getChild(i)));
}
return list;
}
Example 13: isSelectCountDistinctStar
import org.apache.hadoop.hive.ql.parse.ASTNode; // import the package/class this method depends on
/**
* Check if the select subtree represents a distinct count, e.g. select count(distinct ...)
*
* @param select the select AST node
* @return true if the first select expression is a distinct count function call
*/
public static boolean isSelectCountDistinctStar(ASTNode select) {
ASTNode selexpr = (ASTNode) select.getChild(0);
if (selexpr.getChild(0).getType() != HiveParser.TOK_FUNCTIONDI) {
return false;
}
ASTNode functiondi = (ASTNode) selexpr.getChild(0);
if (functiondi.getChildCount() == 1
&& functiondi.getChild(0).getType() == HiveParser.Identifier
&& functiondi.getChild(0).getText() == "count") {
return true;
}
return false;
}
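A hedged sketch of calling the check above. isSelectCountDistinctStar expects the TOK_SELECT subtree, so the snippet first locates that node by token type with its own small finder; SelectUtils is only a placeholder for the class that actually hosts the method, and the ParseDriver assumption from the earlier sketches applies here as well.

import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.HiveParser;
import org.apache.hadoop.hive.ql.parse.ParseDriver;
import org.apache.hadoop.hive.ql.parse.ParseException;

public class CountDistinctCheckDemo {
  public static void main(String[] args) throws ParseException {
    ASTNode root = new ParseDriver().parse("SELECT count(DISTINCT id) FROM t");
    ASTNode select = findFirst(root, HiveParser.TOK_SELECT);
    // SelectUtils is a placeholder for the class that hosts isSelectCountDistinctStar.
    System.out.println(SelectUtils.isSelectCountDistinctStar(select));
  }

  // Depth-first search for the first node with the given token type.
  private static ASTNode findFirst(ASTNode node, int tokenType) {
    if (node.getToken() != null && node.getType() == tokenType) {
      return node;
    }
    for (int i = 0; i < node.getChildCount(); i++) {
      ASTNode found = findFirst((ASTNode) node.getChild(i), tokenType);
      if (found != null) {
        return found;
      }
    }
    return null;
  }
}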
Example 14: hasTableOrColumn
import org.apache.hadoop.hive.ql.parse.ASTNode; // import the package/class this method depends on
private boolean hasTableOrColumn(ASTNode node) {
if (node.getToken() != null) {
if (node.getToken().getType() == HiveParser.TOK_TABLE_OR_COL) {
return true;
}
}
for (int i = 0; i < node.getChildCount(); i++) {
if (hasTableOrColumn((ASTNode) node.getChild(i))) {
return true;
}
}
return false;
}
Example 15: getFactKeysFromNode
import org.apache.hadoop.hive.ql.parse.ASTNode; // import the package/class this method depends on
/**
* Get fact keys used in the AST
*
* @param node
*/
public void getFactKeysFromNode(ASTNode node) {
if (node == null) {
log.debug("AST is null ");
return;
}
if (HQLParser.isAggregateAST(node)) {
return;
} else {
if (node.getToken().getType() == HiveParser.DOT
&& node.getParent().getChild(0).getType() != HiveParser.Identifier) {
String table = HQLParser.findNodeByPath(node, TOK_TABLE_OR_COL, Identifier).toString();
String column = node.getChild(1).toString().toLowerCase();
String factAlias = getFactAlias();
if (table.equals(factAlias)) {
factKeys.add(factAlias + "." + column);
}
}
}
for (int i = 0; i < node.getChildCount(); i++) {
ASTNode child = (ASTNode) node.getChild(i);
getFactKeysFromNode(child);
}
}