

Java HiveOperation Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hive.ql.plan.HiveOperation. If you have been wondering what the HiveOperation class is for, how to use it, or what working examples look like, the curated examples below should help.


The HiveOperation class belongs to the org.apache.hadoop.hive.ql.plan package. Fifteen code examples of the class are shown below, sorted by popularity.
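
Before the project examples, a minimal orientation sketch may help (our own illustration, not drawn from the projects below): HiveOperation is an enum whose constants identify Hive statement kinds, and each constant carries a display name exposed via getOperationName() (used in Example 13 below). The class name HiveOperationDemo is hypothetical.

import org.apache.hadoop.hive.ql.plan.HiveOperation;

// Print every operation Hive can report, e.g. QUERY, CREATETABLE, LOAD.
public class HiveOperationDemo {
    public static void main(String[] args) {
        for (HiveOperation op : HiveOperation.values()) {
            System.out.println(op.name() + " -> " + op.getOperationName());
        }
    }
}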

Example 1: isSelectQuery

import org.apache.hadoop.hive.ql.plan.HiveOperation; // import the required package/class
private boolean isSelectQuery(HiveEventContext event) {
    if (event.getOperation() == HiveOperation.QUERY) {
        //A select query has exactly one output
        if (event.getOutputs().size() == 1) {
            WriteEntity output = event.getOutputs().iterator().next();
            /* Strangely, select queries have DFS_DIR as the output type, which looks like a Hive bug;
             * filter them out by checking whether the path is a temporary URI.
             * Insert into/overwrite queries onto local or DFS paths have DFS_DIR or LOCAL_DIR as the
             * type, WriteType.PATH_WRITE, and tempUri = false.
             * Insert into a temporary table has isTempURI() == false, so it is not skipped, as expected.
             */
            if (output.getType() == Type.DFS_DIR || output.getType() == Type.LOCAL_DIR) {
                if (output.getWriteType() == WriteEntity.WriteType.PATH_WRITE &&
                    output.isTempURI()) {
                    return true;
                }
            }
        }
    }
    return false;
}
 
Developer ID: apache, Project: incubator-atlas, Lines of code: 20, Source file: HiveHook.java

Example 2: addInputs

import org.apache.hadoop.hive.ql.plan.HiveOperation; // import the required package/class
private static void addInputs(HiveMetaStoreBridge hiveBridge, HiveOperation op, SortedSet<ReadEntity> sortedInputs,
                              StringBuilder buffer, final Map<ReadEntity, Referenceable> refs,
                              final boolean ignoreHDFSPathsInQFName) throws HiveException {
    if (refs != null) {
        if (sortedInputs != null) {
            Set<String> dataSetsProcessed = new LinkedHashSet<>();
            for (Entity input : sortedInputs) {

                if (!dataSetsProcessed.contains(input.getName().toLowerCase())) {
                    //HiveOperation.QUERY type encompasses INSERT, INSERT_OVERWRITE, UPDATE, DELETE, PATH_WRITE operations
                    if (ignoreHDFSPathsInQFName &&
                        (Type.DFS_DIR.equals(input.getType()) || Type.LOCAL_DIR.equals(input.getType()))) {
                        LOG.debug("Skipping dfs dir input addition to process qualified name {} ", input.getName());
                    } else if (refs.containsKey(input)) {
                        if (input.getType() == Type.PARTITION || input.getType() == Type.TABLE) {
                            final Date createTime = HiveMetaStoreBridge.getTableCreatedTime(hiveBridge.hiveClient.getTable(input.getTable().getDbName(), input.getTable().getTableName()));
                            addDataset(buffer, refs.get(input), createTime.getTime());
                        } else {
                            addDataset(buffer, refs.get(input));
                        }
                    }
                    dataSetsProcessed.add(input.getName().toLowerCase());
                }
            }

        }
    }
}
 
Developer ID: apache, Project: incubator-atlas, Lines of code: 27, Source file: HiveHook.java

Example 3: addQueryType

import org.apache.hadoop.hive.ql.plan.HiveOperation; // import the required package/class
private static boolean addQueryType(HiveOperation op, WriteEntity entity) {
    if (entity.getWriteType() != null && HiveOperation.QUERY.equals(op)) {
        switch (entity.getWriteType()) {
        case INSERT:
        case INSERT_OVERWRITE:
        case UPDATE:
        case DELETE:
            return true;
        case PATH_WRITE:
            //Add query type only for DFS paths and ignore local paths since they are not added as outputs
            if ( !Type.LOCAL_DIR.equals(entity.getType())) {
                return true;
            }
            break;
        default:
            break;
        }
    }
    return false;
}
 
Developer ID: apache, Project: incubator-atlas, Lines of code: 20, Source file: HiveHook.java

Example 4: testInsertIntoTempTable

import org.apache.hadoop.hive.ql.plan.HiveOperation; // import the required package/class
@Test(enabled = false)
public void testInsertIntoTempTable() throws Exception {
    String tableName = createTable();
    String insertTableName = createTable(false, false, true);
    assertTableIsRegistered(DEFAULT_DB, tableName);
    assertTableIsNotRegistered(DEFAULT_DB, insertTableName, true);

    String query =
        "insert into " + insertTableName + " select id, name from " + tableName;

    runCommand(query);

    Set<ReadEntity> inputs = getInputs(tableName, Entity.Type.TABLE);
    Set<WriteEntity> outputs = getOutputs(insertTableName, Entity.Type.TABLE);
    outputs.iterator().next().setName(getQualifiedTblName(insertTableName + HiveMetaStoreBridge.TEMP_TABLE_PREFIX + SessionState.get().getSessionId()));
    outputs.iterator().next().setWriteType(WriteEntity.WriteType.INSERT);

    validateProcess(constructEvent(query,  HiveOperation.QUERY, inputs, outputs));

    assertTableIsRegistered(DEFAULT_DB, tableName);
    assertTableIsRegistered(DEFAULT_DB, insertTableName, null, true);
}
 
Developer ID: apache, Project: incubator-atlas, Lines of code: 23, Source file: HiveHookIT.java

Example 5: testTruncateTable

import org.apache.hadoop.hive.ql.plan.HiveOperation; // import the required package/class
@Test
public void testTruncateTable() throws Exception {
    String tableName = createTable(false);
    String query = String.format("truncate table %s", tableName);
    runCommand(query);

    Set<WriteEntity> outputs = getOutputs(tableName, Entity.Type.TABLE);

    String tableId = assertTableIsRegistered(DEFAULT_DB, tableName);
    validateProcess(constructEvent(query, HiveOperation.TRUNCATETABLE, null, outputs));

    //Check lineage
    String datasetName = HiveMetaStoreBridge.getTableQualifiedName(CLUSTER_NAME, DEFAULT_DB, tableName);
    JSONObject response = atlasClient.getInputGraph(datasetName);
    JSONObject vertices = response.getJSONObject("values").getJSONObject("vertices");
    // This should be assertTrue once https://issues.apache.org/jira/browse/ATLAS-653 is fixed
    Assert.assertFalse(vertices.has(tableId));
}
 
Developer ID: apache, Project: incubator-atlas, Lines of code: 19, Source file: HiveHookIT.java

Example 6: verifyFailureHook

import org.apache.hadoop.hive.ql.plan.HiveOperation; // import the required package/class
private void verifyFailureHook(HiveOperation expectedOp,
    String dbName, String tableName, boolean checkSentryAccessDeniedException)
    throws Exception {
  if (!isInternalServer) {
    return;
  }

  Assert.assertTrue(DummySentryOnFailureHook.invoked);
  if (expectedOp != null) {
    Assert.assertNotNull("Hive op is null for op: " + expectedOp, DummySentryOnFailureHook.hiveOp);
    Assert.assertTrue(expectedOp.equals(DummySentryOnFailureHook.hiveOp));
  }
  if (checkSentryAccessDeniedException) {
    Assert.assertTrue("Expected SentryDeniedException for op: " + expectedOp,
        DummySentryOnFailureHook.exception.getCause() instanceof SentryAccessDeniedException);
  }
  if(tableName != null) {
    Assert.assertNotNull("Table object is null for op: " + expectedOp, DummySentryOnFailureHook.table);
    Assert.assertTrue(tableName.equalsIgnoreCase(DummySentryOnFailureHook.table.getName()));
  }
  if(dbName != null) {
    Assert.assertNotNull("Database object is null for op: " + expectedOp, DummySentryOnFailureHook.db);
    Assert.assertTrue(dbName.equalsIgnoreCase(DummySentryOnFailureHook.db.getName()));
  }
}
 
Developer ID: apache, Project: incubator-sentry, Lines of code: 26, Source file: TestPrivilegeWithGrantOption.java

Example 7: SentryOnFailureHookContextImpl

import org.apache.hadoop.hive.ql.plan.HiveOperation; // import the required package/class
public SentryOnFailureHookContextImpl(String command,
    Set<ReadEntity> inputs, Set<WriteEntity> outputs, HiveOperation hiveOp,
    Database db, Table tab, AccessURI udfURI, AccessURI partitionURI,
    String userName, String ipAddress, AuthorizationException e,
    Configuration conf) {
  this.command = command;
  this.inputs = inputs;
  this.outputs = outputs;
  this.hiveOp = hiveOp;
  this.userName = userName;
  this.ipAddress = ipAddress;
  this.database = db;
  this.table = tab;
  this.udfURI = udfURI;
  this.partitionURI = partitionURI;
  this.authException = e;
  this.conf = conf;
}
 
Developer ID: apache, Project: incubator-sentry, Lines of code: 19, Source file: SentryOnFailureHookContextImpl.java

Example 8: executeOnFailureHooks

import org.apache.hadoop.hive.ql.plan.HiveOperation; // import the required package/class
private void executeOnFailureHooks(HiveSemanticAnalyzerHookContext context,
    HiveOperation hiveOp, AuthorizationException e) {
  SentryOnFailureHookContext hookCtx = new SentryOnFailureHookContextImpl(
      context.getCommand(), context.getInputs(), context.getOutputs(),
      hiveOp, currDB, currTab, udfURI, null, context.getUserName(),
      context.getIpAddress(), e, context.getConf());
  String csHooks = authzConf.get(
      HiveAuthzConf.AuthzConfVars.AUTHZ_ONFAILURE_HOOKS.getVar(), "").trim();

  try {
    for (Hook aofh : getHooks(csHooks)) {
      ((SentryOnFailureHook)aofh).run(hookCtx);
    }
  } catch (Exception ex) {
    LOG.error("Error executing hook:", ex);
  }
}
 
Developer ID: apache, Project: incubator-sentry, Lines of code: 18, Source file: HiveAuthzBindingHook.java

Example 9: authorizeCreateTable

import org.apache.hadoop.hive.ql.plan.HiveOperation; // import the required package/class
private void authorizeCreateTable(PreCreateTableEvent context)
    throws InvalidOperationException, MetaException {
  HierarcyBuilder inputBuilder = new HierarcyBuilder();
  inputBuilder.addDbToOutput(getAuthServer(), context.getTable().getDbName());
  HierarcyBuilder outputBuilder = new HierarcyBuilder();
  outputBuilder.addDbToOutput(getAuthServer(), context.getTable().getDbName());

  if (!StringUtils.isEmpty(context.getTable().getSd().getLocation())) {
    String uriPath;
    try {
      uriPath = PathUtils.parseDFSURI(warehouseDir,
          getSdLocation(context.getTable().getSd()));
    } catch(URISyntaxException e) {
      throw new MetaException(e.getMessage());
    }
    inputBuilder.addUriToOutput(getAuthServer(), uriPath, warehouseDir);
  }
  authorizeMetastoreAccess(HiveOperation.CREATETABLE, inputBuilder.build(),
      outputBuilder.build());
}
 
Developer ID: apache, Project: incubator-sentry, Lines of code: 21, Source file: MetastoreAuthzBinding.java

Example 10: testMsckRepairTable

import org.apache.hadoop.hive.ql.plan.HiveOperation; // import the required package/class
/**
 * Positive test case for MSCK REPAIR TABLE. User has privileges to execute the
 * operation.
 */
@Test
public void testMsckRepairTable() throws Exception {
  outputTabHierarcyList.add(buildObjectHierarchy(SERVER1, JUNIOR_ANALYST_DB, PURCHASES_TAB));
  testAuth.authorize(HiveOperation.MSCK, alterTabPrivileges, MANAGER_SUBJECT,
    inputTabHierarcyList, outputTabHierarcyList);

  // Should also succeed for the admin.
  testAuth.authorize(HiveOperation.MSCK, alterTabPrivileges, ADMIN_SUBJECT,
    inputTabHierarcyList, outputTabHierarcyList);

  // Admin can also run this against tables in the ANALYST_DB.
  inputTabHierarcyList.add(buildObjectHierarchy(SERVER1, ANALYST_DB, PURCHASES_TAB));
  testAuth.authorize(HiveOperation.MSCK, alterTabPrivileges, ADMIN_SUBJECT,
    inputTabHierarcyList, outputTabHierarcyList);
}
 
Developer ID: apache, Project: incubator-sentry, Lines of code: 20, Source file: TestHiveAuthzBindings.java

Example 11: postAnalyze

import org.apache.hadoop.hive.ql.plan.HiveOperation; // import the required package/class
/**
 * Post-analyze hook that invokes the Hive authorization bindings
 */
@Override
public void postAnalyze(HiveSemanticAnalyzerHookContext context,
    List<Task<? extends Serializable>> rootTasks) throws SemanticException {
  HiveOperation stmtOperation = getCurrentHiveStmtOp();
  Subject subject = new Subject(context.getUserName());
  for (int i = 0; i < rootTasks.size(); i++) {
    Task<? extends Serializable> task = rootTasks.get(i);
    if (task instanceof DDLTask) {
      SentryFilterDDLTask filterTask =
          new SentryFilterDDLTask(hiveAuthzBinding, subject, stmtOperation);
      filterTask.setWork((DDLWork)task.getWork());
      rootTasks.set(i, filterTask);
    }
  }
}
 
Developer ID: apache, Project: incubator-sentry, Lines of code: 19, Source file: HiveAuthzBindingHookV2.java

Example 12: isCreateOp

import org.apache.hadoop.hive.ql.plan.HiveOperation; // import the required package/class
private static boolean isCreateOp(HiveEventContext hiveEvent) {
    return HiveOperation.CREATETABLE.equals(hiveEvent.getOperation())
            || HiveOperation.CREATEVIEW.equals(hiveEvent.getOperation())
            || HiveOperation.ALTERVIEW_AS.equals(hiveEvent.getOperation())
            || HiveOperation.ALTERTABLE_LOCATION.equals(hiveEvent.getOperation())
            || HiveOperation.CREATETABLE_AS_SELECT.equals(hiveEvent.getOperation());
}
 
Developer ID: apache, Project: incubator-atlas, Lines of code: 8, Source file: HiveHook.java
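
Since isCreateOp is a plain membership test over enum constants, the same check can be expressed with an EnumSet, which is easier to extend and avoids the repeated equals() calls. A minimal sketch of that alternative (our own rewrite, not from incubator-atlas; the field name CREATE_OPS is hypothetical):

import java.util.EnumSet;
import org.apache.hadoop.hive.ql.plan.HiveOperation;

// The operations Example 12 treats as "create" operations.
private static final EnumSet<HiveOperation> CREATE_OPS = EnumSet.of(
        HiveOperation.CREATETABLE,
        HiveOperation.CREATEVIEW,
        HiveOperation.ALTERVIEW_AS,
        HiveOperation.ALTERTABLE_LOCATION,
        HiveOperation.CREATETABLE_AS_SELECT);

// Equivalent to the chained equals() checks above, with an explicit null guard.
private static boolean isCreateOp(HiveOperation op) {
    return op != null && CREATE_OPS.contains(op);
}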

Example 13: getProcessQualifiedName

import org.apache.hadoop.hive.ql.plan.HiveOperation; // import the required package/class
@VisibleForTesting
static String getProcessQualifiedName(HiveMetaStoreBridge dgiBridge, HiveEventContext eventContext,
                                      final SortedSet<ReadEntity> sortedHiveInputs,
                                      final SortedSet<WriteEntity> sortedHiveOutputs,
                                      SortedMap<ReadEntity, Referenceable> hiveInputsMap,
                                      SortedMap<WriteEntity, Referenceable> hiveOutputsMap) throws HiveException {
    HiveOperation op = eventContext.getOperation();
    if (isCreateOp(eventContext)) {
        Entity entity = getEntityByType(sortedHiveOutputs, Type.TABLE);

        if (entity != null) {
            Table outTable = entity.getTable();
            //refresh table
            outTable = dgiBridge.hiveClient.getTable(outTable.getDbName(), outTable.getTableName());
            return HiveMetaStoreBridge.getTableProcessQualifiedName(dgiBridge.getClusterName(), outTable);
        }
    }

    StringBuilder buffer = new StringBuilder(op.getOperationName());

    boolean ignoreHDFSPathsinQFName = ignoreHDFSPathsinQFName(op, sortedHiveInputs, sortedHiveOutputs);
    if (ignoreHDFSPathsinQFName && LOG.isDebugEnabled()) {
        LOG.debug("Ignoring HDFS paths in qualifiedName for {} {} ", op, eventContext.getQueryStr());
    }

    addInputs(dgiBridge, op, sortedHiveInputs, buffer, hiveInputsMap, ignoreHDFSPathsinQFName);
    buffer.append(IO_SEP);
    addOutputs(dgiBridge, op, sortedHiveOutputs, buffer, hiveOutputsMap, ignoreHDFSPathsinQFName);
    LOG.info("Setting process qualified name to {}", buffer);
    return buffer.toString();
}
 
Developer ID: apache, Project: incubator-atlas, Lines of code: 32, Source file: HiveHook.java

Example 14: ignoreHDFSPathsinQFName

import org.apache.hadoop.hive.ql.plan.HiveOperation; // import the required package/class
private static boolean ignoreHDFSPathsinQFName(final HiveOperation op, final Set<ReadEntity> inputs, final Set<WriteEntity> outputs) {
    switch (op) {
    case LOAD:
    case IMPORT:
        return isPartitionBasedQuery(outputs);
    case EXPORT:
        return isPartitionBasedQuery(inputs);
    case QUERY:
        return true;
    default:
        break;
    }
    return false;
}
 
Developer ID: apache, Project: incubator-atlas, Lines of code: 13, Source file: HiveHook.java

Example 15: addOutputs

import org.apache.hadoop.hive.ql.plan.HiveOperation; // import the required package/class
private static void addOutputs(HiveMetaStoreBridge hiveBridge, HiveOperation op, SortedSet<WriteEntity> sortedOutputs,
                               StringBuilder buffer, final Map<WriteEntity, Referenceable> refs,
                               final boolean ignoreHDFSPathsInQFName) throws HiveException {
    if (refs != null) {
        Set<String> dataSetsProcessed = new LinkedHashSet<>();
        if (sortedOutputs != null) {
            for (WriteEntity output : sortedOutputs) {
                if (!dataSetsProcessed.contains(output.getName().toLowerCase())) {
                    //HiveOperation.QUERY encompasses the INSERT, INSERT_OVERWRITE, UPDATE, DELETE and PATH_WRITE operations
                    if (addQueryType(op, output)) {
                        buffer.append(SEP);
                        buffer.append(output.getWriteType().name());
                    }
                    if (ignoreHDFSPathsInQFName &&
                        (Type.DFS_DIR.equals(output.getType()) || Type.LOCAL_DIR.equals(output.getType()))) {
                        LOG.debug("Skipping dfs dir output addition to process qualified name {} ", output.getName());
                    } else if (refs.containsKey(output)) {
                        if (output.getType() == Type.PARTITION || output.getType() == Type.TABLE) {
                            final Date createTime = HiveMetaStoreBridge.getTableCreatedTime(hiveBridge.hiveClient.getTable(output.getTable().getDbName(), output.getTable().getTableName()));
                            addDataset(buffer, refs.get(output), createTime.getTime());
                        } else {
                            addDataset(buffer, refs.get(output));
                        }
                    }
                    dataSetsProcessed.add(output.getName().toLowerCase());
                }
            }
        }
    }
}
 
Developer ID: apache, Project: incubator-atlas, Lines of code: 30, Source file: HiveHook.java


Note: The org.apache.hadoop.hive.ql.plan.HiveOperation examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, and the source code copyright remains with those authors; refer to each project's License for distribution and use. Do not repost without permission.