

Java CommandProcessorResponse Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hive.ql.processors.CommandProcessorResponse. If you have been wondering what the CommandProcessorResponse class is for, how to use it, or what working examples look like, the curated class examples below may help.


The CommandProcessorResponse class belongs to the org.apache.hadoop.hive.ql.processors package. Twelve code examples of the class are shown below, sorted by popularity by default.
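
Before looking at the individual examples, the pattern most of them share can be summed up in a short sketch: run a statement through the Hive Driver and inspect the returned CommandProcessorResponse via getResponseCode(), getErrorMessage() and getSQLState(). The class and method names below (CommandProcessorResponseSketch, runAndCheck) are illustrative only and do not come from any of the listed projects.

import org.apache.hadoop.hive.ql.Driver;
import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;

public class CommandProcessorResponseSketch {
  /**
   * Runs a HiveQL statement on the given Driver and fails fast if it did not succeed.
   * A response code of 0 indicates success; any other value carries an error message
   * and an SQL state in the CommandProcessorResponse. (Illustrative sketch only.)
   */
  public static void runAndCheck(Driver driver, String hql) throws Exception {
    CommandProcessorResponse response = driver.run(hql);
    if (response.getResponseCode() != 0) {
      throw new RuntimeException("Failed to execute [" + hql + "]: "
          + response.getErrorMessage() + " (SQLState " + response.getSQLState() + ")");
    }
  }
}

A caller would normally build the Driver from a HiveConf and start a SessionState first, as Examples 5 and 10 below show.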

Example 1: executeQuery

import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; // import the required package/class
/**
 * Execute the given <i>query</i> on the given <i>hiveDriver</i> instance. If a {@link CommandNeedRetryException}
 * is thrown, the query is retried up to 3 times before the call is treated as a failure.
 * @param hiveDriver
 * @param query
 */
public static void executeQuery(Driver hiveDriver, String query) {
  CommandProcessorResponse response = null;
  boolean failed = false;
  int retryCount = 3;

  // Retry on CommandNeedRetryException until the query runs or the retries are exhausted.
  while (!failed && response == null) {
    try {
      response = hiveDriver.run(query);
    } catch (CommandNeedRetryException ex) {
      if (--retryCount == 0) {
        failed = true;
      }
    }
  }

  if (failed || response.getResponseCode() != 0) {
    throw new RuntimeException(String.format("Failed to execute command '%s', errorMsg = '%s'",
        query, (response != null ? response.getErrorMessage() : "")));
  }
}
 
Developer: skhalifa, Project: QDrill, Lines: 25, Source: HiveTestUtilities.java

Example 2: executeHQL

import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; // import the required package/class
/**
 * Execute the given HiveQL statement and throw an IOException if it fails.
 * @param hql
 * @throws CommandNeedRetryException
 * @throws IOException
 */
public void executeHQL(String hql) throws CommandNeedRetryException, IOException {
    CommandProcessorResponse response = getDriver().run(hql);
    int retCode = response.getResponseCode();
    if (retCode != 0) {
        String err = response.getErrorMessage();
        throw new IOException("Failed to execute hql [" + hql + "], error message is: " + err);
    }
}
 
Developer: KylinOLAP, Project: Kylin, Lines: 15, Source: HiveClient.java

Example 3: runExplain

import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; // import the required package/class
private CommandProcessorResponse runExplain(String hql, HiveConf conf) throws Exception {
  Driver hiveDriver = new Driver(conf, "anonymous");
  CommandProcessorResponse response = hiveDriver.run("EXPLAIN EXTENDED " + hql);
  hiveDriver.resetFetch();
  hiveDriver.setMaxRows(Integer.MAX_VALUE);
  List<Object> explainResult = new ArrayList<Object>();
  hiveDriver.getResults(explainResult);

  for (Object explainRow : explainResult) {
    // Print the following to stdout to check partition output.
    // Not parsing the output because it will slow down the test
    assertNotNull(explainRow.toString());
  }

  return response;
}
 
Developer: apache, Project: lens, Lines: 17, Source: TestCubeRewriter.java

Example 4: execHiveDDL

import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; // import the required package/class
public void execHiveDDL(String ddl) throws Exception {
    LOG.info("Executing ddl = " + ddl);

    Driver hiveDriver = new Driver();
    CommandProcessorResponse response = hiveDriver.run(ddl);

    if (response.getResponseCode() > 0) {
        throw new Exception(response.getErrorMessage());
    }
}
 
Developer: patw, Project: storm-sample, Lines: 11, Source: BackupHiveTablePartitionAction.java

Example 5: execHiveSQLwithOverlay

import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; // import the required package/class
public void execHiveSQLwithOverlay(final String sqlStmt,
    final String userName, Map<String, String> overLay) throws Exception {
  final HiveConf hiveConf = new HiveConf();
  for (Map.Entry<String, String> entry : overLay.entrySet()) {
    hiveConf.set(entry.getKey(), entry.getValue());
  }
  UserGroupInformation clientUgi = UserGroupInformation
      .createRemoteUser(userName);
  clientUgi.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Void run() throws Exception {
      Driver driver = new Driver(hiveConf, userName);
      SessionState.start(new CliSessionState(hiveConf));
      CommandProcessorResponse cpr = driver.run(sqlStmt);
      if (cpr.getResponseCode() != 0) {
        throw new IOException("Failed to execute \"" + sqlStmt
            + "\". Driver returned " + cpr.getResponseCode() + " Error: "
            + cpr.getErrorMessage());
      }
      driver.close();
      SessionState.get().close();
      return null;
    }
  });
}
 
Developer: apache, Project: incubator-sentry, Lines: 26, Source: AbstractMetastoreTestWithStaticConfiguration.java

Example 6: verifyLocalQuery

import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; // import the required package/class
public void verifyLocalQuery(String queryStr) throws Exception {
  // setup Hive driver
  SessionState session = new SessionState(getHiveConf());
  SessionState.start(session);
  Driver driver = new Driver(session.getConf(), getUser());

  // compile the query
  CommandProcessorResponse compilerStatus = driver
      .compileAndRespond(queryStr);
  if (compilerStatus.getResponseCode() != 0) {
    String errMsg = compilerStatus.getErrorMessage();
    if (errMsg.contains(HiveAuthzConf.HIVE_SENTRY_PRIVILEGE_ERROR_MESSAGE)) {
      printMissingPerms(getHiveConf().get(
          HiveAuthzConf.HIVE_SENTRY_AUTH_ERRORS));
    }
    throw new SemanticException("Compilation error: "
        + compilerStatus.getErrorMessage());
  }
  driver.close();
  System.out
      .println("User " + getUser() + " has privileges to run the query");
}
 
Developer: apache, Project: incubator-sentry, Lines: 23, Source: SentryConfigTool.java

Example 7: execHiveDDL

import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; // import the required package/class
public void execHiveDDL(String ddl) throws Exception {
  LOG.info("Executing ddl = " + ddl);

  Driver hiveDriver = new Driver();
  CommandProcessorResponse response = hiveDriver.run(ddl);

  if (response.getResponseCode() > 0) {
    throw new Exception(response.getErrorMessage());
  }
}
 
Developer: DhruvKumar, Project: iot-masterclass, Lines: 11, Source: BackupHiveTablePartitionAction.java

Example 8: runCommandWithDelay

import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; // import the required package/class
protected void runCommandWithDelay(Driver driver, String cmd, int sleepMs) throws Exception {
    LOG.debug("Running command '{}'", cmd);
    ss.setCommandType(null);
    CommandProcessorResponse response = driver.run(cmd);
    assertEquals(response.getResponseCode(), 0);
    if (sleepMs != 0) {
        Thread.sleep(sleepMs);
    }
}
 
Developer: apache, Project: incubator-atlas, Lines: 10, Source: HiveITBase.java

Example 9: executeHQL

import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; // import the required package/class
/**
 * only used by Deploy Util
 */
@Override
public void executeHQL(String hql) throws CommandNeedRetryException, IOException {
    CommandProcessorResponse response = getDriver().run(hql);
    int retCode = response.getResponseCode();
    if (retCode != 0) {
        String err = response.getErrorMessage();
        throw new IOException("Failed to execute hql [" + hql + "], error message is: " + err);
    }
}
 
Developer: apache, Project: kylin, Lines: 13, Source: CLIHiveClient.java

Example 10: testHareDriver

import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; // import the required package/class
@Test
public void testHareDriver() throws Exception {
    HiveConf hiveConf = new HiveConf();
    hiveConf.set("hbase.zookeeper.quorum", "host1");
    hiveConf.set("hbase.zookeeper.property.clientPort", "2181");
    hiveConf.set("fs.default.name", "hdfs://host1:8020");
    hiveConf.set("yarn.resourcemanager.address", "host1:8032");
    hiveConf.set("yarn.resourcemanager.scheduler.address", "host1:8030");
    hiveConf.set("yarn.resourcemanager.resource-tracker.address", "host1:8031");
    hiveConf.set("yarn.resourcemanager.admin.address", "host1:8033");
    hiveConf.set("mapreduce.framework.name", "yarn");
    hiveConf.set("mapreduce.jobhistory.address", "host1:10020");
    hiveConf.set("yarn.nodemanager.aux-services", "mapreduce_shuffle");
    hiveConf.set("javax.jdo.option.ConnectionURL", "jdbc:mysql://192.168.1.214:3306/hare");
    hiveConf.set("javax.jdo.option.ConnectionDriverName", "com.mysql.jdbc.Driver");
    hiveConf.set("javax.jdo.option.ConnectionUserName", "root");
    hiveConf.set("javax.jdo.option.ConnectionPassword", "123456");

    hiveConf.set("hive.dbname", "default");
    hiveConf.set("hbase.client.retries.number", "1");
    hiveConf.set("hbase.rpc.timeout", "9999999");
    hiveConf.set("cluster.kerberos.enabled", "false");

    CliSessionState ss = new CliSessionState(hiveConf);
    UserGroupInformation.setConfiguration(hiveConf);
    SessionState.start(ss);

    HareDriver driver = new HareDriver(hiveConf);
    String sql = "select count(`:key`) from stana_host_c";

    CommandProcessorResponse res = driver.run(sql);

    System.out.println("Response Code:" + res.getResponseCode());
    System.out.println("Error Message:" + res.getErrorMessage());
    System.out.println("SQL State:" + res.getSQLState());

    Schema s = res.getSchema();
    if (s != null) {
        List<FieldSchema> schema = s.getFieldSchemas();
        if ((schema != null) && (!schema.isEmpty())) {
            for (int pos = 0; pos < schema.size(); pos++) {
            }
        }
    }

    int count = 0;
    ArrayList<String> list = new ArrayList<String>();
    try {
        while (driver.getResults(list)) {
            for (String r : list) {
                System.out.println(r);
                count++;
            }
            list.clear();
        }
        System.out.println("COUNT:" + count);
    } catch (Exception e) {
        e.printStackTrace();
    }
    ss.close();
}
 
Developer: HareDB, Project: HareDBWebRESTful, Lines: 63, Source: HareQueryHareQLTest.java

Example 11: testInTimeRangeWriterWithHQL

import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; // import the required package/class
@Test
public void testInTimeRangeWriterWithHQL() throws Exception {
  // For queries with a large number of partitions, the where clause generated by the
  // ORTimeRangeWriter causes a stack overflow because the operator tree of the where
  // clause gets too deep.

  // In this test, we rewrite the query once with the InTimeRangeWriter and once with the
  // ORTimeRangeWriter. Explain extended for the query rewritten with IN clauses passes,
  // while the OR query fails with a stack overflow.

  // We can also verify, by printing the explain output, that partitions are indeed being
  // identified with the IN clause.

  // Test 1 - check for contained part columns
  String query = "select dim1, max(msr3)," + " msr2 from testCube" + " where " + TWO_DAYS_RANGE_IT;

  HiveConf conf = new HiveConf(getConf(), TestCubeRewriter.class);
  conf.set(CubeQueryConfUtil.PROCESS_TIME_PART_COL, "pt");
  conf.setClass(CubeQueryConfUtil.TIME_RANGE_WRITER_CLASS,
    AbridgedTimeRangeWriter.class.asSubclass(TimeRangeWriter.class), TimeRangeWriter.class);

  String hqlWithInClause = rewrite(query, conf);
  System.out.println("@@ HQL with IN and OR: " + hqlWithInClause);

  // Run explain on this command, it should pass successfully.
  CommandProcessorResponse inExplainResponse = runExplain(hqlWithInClause, conf);
  assertNotNull(inExplainResponse);
  assertTrue(hqlWithInClause.contains("in"));

  // Test 2 - check for single part column
  // Verify for large number of partitions, single column. This is just to check if we don't see
  // errors on explain of large conditions
  String largePartQuery = "SELECT msr1 from testCube WHERE " + TWO_MONTHS_RANGE_UPTO_HOURS;
  HiveConf largeConf = new HiveConf(getConf(), TestCubeRewriter.class);
  largeConf.setClass(CubeQueryConfUtil.TIME_RANGE_WRITER_CLASS,
    AbridgedTimeRangeWriter.class.asSubclass(TimeRangeWriter.class), TimeRangeWriter.class);

  String largePartRewrittenQuery = rewrite(largePartQuery, largeConf);
  CommandProcessorResponse response = runExplain(largePartRewrittenQuery, largeConf);
  assertNotNull(response);
  assertTrue(largePartRewrittenQuery.contains("in"));
}
 
Developer: apache, Project: lens, Lines: 44, Source: TestCubeRewriter.java

Example 12: run

import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; // import the required package/class
public CommandProcessorResponse run(String command) throws CommandNeedRetryException {
  CommandProcessorResponse cpr = runInternal(command);
  if(cpr.getResponseCode() == 0) {
    return cpr;
  }
  SessionState ss = SessionState.get();
  if(ss == null) {
    return cpr;
  }
  MetaDataFormatter mdf = MetaDataFormatUtils.getFormatter(ss.getConf());
  if(!(mdf instanceof JsonMetaDataFormatter)) {
    return cpr;
  }
  /*Here we want to encode the error in machine readable way (e.g. JSON)
   * Ideally, errorCode would always be set to a canonical error defined in ErrorMsg.
   * In practice that is rarely the case, so the messy logic below tries to tease
   * out canonical error code if it can.  Exclude stack trace from output when
   * the error is a specific/expected one.
   * It's written to stdout for backward compatibility (WebHCat consumes it).*/
  try {
    if(downstreamError == null) {
      mdf.error(ss.out, errorMessage, cpr.getResponseCode(), SQLState);
      return cpr;
    }
    ErrorMsg canonicalErr = ErrorMsg.getErrorMsg(cpr.getResponseCode());
    if(canonicalErr != null && canonicalErr != ErrorMsg.GENERIC_ERROR) {
      /*Some HiveExceptions (e.g. SemanticException) don't set
        canonical ErrorMsg explicitly, but there is logic
        (e.g. #compile()) to find an appropriate canonical error and
        return its code as error code. In this case we want to
        preserve it for downstream code to interpret*/
      mdf.error(ss.out, errorMessage, cpr.getResponseCode(), SQLState, null);
      return cpr;
    }
    if(downstreamError instanceof HiveException) {
      HiveException rc = (HiveException) downstreamError;
      mdf.error(ss.out, errorMessage,
              rc.getCanonicalErrorMsg().getErrorCode(), SQLState,
              rc.getCanonicalErrorMsg() == ErrorMsg.GENERIC_ERROR ?
                      org.apache.hadoop.util.StringUtils.stringifyException(rc)
                      : null);
    }
    else {
      ErrorMsg canonicalMsg =
              ErrorMsg.getErrorMsg(downstreamError.getMessage());
      mdf.error(ss.out, errorMessage, canonicalMsg.getErrorCode(),
              SQLState, org.apache.hadoop.util.StringUtils.
              stringifyException(downstreamError));
    }
  }
  catch(HiveException ex) {
    console.printError("Unable to JSON-encode the error",
            org.apache.hadoop.util.StringUtils.stringifyException(ex));
  }
  return cpr;
}
 
Developer: adrian-wang, Project: project-panthera-skin, Lines: 57, Source: SkinDriver.java


Note: the org.apache.hadoop.hive.ql.processors.CommandProcessorResponse examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright of the source code; refer to the corresponding project's License for distribution and use, and do not reproduce without permission.