当前位置: 首页>>代码示例>>Java>>正文


Java HiveServer2类代码示例

本文整理汇总了Java中org.apache.hive.service.server.HiveServer2的典型用法代码示例。如果您正苦于以下问题:Java HiveServer2类的具体用法?Java HiveServer2怎么用?Java HiveServer2使用的例子?那么,这里精选的类代码示例或许可以为您提供帮助。


HiveServer2类属于org.apache.hive.service.server包,在下文中一共展示了HiveServer2类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: main

import org.apache.hive.service.server.HiveServer2; //导入依赖的package包/类
/**
 * Application entry point: boots the Spring context, lists the Hive tables
 * visible through the configured {@code HiveTemplate}, runs the password-file
 * ingestion job, and shuts the context down.
 *
 * Fixes: removed the unused local {@code HiveServer2 hi = null;} and the
 * {@code tablName} typo.
 */
public static void main(String[] args) {
	AbstractApplicationContext context = new ClassPathXmlApplicationContext(
			"spring/application-context.xml", HiveApp.class);
	log.info("Hive Application Running");
	// Ensure the Spring context is closed cleanly on JVM shutdown.
	context.registerShutdownHook();

	HiveTemplate template = context.getBean(HiveTemplate.class);
	List<String> tables = template.query("show tables;");
	for (String tableName : tables) {
		log.info(tableName);
	}

	PasswordProcessRepository repository = context
			.getBean(HivePasswordProcessRepository.class);
	repository.processPasswordFile("/etc/passwd");
	log.info("Count of password entries = " + repository.count());
	context.close();
	log.info("Hive Application Completed");
}
 
开发者ID:lhfei,项目名称:vdn-log-thinker,代码行数:23,代码来源:HiveApp.java

示例2: start

import org.apache.hive.service.server.HiveServer2; //导入依赖的package包/类
/**
 * Boots the embedded HiveServer2, optionally starting a standalone metastore
 * first, and blocks until the server reports it is up.
 *
 * @param confOverlay extra configuration entries applied before server init
 * @throws Exception if the metastore or HiveServer2 fails to start
 */
public void start(Map<String, String> confOverlay) throws Exception {
  if (isMetastoreRemote) {
    // Spin up a separate metastore on a free port and point HS2 at it.
    int msPort = MetaStoreUtils.findFreePort();
    getHiveConf().setVar(ConfVars.METASTOREURIS, "thrift://localhost:" + msPort);
    MetaStoreUtils.startMetaStore(msPort, ShimLoader.getHadoopThriftAuthBridge(), getHiveConf());
  }

  hiveServer2 = new HiveServer2();
  // Apply caller-supplied overrides before the server is initialised.
  confOverlay.forEach(this::setConfProperty);
  hiveServer2.init(getHiveConf());
  hiveServer2.start();
  waitForStartup();
  setStarted(true);
}
 
开发者ID:bobfreitas,项目名称:hiveunit-mr2,代码行数:19,代码来源:MiniHS2.java

示例3: init

import org.apache.hive.service.server.HiveServer2; //导入依赖的package包/类
/**
 * Creates the Hive home directory on HDFS, starts an embedded metastore on a
 * background thread, starts HiveServer2, and records the JDBC URL clients
 * should use.
 *
 * @throws InitUnitException if the Hive home directory cannot be created
 */
@Override
public void init() throws InitUnitException {
    try {
        hdfsUnit.getFileSystem().mkdirs(new Path(HIVE_HOME));
        hdfsUnit.getFileSystem().setOwner(new Path(HIVE_HOME), "hive", "hive");
    } catch (IOException e) {
        throw new InitUnitException("Failed to create hive home directory: " + HIVE_HOME, e);
    }
    metastorePort = PortProvider.nextPort();
    final HiveConf hiveConf = gatherConfigs();
    //TODO: remove static call
    new Thread(() -> {
        try {
            HiveMetaStore.startMetaStore(metastorePort, null, hiveConf);
        } catch (Throwable throwable) {
            // Best-effort: metastore runs for the lifetime of the test unit.
            throwable.printStackTrace();
        }
    }).start();
    hiveServer = new HiveServer2();
    hiveServer.init(hiveConf);
    hiveServer.start();
    jdbcUrl = String.format("jdbc:hive2://%s:%s/default", HIVE_HOST, port);
}
 
开发者ID:intropro,项目名称:prairie,代码行数:27,代码来源:Hive2Unit.java

示例4: main

import org.apache.hive.service.server.HiveServer2; //导入依赖的package包/类
/**
 * Launches a standalone HiveServer2 backed by an embedded Derby metastore,
 * listening in plain binary (NOSASL) thrift mode, then runs the sample client.
 */
public static void main(String[] args) throws Exception {
    HiveConf hiveConf = new HiveConf();
    // Embedded Derby metastore created on demand; warehouse lives under /tmp.
    hiveConf.set("javax.jdo.option.ConnectionURL", "jdbc:derby:;databaseName=metastore_db;create=true");
    hiveConf.set("javax.jdo.option.ConnectionDriverName", "org.apache.derby.jdbc.EmbeddedDriver");
    hiveConf.set("hive.metastore.warehouse.dir", "file:///tmp");
    // Binary transport without SASL so the test client can connect directly;
    // doAs is disabled because there is no real user impersonation here.
    hiveConf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false);
    hiveConf.setVar(ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST, HOST);
    hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_PORT, PORT);
    hiveConf.setVar(ConfVars.HIVE_SERVER2_AUTHENTICATION, AuthTypes.NOSASL.toString());
    hiveConf.setVar(ConfVars.HIVE_SERVER2_TRANSPORT_MODE, "binary");

    HiveServer2 hs2 = new HiveServer2();
    hs2.init(hiveConf);
    hs2.start();

    initClient(createBinaryTransport());
}
 
开发者ID:bbonnin,项目名称:hadoop-mongodb,代码行数:24,代码来源:HiveServer2Launcher.java

示例5: waitForStartupAndGetPort

import org.apache.hive.service.server.HiveServer2; //导入依赖的package包/类
/**
 * Polls the given HiveServer2 via reflection until its thrift server socket
 * exists, then returns the local port it is bound to.
 *
 * Reflection path walked each attempt:
 * thriftCLIService -> server -> serverTransport_ -> serverSocket_
 *
 * @param hiveServer2 the server instance currently starting up
 * @return the port the thrift server socket is bound to
 * @throws InterruptedException if interrupted while polling
 */
@SuppressWarnings("resource")
private int waitForStartupAndGetPort(HiveServer2 hiveServer2) throws InterruptedException {
  String[] fieldPath = { "thriftCLIService", "server", "serverTransport_", "serverSocket_" };
  while (true) {
    Thread.sleep(100);
    // Walk the private field chain; restart the poll if any link is missing.
    Object current = hiveServer2;
    for (String field : fieldPath) {
      current = getObject(current, field);
      if (current == null) {
        break;
      }
    }
    if (current != null) {
      return ((ServerSocket) current).getLocalPort();
    }
  }
}
 
开发者ID:apache,项目名称:incubator-blur,代码行数:26,代码来源:BlurSerDeTest.java

示例6: beforeTest

import org.apache.hive.service.server.HiveServer2; //导入依赖的package包/类
/**
 * Spins up the embedded HiveServer2 before each test and builds the JDBC
 * connection URL pointing at the per-test database.
 *
 * @throws Throwable if the server fails to start
 */
@Override
protected void beforeTest() throws Throwable {
  // Relax SQL-standard authorization so tests run without real privileges.
  conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, RelaxedSQLStdHiveAuthorizerFactory.class.getName());
  hiveServer2 = new HiveServer2();
  hiveServer2.init(conf);
  hiveServer2.start();
  waitForHiveServer2StartUp();

  jdbcConnectionUrl = String.format("jdbc:hive2://localhost:%s/%s", port, databaseName());
}
 
开发者ID:HotelsDotCom,项目名称:beeju,代码行数:11,代码来源:HiveServer2JUnitRule.java

示例7: start

import org.apache.hive.service.server.HiveServer2; //导入依赖的package包/类
/**
 * Configures and starts the local HiveServer2 instance on the configured port.
 *
 * @throws Exception if configuration, initialisation or startup fails
 */
@Override
public void start() throws Exception {
    LOG.info("HIVESERVER2: Starting HiveServer2 on port: {}", hiveServer2Port);
    hiveServer2 = new HiveServer2();
    configure();
    hiveServer2.init(hiveConf);
    hiveServer2.start();
}
 
开发者ID:sakserv,项目名称:hadoop-mini-clusters,代码行数:9,代码来源:HiveLocalServer2.java

示例8: createHS2Service

import org.apache.hive.service.server.HiveServer2; //导入依赖的package包/类
/**
 * Creates and starts the HiveServer2 (HS2) service used by the remote driver
 * tests, then blocks until its thrift port accepts connections.
 *
 * Fix: each polling probe socket is now closed via try-with-resources; the
 * original leaked one socket (and file descriptor) per failed/successful probe.
 *
 * @throws Exception if the server cannot be initialised or started
 */
public static void createHS2Service() throws Exception {
  remoteConf.setClass(HiveDriver.HIVE_CONNECTION_CLASS, RemoteThriftConnection.class, ThriftConnection.class);
  remoteConf.set("hive.lock.manager", "org.apache.hadoop.hive.ql.lockmgr.EmbeddedLockManager");
  HiveConf.setVar(remoteConf, HiveConf.ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST, HS2_HOST);
  HiveConf.setIntVar(remoteConf, HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT, HS2_PORT);
  HiveConf.setIntVar(remoteConf, HiveConf.ConfVars.HIVE_SERVER2_WEBUI_PORT, HS2_UI_PORT);
  HiveConf.setIntVar(remoteConf, HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_CONNECTION_RETRY_LIMIT, 3);
  HiveConf.setIntVar(remoteConf, HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_RETRY_LIMIT, 3);
  HiveConf.setVar(remoteConf, HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_RETRY_DELAY_SECONDS, "10s");
  HiveConf.setVar(remoteConf, HiveConf.ConfVars.HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT, "1s");
  HiveConf.setVar(remoteConf, HiveConf.ConfVars.SERVER_READ_SOCKET_TIMEOUT, "60000s");
  remoteConf.setLong(HiveDriver.HS2_CONNECTION_EXPIRY_DELAY, 10000);
  server = new HiveServer2();
  hiveConf = new HiveConf();
  hiveConf.addResource(remoteConf);
  server.init(hiveConf);
  server.start();
  // Poll until the thrift port accepts connections, closing each probe
  // socket so repeated attempts do not leak file descriptors.
  while (true) {
    try (Socket probe = new Socket(HS2_HOST, HS2_PORT)) {
      break;
    } catch (Throwable th) {
      Thread.sleep(1000);
    }
  }
}
 
开发者ID:apache,项目名称:lens,代码行数:32,代码来源:TestRemoteHiveDriver.java

示例9: init

import org.apache.hive.service.server.HiveServer2; //导入依赖的package包/类
/**
 * Starts the embedded HiveServer and opens a test session against it.
 *
 * @param testConfig test-case properties merged into the context's HiveConf
 *                   before the server starts (test values win)
 * @param hiveVars   Hive variables installed into the new session's state
 * @throws IllegalStateException if the server or session cannot be created
 */
public void init(Map<String, String> testConfig, Map<String, String> hiveVars) {

    context.init();

    HiveConf hiveConf = context.getHiveConf();

    // Test-case properties override the context's HiveConf defaults.
    testConfig.forEach(hiveConf::set);

    try {
        hiveServer2 = new HiveServer2();
        hiveServer2.init(hiveConf);

        // Locate the CLIService among the services HiveServer2 registered.
        for (Service service : hiveServer2.getServices()) {
            if (service instanceof CLIService) {
                client = (CLIService) service;
            }
        }

        Preconditions.checkNotNull(client, "ClIService was not initialized by HiveServer2");

        sessionHandle = client.openSession("noUser", "noPassword", null);

        currentSessionState = client.getSessionManager().getSession(sessionHandle).getSessionState();
        currentSessionState.setHiveVariables(hiveVars);
    } catch (Exception e) {
        throw new IllegalStateException("Failed to create HiveServer :" + e.getMessage(), e);
    }

    // Ping hive server before we do anything more with it! If validation
    // is switched on, this will fail if metastorage is not set up properly
    pingHiveServer();
}
 
开发者ID:klarna,项目名称:HiveRunner,代码行数:44,代码来源:HiveServerContainer.java

示例10: InternalHiveServerRunner

import org.apache.hive.service.server.HiveServer2; //导入依赖的package包/类
/**
 * Creates a runner hosting an in-process HiveServer2 on the given host/port.
 * The server instance is constructed here but not started.
 *
 * @param hostname host the server should bind to
 * @param port     port the server should listen on
 * @throws Exception if superclass initialisation fails
 */
public InternalHiveServerRunner(String hostname, int port) throws Exception {
  super(hostname, port);
  hiveServer2 = new HiveServer2();
}
 
开发者ID:vybs,项目名称:sqoop-on-spark,代码行数:5,代码来源:InternalHiveServerRunner.java

示例11: getDbs

import org.apache.hive.service.server.HiveServer2; //导入依赖的package包/类
/**
 * Ingests the password file through the service, then verifies the visible
 * databases can be listed.
 *
 * Fixes: removed the unused {@code HiveServer2 server} local and dead
 * commented-out code; the entry count was computed but never used, and the
 * per-database log message wrongly said "User total" — both are corrected.
 */
@Test
public void getDbs() {
	passwordProcessService.processPasswordFile("/etc/passwd");

	// Keep the count call (it exercises the service) and surface its result.
	Long total = passwordProcessService.count();
	log.info("Password entry total: {}", total);

	List<String> dbs = passwordProcessService.getDbs();
	for (String db : dbs) {
		log.info("Database: {}", db);
	}
}
 
开发者ID:lhfei,项目名称:vdn-log-thinker,代码行数:18,代码来源:PasswordProcessServiceImplTest.java

示例12: start

import org.apache.hive.service.server.HiveServer2; //导入依赖的package包/类
/**
 * Starts an embedded HiveServer2 backed by a freshly-created Derby metastore
 * and a DFS-hosted warehouse/scratch layout, then opens a SessionState.
 * The execution engine follows the configured runtime (local MR2 or Spark).
 *
 * @throws Exception if any filesystem setup or server startup step fails
 */
@SuppressWarnings("ResultOfMethodCallIgnored")
@Override
public synchronized void start() throws Exception {
  long time = log(LOG, "start");
  // Unset env overrides so the embedded server only sees the conf built below.
  CdhServer.setEnvProperty("HIVE_HOME", null);
  CdhServer.setEnvProperty("HIVE_CONF_DIR", null);
  // Layout: <dfs>/<home>/{warehouse,scratch}; local scratch under ABS_DIR_HIVE.
  Path hiveHomePath = new Path(DfsServer.getInstance().getPathUri("/"), DIR_HOME);
  Path hiveWarehousePath = new Path(hiveHomePath, DIR_WAREHOUSE);
  Path hiveScratchPath = new Path(hiveHomePath, DIR_SCRATCH);
  File hiveScratchLocalPath = new File(ABS_DIR_HIVE, DIR_SCRATCH);
  File derbyDir = new File(ABS_DIR_DERBY_DB);
  // Unique Derby DB per start() so reruns never see stale metastore state.
  String hiveDerbyConnectString = "jdbc:derby:" + derbyDir.getAbsolutePath() + "/test-hive-metastore-"
    + DERBY_DB_COUNTER.incrementAndGet() + ";create=true";
  FileUtils.deleteDirectory(derbyDir);
  derbyDir.mkdirs();
  DfsServer.getInstance().getFileSystem().mkdirs(hiveHomePath);
  DfsServer.getInstance().getFileSystem().mkdirs(hiveWarehousePath);
  // NOTE(review): these are DECIMAL literals — 511 == octal 0777 (warehouse
  // world-writable), 475 == octal 0733; confirm the scratch bits are intended.
  FileSystem.mkdirs(DfsServer.getInstance().getFileSystem(), hiveWarehousePath, new FsPermission((short) 511));
  FileSystem.mkdirs(DfsServer.getInstance().getFileSystem(), hiveScratchPath, new FsPermission((short) 475));
  HiveConf hiveConf = new HiveConf(HiveServer.class);
  hiveConf.setVar(ConfVars.METASTOREWAREHOUSE, hiveWarehousePath.toString());
  hiveConf.setVar(ConfVars.METASTORECONNECTURLKEY, hiveDerbyConnectString);
  hiveConf.setVar(ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST, "localhost");
  hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_PORT, binaryPort);
  hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_PORT, httpPort);
  hiveConf.setVar(ConfVars.SCRATCHDIR, hiveScratchPath.toString());
  hiveConf.setVar(ConfVars.LOCALSCRATCHDIR, hiveScratchLocalPath.getAbsolutePath());
  hiveConf.set(CommonConfigurationKeysPublic.HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_KEY, "false");
  hiveConf.setVar(ConfVars.HIVEINPUTFORMAT, CombineHiveInputFormat.class.getName());
  // Disable concurrency/local-mode/join rewrites for deterministic test runs.
  hiveConf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, Boolean.FALSE);
  hiveConf.setBoolVar(ConfVars.LOCALMODEAUTO, Boolean.FALSE);
  hiveConf.setBoolVar(ConfVars.HIVECONVERTJOIN, Boolean.FALSE);
  hiveConf.setBoolVar(ConfVars.HIVEIGNOREMAPJOINHINT, Boolean.FALSE);
  switch (getRuntime()) {
    case LOCAL_MR2:
      break;
    case LOCAL_SPARK:
      hiveConf.setVar(ConfVars.HIVE_EXECUTION_ENGINE, "spark");
      hiveConf.set(HIVE_CONF_SPARK_MASTER, "local[*]");
      break;
    default:
      throw new IllegalArgumentException("Unsupported [" + getClass().getSimpleName() + "] runtime [" + getRuntime() + "]");
  }
  hiveServer = new HiveServer2();
  hiveServer.init(hiveConf);
  hiveServer.start();
  waitForStart();
  SessionState.start(new SessionState(hiveConf));
  setConf(hiveConf);
  log(LOG, "start", time);
}
 
开发者ID:ggear,项目名称:cloudera-framework,代码行数:52,代码来源:HiveServer.java

示例13: setUpClass

import org.apache.hive.service.server.HiveServer2; //导入依赖的package包/类
/**
 * Start all required mini clusters: local HDFS/MR, a Hive metastore on a
 * background executor thread, a HiveServer2, and a JDBC connection to it.
 *
 * Fix: the metastore keep-alive used {@code while(true);}, a hot busy-spin
 * that pins a CPU core for the entire test run; it now parks in a sleep loop.
 */
@BeforeClass
public static void setUpClass() throws Exception {
  // Conf dir
  if (!new File(confDir).mkdirs()) {
    fail("Failed to create config directories.");
  }

  // HDFS
  File minidfsDir = new File("target/minidfs").getAbsoluteFile();
  if (!minidfsDir.exists()) {
    Assert.assertTrue(minidfsDir.mkdirs());
  }
  Set<PosixFilePermission> set = new HashSet<>();
  set.add(PosixFilePermission.OWNER_EXECUTE);
  set.add(PosixFilePermission.OWNER_READ);
  set.add(PosixFilePermission.OWNER_WRITE);
  set.add(PosixFilePermission.OTHERS_READ);
  java.nio.file.Files.setPosixFilePermissions(minidfsDir.toPath(), set);
  System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
  final Configuration conf = new HdfsConfiguration();
  // Allow the current user to proxy as any user/group (needed for HS2 doAs).
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
  miniDFS = new MiniDFSCluster.Builder(conf).build();
  miniDFS.getFileSystem().setPermission(new Path("/"), FsPermission.createImmutable((short)0777));
  miniMR = MiniMRClientClusterFactory.create(BaseHiveIT.class, 1, conf);
  writeConfiguration(miniMR.getConfig(), confDir + "/core-site.xml");
  writeConfiguration(miniMR.getConfig(), confDir + "/hdfs-site.xml");
  writeConfiguration(miniMR.getConfig(), confDir + "/mapred-site.xml");
  writeConfiguration(miniMR.getConfig(), confDir + "/yarn-site.xml");

  // Configuration for both HMS and HS2
  METASTORE_PORT = NetworkUtils.getRandomPort();
  HIVE_SERVER_PORT = NetworkUtils.getRandomPort();
  final HiveConf hiveConf = new HiveConf(miniDFS.getConfiguration(0), HiveConf.class);
  hiveConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, "jdbc:derby:;databaseName=target/metastore_db;create=true");
  hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, Utils.format("thrift://{}:{}", HOSTNAME, METASTORE_PORT));
  hiveConf.set(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST.varname, "localhost");
  hiveConf.set("org.jpox.autoCreateSchema", "true");
  hiveConf.set("datanucleus.schema.autoCreateTables", "true");
  hiveConf.set("hive.metastore.schema.verification", "false");
  hiveConf.setInt(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT.varname, HIVE_SERVER_PORT);

  // Hive metastore: keep the callable's thread alive after startMetaStore by
  // parking it, instead of the original CPU-burning `while(true);` spin.
  Callable<Void> metastoreService = () -> {
    try {
      HiveMetaStore.startMetaStore(METASTORE_PORT, ShimLoader.getHadoopThriftAuthBridge(), hiveConf);
      while (true) {
        Thread.sleep(Long.MAX_VALUE);
      }
    } catch (Throwable e) {
      throw new Exception("Error starting metastore", e);
    }
  };
  hiveMetastoreExecutor.submit(metastoreService);
  NetworkUtils.waitForStartUp(HOSTNAME, METASTORE_PORT, MINICLUSTER_BOOT_RETRY, MINICLUSTER_BOOT_SLEEP);

  // HiveServer 2
  hiveServer2 = new HiveServer2();
  hiveServer2.init(hiveConf);
  hiveServer2.start();
  writeConfiguration(hiveServer2.getHiveConf(), confDir + "/hive-site.xml");
  NetworkUtils.waitForStartUp(HOSTNAME, HIVE_SERVER_PORT, MINICLUSTER_BOOT_RETRY, MINICLUSTER_BOOT_SLEEP);

  // JDBC Connection to Hive
  Class.forName(HIVE_JDBC_DRIVER);
  hiveConnection = HiveMetastoreUtil.getHiveConnection(
    getHiveJdbcUrl(),
    HadoopSecurityUtil.getLoginUser(conf),
    Collections.emptyList()
  );

  // And finally we're initialized
  isHiveInitialized = true;
}
 
开发者ID:streamsets,项目名称:datacollector,代码行数:76,代码来源:BaseHiveIT.java

示例14: runLoad

import org.apache.hive.service.server.HiveServer2; //导入依赖的package包/类
/**
 * Boots a throw-away HiveServer2 on an ephemeral port, creates a Blur-backed
 * table plus a load table over JDBC, pushes generated rows through an
 * insert-select, and returns the number of records generated.
 *
 * Fix: the JDBC connection is now closed via try-with-resources and the
 * server stopped in a finally block — the original leaked both whenever any
 * statement threw.
 *
 * @param disableMrUpdate whether to disable the MapReduce update path
 * @return the number of generated records loaded
 */
private int runLoad(boolean disableMrUpdate) throws IOException, InterruptedException, ClassNotFoundException,
    SQLException {

  Configuration configuration = miniCluster.getMRConfiguration();
  writeSiteFiles(configuration);
  HiveConf hiveConf = new HiveConf(configuration, getClass());
  // Port 0 lets the server pick a free port; we discover it below.
  hiveConf.set("hive.server2.thrift.port", "0");
  HiveServer2 hiveServer2 = new HiveServer2();
  hiveServer2.init(hiveConf);
  hiveServer2.start();

  try {
    int port = waitForStartupAndGetPort(hiveServer2);

    Class.forName(HiveDriver.class.getName());
    String userName = UserGroupInformation.getCurrentUser().getShortUserName();
    int totalRecords = 100;

    try (Connection connection = DriverManager.getConnection("jdbc:hive2://localhost:" + port, userName, "")) {
      UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();

      run(connection, "set blur.user.name=" + currentUser.getUserName());
      run(connection, "set blur.mr.update.disabled=" + disableMrUpdate);
      run(connection, "set hive.metastore.warehouse.dir=" + WAREHOUSE.toURI().toString());
      run(connection, "create database if not exists testdb");
      run(connection, "use testdb");

      run(connection, "CREATE TABLE if not exists testtable ROW FORMAT SERDE 'org.apache.blur.hive.BlurSerDe' "
          + "WITH SERDEPROPERTIES ( 'blur.zookeeper.connection'='" + miniCluster.getZkConnectionString() + "', "
          + "'blur.table'='" + TEST + "', 'blur.family'='" + FAM + "' ) "
          + "STORED BY 'org.apache.blur.hive.BlurHiveStorageHandler'");

      run(connection, "desc testtable");

      String createLoadTable = buildCreateLoadTable(connection);
      run(connection, createLoadTable);
      File dbDir = new File(WAREHOUSE, "testdb.db");
      File tableDir = new File(dbDir, "loadtable");
      generateData(tableDir, totalRecords);

      run(connection, "select * from loadtable");
      run(connection, "set " + BlurSerDe.BLUR_BLOCKING_APPLY + "=true");
      run(connection, "insert into table testtable select * from loadtable");
    }
    return totalRecords;
  } finally {
    hiveServer2.stop();
  }
}
 
开发者ID:apache,项目名称:incubator-blur,代码行数:47,代码来源:BlurSerDeTest.java

示例15: InternalHiveServer

import org.apache.hive.service.server.HiveServer2; //导入依赖的package包/类
/**
 * Builds an in-process HiveServer2 wrapper bound to the host/port resolved
 * from the supplied configuration; the server is not started here.
 *
 * @param conf the Hive configuration used to resolve host and port
 * @throws Exception if superclass setup fails
 */
public InternalHiveServer(HiveConf conf) throws Exception {
  super(conf, getHostname(conf), getPort(conf));
  this.conf = conf;
  hiveServer2 = new HiveServer2();
}
 
开发者ID:apache,项目名称:incubator-sentry,代码行数:6,代码来源:InternalHiveServer.java


注:本文中的org.apache.hive.service.server.HiveServer2类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。