This article collects typical usage examples of the Java method org.apache.hive.service.server.HiveServer2.start. If you have been wondering what HiveServer2.start does, how to call it, or where to find concrete examples, the curated method samples below may help. You can also explore the enclosing class org.apache.hive.service.server.HiveServer2 in more depth.
The following presents 9 code examples of HiveServer2.start, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java examples.
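As a quick orientation before the examples: the common lifecycle is to build a HiveConf, call init(conf), then start(), and later stop(). A minimal sketch under those assumptions (the port value here is illustrative, not taken from the examples below):

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hive.service.server.HiveServer2;

public class HiveServer2Lifecycle {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    conf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT, 10000); // illustrative port
    HiveServer2 server = new HiveServer2();
    server.init(conf);  // wires up internal services from the configuration
    server.start();     // starts the services; clients may still need to poll the port
    try {
      // ... run queries over JDBC or Thrift ...
    } finally {
      server.stop();
    }
  }
}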
Example 1: start
import org.apache.hive.service.server.HiveServer2; // import the package/class this method depends on
public void start(Map<String, String> confOverlay) throws Exception {
  if (isMetastoreRemote) {
    int metaStorePort = MetaStoreUtils.findFreePort();
    getHiveConf().setVar(ConfVars.METASTOREURIS, "thrift://localhost:" + metaStorePort);
    MetaStoreUtils.startMetaStore(metaStorePort,
        ShimLoader.getHadoopThriftAuthBridge(), getHiveConf());
  }
  hiveServer2 = new HiveServer2();
  // Set confOverlay parameters
  for (Map.Entry<String, String> entry : confOverlay.entrySet()) {
    setConfProperty(entry.getKey(), entry.getValue());
  }
  hiveServer2.init(getHiveConf());
  hiveServer2.start();
  waitForStartup();
  setStarted(true);
}
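The waitForStartup() helper used in Example 1 is not shown. A minimal sketch of one way it could be implemented, polling the configured Thrift port (the method body, retry budget, and sleep interval are assumptions for illustration):

private void waitForStartup() throws Exception {
  int retries = 30; // assumed retry budget
  int port = getHiveConf().getIntVar(ConfVars.HIVE_SERVER2_THRIFT_PORT);
  while (retries-- > 0) {
    try (java.net.Socket ignored = new java.net.Socket("localhost", port)) {
      return; // port accepts connections; assume the server is up
    } catch (java.io.IOException e) {
      Thread.sleep(1000);
    }
  }
  throw new IllegalStateException("HiveServer2 did not start on port " + port);
}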
Example 2: init
import org.apache.hive.service.server.HiveServer2; // import the package/class this method depends on
@Override
public void init() throws InitUnitException {
  try {
    hdfsUnit.getFileSystem().mkdirs(new Path(HIVE_HOME));
    hdfsUnit.getFileSystem().setOwner(new Path(HIVE_HOME), "hive", "hive");
  } catch (IOException e) {
    throw new InitUnitException("Failed to create hive home directory: " + HIVE_HOME, e);
  }
  metastorePort = PortProvider.nextPort();
  final HiveConf hiveConf = gatherConfigs();
  new Thread(new Runnable() {
    @Override
    public void run() {
      try {
        // TODO: remove static call
        HiveMetaStore.startMetaStore(metastorePort, null, hiveConf);
      } catch (Throwable throwable) {
        throwable.printStackTrace();
      }
    }
  }).start();
  hiveServer = new HiveServer2();
  hiveServer.init(hiveConf);
  hiveServer.start();
  jdbcUrl = String.format("jdbc:hive2://%s:%s/default", HIVE_HOST, port);
}
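Once the jdbcUrl from Example 2 is built, a client can connect with the standard Hive JDBC driver class org.apache.hive.jdbc.HiveDriver. A hypothetical smoke test (the method name and credentials are placeholders):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class JdbcSmokeTest {
  // hypothetical check against the jdbcUrl built in Example 2
  static void smokeTest(String jdbcUrl) throws Exception {
    Class.forName("org.apache.hive.jdbc.HiveDriver");
    try (Connection conn = DriverManager.getConnection(jdbcUrl, "hive", ""); // placeholder credentials
         Statement stmt = conn.createStatement()) {
      stmt.execute("SHOW TABLES");
    }
  }
}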
Example 3: main
import org.apache.hive.service.server.HiveServer2; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
  HiveConf hiveConf = new HiveConf();
  hiveConf.set("javax.jdo.option.ConnectionURL", "jdbc:derby:;databaseName=metastore_db;create=true");
  hiveConf.set("javax.jdo.option.ConnectionDriverName", "org.apache.derby.jdbc.EmbeddedDriver");
  hiveConf.set("hive.metastore.warehouse.dir", "file:///tmp");
  //hiveConf.set("hive.server2.thrift.port", "11100");
  hiveConf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false);
  hiveConf.setVar(ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST, HOST);
  hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_PORT, PORT);
  hiveConf.setVar(ConfVars.HIVE_SERVER2_AUTHENTICATION, AuthTypes.NOSASL.toString());
  hiveConf.setVar(ConfVars.HIVE_SERVER2_TRANSPORT_MODE, "binary");
  /*<!--hive.metastore.local=true
  mapreduce.framework.name=yarn
  hive.exec.submitviachild=false-->
  hive.debug.localtask=true
  hive.auto.convert.join.use.nonstaged=true*/
  HiveServer2 server = new HiveServer2();
  server.init(hiveConf);
  server.start();
  initClient(createBinaryTransport());
}
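The initClient(createBinaryTransport()) helpers in Example 3 are not shown. Since authentication is NOSASL and the transport mode is binary, a plain TSocket with TBinaryProtocol should suffice. A sketch under those assumptions (the helper signature is hypothetical, and the Thrift package path moved between Hive versions, e.g. to org.apache.hive.service.rpc.thrift in Hive 2.x):

import org.apache.hive.service.cli.thrift.TCLIService;
import org.apache.hive.service.cli.thrift.TOpenSessionReq;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

static TCLIService.Client initClient(String host, int port) throws Exception {
  TTransport transport = new TSocket(host, port); // plain socket: no SASL wrapper under NOSASL
  transport.open();
  TCLIService.Client client = new TCLIService.Client(new TBinaryProtocol(transport));
  client.OpenSession(new TOpenSessionReq()); // open a session against the freshly started server
  return client;
}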
Example 4: beforeTest
import org.apache.hive.service.server.HiveServer2; // import the package/class this method depends on
@Override
protected void beforeTest() throws Throwable {
  conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, RelaxedSQLStdHiveAuthorizerFactory.class.getName());
  hiveServer2 = new HiveServer2();
  hiveServer2.init(conf);
  hiveServer2.start();
  waitForHiveServer2StartUp();
  jdbcConnectionUrl = "jdbc:hive2://localhost:" + port + "/" + databaseName();
}
Example 5: start
import org.apache.hive.service.server.HiveServer2; // import the package/class this method depends on
@Override
public void start() throws Exception {
  hiveServer2 = new HiveServer2();
  LOG.info("HIVESERVER2: Starting HiveServer2 on port: {}", hiveServer2Port);
  configure();
  hiveServer2.init(hiveConf);
  hiveServer2.start();
}
Example 6: createHS2Service
import org.apache.hive.service.server.HiveServer2; // import the package/class this method depends on
/**
 * Creates the HS2 service.
 *
 * @throws Exception the exception
 */
public static void createHS2Service() throws Exception {
  remoteConf.setClass(HiveDriver.HIVE_CONNECTION_CLASS, RemoteThriftConnection.class, ThriftConnection.class);
  remoteConf.set("hive.lock.manager", "org.apache.hadoop.hive.ql.lockmgr.EmbeddedLockManager");
  HiveConf.setVar(remoteConf, HiveConf.ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST, HS2_HOST);
  HiveConf.setIntVar(remoteConf, HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT, HS2_PORT);
  HiveConf.setIntVar(remoteConf, HiveConf.ConfVars.HIVE_SERVER2_WEBUI_PORT, HS2_UI_PORT);
  HiveConf.setIntVar(remoteConf, HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_CONNECTION_RETRY_LIMIT, 3);
  HiveConf.setIntVar(remoteConf, HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_RETRY_LIMIT, 3);
  HiveConf.setVar(remoteConf, HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_RETRY_DELAY_SECONDS, "10s");
  HiveConf.setVar(remoteConf, HiveConf.ConfVars.HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT, "1s");
  HiveConf.setVar(remoteConf, HiveConf.ConfVars.SERVER_READ_SOCKET_TIMEOUT, "60000s");
  remoteConf.setLong(HiveDriver.HS2_CONNECTION_EXPIRY_DELAY, 10000);
  server = new HiveServer2();
  hiveConf = new HiveConf();
  hiveConf.addResource(remoteConf);
  server.init(hiveConf);
  server.start();
  // Poll until the Thrift port accepts connections (closing each probe socket to avoid a leak)
  while (true) {
    try {
      new Socket(HS2_HOST, HS2_PORT).close();
      break;
    } catch (Throwable th) {
      Thread.sleep(1000);
    }
  }
}
Example 7: start
import org.apache.hive.service.server.HiveServer2; // import the package/class this method depends on
@SuppressWarnings("ResultOfMethodCallIgnored")
@Override
public synchronized void start() throws Exception {
  long time = log(LOG, "start");
  CdhServer.setEnvProperty("HIVE_HOME", null);
  CdhServer.setEnvProperty("HIVE_CONF_DIR", null);
  Path hiveHomePath = new Path(DfsServer.getInstance().getPathUri("/"), DIR_HOME);
  Path hiveWarehousePath = new Path(hiveHomePath, DIR_WAREHOUSE);
  Path hiveScratchPath = new Path(hiveHomePath, DIR_SCRATCH);
  File hiveScratchLocalPath = new File(ABS_DIR_HIVE, DIR_SCRATCH);
  File derbyDir = new File(ABS_DIR_DERBY_DB);
  String hiveDerbyConnectString = "jdbc:derby:" + derbyDir.getAbsolutePath() + "/test-hive-metastore-"
      + DERBY_DB_COUNTER.incrementAndGet() + ";create=true";
  FileUtils.deleteDirectory(derbyDir);
  derbyDir.mkdirs();
  DfsServer.getInstance().getFileSystem().mkdirs(hiveHomePath);
  DfsServer.getInstance().getFileSystem().mkdirs(hiveWarehousePath);
  FileSystem.mkdirs(DfsServer.getInstance().getFileSystem(), hiveWarehousePath, new FsPermission((short) 511)); // 511 decimal == 0777 octal
  FileSystem.mkdirs(DfsServer.getInstance().getFileSystem(), hiveScratchPath, new FsPermission((short) 475)); // 475 decimal == 0733 octal
  HiveConf hiveConf = new HiveConf(HiveServer.class);
  hiveConf.setVar(ConfVars.METASTOREWAREHOUSE, hiveWarehousePath.toString());
  hiveConf.setVar(ConfVars.METASTORECONNECTURLKEY, hiveDerbyConnectString);
  hiveConf.setVar(ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST, "localhost");
  hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_PORT, binaryPort);
  hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_PORT, httpPort);
  hiveConf.setVar(ConfVars.SCRATCHDIR, hiveScratchPath.toString());
  hiveConf.setVar(ConfVars.LOCALSCRATCHDIR, hiveScratchLocalPath.getAbsolutePath());
  hiveConf.set(CommonConfigurationKeysPublic.HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_KEY, "false");
  hiveConf.setVar(ConfVars.HIVEINPUTFORMAT, CombineHiveInputFormat.class.getName());
  hiveConf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, Boolean.FALSE);
  hiveConf.setBoolVar(ConfVars.LOCALMODEAUTO, Boolean.FALSE);
  hiveConf.setBoolVar(ConfVars.HIVECONVERTJOIN, Boolean.FALSE);
  hiveConf.setBoolVar(ConfVars.HIVEIGNOREMAPJOINHINT, Boolean.FALSE);
  switch (getRuntime()) {
    case LOCAL_MR2:
      break;
    case LOCAL_SPARK:
      hiveConf.setVar(ConfVars.HIVE_EXECUTION_ENGINE, "spark");
      hiveConf.set(HIVE_CONF_SPARK_MASTER, "local[*]");
      break;
    default:
      throw new IllegalArgumentException("Unsupported [" + getClass().getSimpleName() + "] runtime [" + getRuntime() + "]");
  }
  hiveServer = new HiveServer2();
  hiveServer.init(hiveConf);
  hiveServer.start();
  waitForStart();
  SessionState.start(new SessionState(hiveConf));
  setConf(hiveConf);
  log(LOG, "start", time);
}
Example 8: setUpClass
import org.apache.hive.service.server.HiveServer2; // import the package/class this method depends on
/**
 * Start all required mini clusters.
 */
@BeforeClass
public static void setUpClass() throws Exception {
  // Conf dir
  if (!new File(confDir).mkdirs()) {
    fail("Failed to create config directories.");
  }
  // HDFS
  File minidfsDir = new File("target/minidfs").getAbsoluteFile();
  if (!minidfsDir.exists()) {
    Assert.assertTrue(minidfsDir.mkdirs());
  }
  Set<PosixFilePermission> set = new HashSet<>();
  set.add(PosixFilePermission.OWNER_EXECUTE);
  set.add(PosixFilePermission.OWNER_READ);
  set.add(PosixFilePermission.OWNER_WRITE);
  set.add(PosixFilePermission.OTHERS_READ);
  java.nio.file.Files.setPosixFilePermissions(minidfsDir.toPath(), set);
  System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
  final Configuration conf = new HdfsConfiguration();
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
  miniDFS = new MiniDFSCluster.Builder(conf).build();
  miniDFS.getFileSystem().setPermission(new Path("/"), FsPermission.createImmutable((short) 0777));
  miniMR = MiniMRClientClusterFactory.create(BaseHiveIT.class, 1, conf);
  writeConfiguration(miniMR.getConfig(), confDir + "/core-site.xml");
  writeConfiguration(miniMR.getConfig(), confDir + "/hdfs-site.xml");
  writeConfiguration(miniMR.getConfig(), confDir + "/mapred-site.xml");
  writeConfiguration(miniMR.getConfig(), confDir + "/yarn-site.xml");
  // Configuration for both HMS and HS2
  METASTORE_PORT = NetworkUtils.getRandomPort();
  HIVE_SERVER_PORT = NetworkUtils.getRandomPort();
  final HiveConf hiveConf = new HiveConf(miniDFS.getConfiguration(0), HiveConf.class);
  hiveConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, "jdbc:derby:;databaseName=target/metastore_db;create=true");
  hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, Utils.format("thrift://{}:{}", HOSTNAME, METASTORE_PORT));
  hiveConf.set(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST.varname, "localhost");
  hiveConf.set("org.jpox.autoCreateSchema", "true");
  hiveConf.set("datanucleus.schema.autoCreateTables", "true");
  hiveConf.set("hive.metastore.schema.verification", "false");
  hiveConf.setInt(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT.varname, HIVE_SERVER_PORT);
  // Hive metastore
  Callable<Void> metastoreService = () -> {
    try {
      HiveMetaStore.startMetaStore(METASTORE_PORT, ShimLoader.getHadoopThriftAuthBridge(), hiveConf);
      while (true); // startMetaStore returns after launch; spin to keep the executor task alive
    } catch (Throwable e) {
      throw new Exception("Error starting metastore", e);
    }
  };
  hiveMetastoreExecutor.submit(metastoreService);
  NetworkUtils.waitForStartUp(HOSTNAME, METASTORE_PORT, MINICLUSTER_BOOT_RETRY, MINICLUSTER_BOOT_SLEEP);
  // HiveServer 2
  hiveServer2 = new HiveServer2();
  hiveServer2.init(hiveConf);
  hiveServer2.start();
  writeConfiguration(hiveServer2.getHiveConf(), confDir + "/hive-site.xml");
  NetworkUtils.waitForStartUp(HOSTNAME, HIVE_SERVER_PORT, MINICLUSTER_BOOT_RETRY, MINICLUSTER_BOOT_SLEEP);
  // JDBC Connection to Hive
  Class.forName(HIVE_JDBC_DRIVER);
  hiveConnection = HiveMetastoreUtil.getHiveConnection(
      getHiveJdbcUrl(),
      HadoopSecurityUtil.getLoginUser(conf),
      Collections.emptyList()
  );
  // And finally we're initialized
  isHiveInitialized = true;
}
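The getHiveJdbcUrl() helper in Example 8 is not shown; given the configuration above, it presumably builds a URL from HOSTNAME and HIVE_SERVER_PORT. A hypothetical reconstruction:

private static String getHiveJdbcUrl() {
  // assumed shape; the real helper may append connection parameters
  return "jdbc:hive2://" + HOSTNAME + ":" + HIVE_SERVER_PORT + "/default";
}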
Example 9: runLoad
import org.apache.hive.service.server.HiveServer2; // import the package/class this method depends on
private int runLoad(boolean disableMrUpdate) throws IOException, InterruptedException, ClassNotFoundException,
    SQLException {
  Configuration configuration = miniCluster.getMRConfiguration();
  writeSiteFiles(configuration);
  HiveConf hiveConf = new HiveConf(configuration, getClass());
  hiveConf.set("hive.server2.thrift.port", "0");
  HiveServer2 hiveServer2 = new HiveServer2();
  hiveServer2.init(hiveConf);
  hiveServer2.start();
  int port = waitForStartupAndGetPort(hiveServer2);
  Class.forName(HiveDriver.class.getName());
  String userName = UserGroupInformation.getCurrentUser().getShortUserName();
  Connection connection = DriverManager.getConnection("jdbc:hive2://localhost:" + port, userName, "");
  UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
  run(connection, "set blur.user.name=" + currentUser.getUserName());
  run(connection, "set blur.mr.update.disabled=" + disableMrUpdate);
  run(connection, "set hive.metastore.warehouse.dir=" + WAREHOUSE.toURI().toString());
  run(connection, "create database if not exists testdb");
  run(connection, "use testdb");
  run(connection, "CREATE TABLE if not exists testtable ROW FORMAT SERDE 'org.apache.blur.hive.BlurSerDe' "
      + "WITH SERDEPROPERTIES ( 'blur.zookeeper.connection'='" + miniCluster.getZkConnectionString() + "', "
      + "'blur.table'='" + TEST + "', 'blur.family'='" + FAM + "' ) "
      + "STORED BY 'org.apache.blur.hive.BlurHiveStorageHandler'");
  run(connection, "desc testtable");
  String createLoadTable = buildCreateLoadTable(connection);
  run(connection, createLoadTable);
  File dbDir = new File(WAREHOUSE, "testdb.db");
  File tableDir = new File(dbDir, "loadtable");
  int totalRecords = 100;
  generateData(tableDir, totalRecords);
  run(connection, "select * from loadtable");
  run(connection, "set " + BlurSerDe.BLUR_BLOCKING_APPLY + "=true");
  run(connection, "insert into table testtable select * from loadtable");
  connection.close();
  hiveServer2.stop();
  return totalRecords;
}
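Because Example 9 binds the Thrift port to 0 (pick any free port), its waitForStartupAndGetPort(hiveServer2) helper must discover the actual bound port. One plausible sketch, assuming the ThriftCLIService.getPortNumber() accessor is available in the Hive version in use (the method body is a reconstruction, not the original helper):

import org.apache.hive.service.Service;
import org.apache.hive.service.cli.thrift.ThriftCLIService;

private int waitForStartupAndGetPort(HiveServer2 hiveServer2) throws InterruptedException {
  for (Service service : hiveServer2.getServices()) {
    if (service instanceof ThriftCLIService) {
      ThriftCLIService cliService = (ThriftCLIService) service;
      int port;
      // poll until the service reports a bound (non-zero) port
      while ((port = cliService.getPortNumber()) == 0) {
        Thread.sleep(100);
      }
      return port;
    }
  }
  throw new IllegalStateException("ThriftCLIService not found");
}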