本文整理汇总了Java中org.apache.hadoop.hive.metastore.HiveMetaStore.startMetaStore方法的典型用法代码示例。如果您正苦于以下问题:Java HiveMetaStore.startMetaStore方法的具体用法?Java HiveMetaStore.startMetaStore怎么用?Java HiveMetaStore.startMetaStore使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hive.metastore.HiveMetaStore
的用法示例。
在下文中一共展示了HiveMetaStore.startMetaStore方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: run
import org.apache.hadoop.hive.metastore.HiveMetaStore; //导入方法依赖的package包/类
@Override
public void run() {
    try {
        // Launch the Hive metastore Thrift service on the configured port;
        // this call blocks for the lifetime of the service.
        HiveMetaStore.startMetaStore(hiveMetastorePort,
            new HadoopThriftAuthBridge(),
            hiveConf);
    } catch (Throwable startupFailure) {
        // Running on a background thread — there is no caller to propagate
        // to, so report the failure directly.
        startupFailure.printStackTrace();
    }
}
示例2: start
import org.apache.hadoop.hive.metastore.HiveMetaStore; //导入方法依赖的package包/类
/**
 * Starts an embedded Hive metastore on port 9083 backed by a local Derby
 * database under {@code target/}, running on a daemon thread, then waits a
 * fixed 10 seconds for it to come up.
 *
 * @throws IOException if the Derby log file cannot be created
 */
@Override
public void start() throws IOException {
    // Dedicated HiveConf pointing the metastore at a local Derby DB.
    final HiveConf serverConf = new HiveConf(new Configuration(), this.getClass());
    serverConf.set("hive.metastore.local", "false");
    serverConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, "jdbc:derby:target/metastore_db;create=true");
    //serverConf.set(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS.varname, NotificationListener.class.getName());
    // Redirect Derby's log output into the build directory.
    File derbyLogFile = new File("target/derby.log");
    derbyLogFile.createNewFile();
    setSystemProperty("derby.stream.error.file", derbyLogFile.getPath());
    serverThread = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                HiveMetaStore.startMetaStore(9083, ShimLoader.getHadoopThriftAuthBridge(),
                    serverConf);
            } catch (Throwable e) {
                // Fix: the failure was previously swallowed silently (empty
                // catch), making startup problems invisible. Report it; there
                // is no caller on this thread to rethrow to.
                e.printStackTrace();
            }
        }
    });
    serverThread.setDaemon(true);
    serverThread.start();
    try {
        // Crude readiness wait: give the metastore time to bind its port.
        Thread.sleep(10000L);
    } catch (InterruptedException e) {
        // Fix: restore the interrupt flag instead of swallowing it, so the
        // caller can observe the interruption.
        Thread.currentThread().interrupt();
    }
}
示例3: startMetastore
import org.apache.hadoop.hive.metastore.HiveMetaStore; //导入方法依赖的package包/类
/**
 * Submits the Hive metastore service to the shared executor. Any failure
 * during startup is wrapped in an {@link Exception} and surfaces through
 * the submitted task's Future.
 *
 * @throws Exception declared for signature compatibility with callers
 */
private void startMetastore() throws Exception {
    Callable<Void> metastoreService = () -> {
        try {
            HiveMetaStore.startMetaStore(getMetastorePort(conf),
                ShimLoader.getHadoopThriftAuthBridge(), conf);
        } catch (Throwable cause) {
            throw new Exception("Error starting metastore", cause);
        }
        return null;
    };
    metaStoreExecutor.submit(metastoreService);
}
示例4: setUpClass
import org.apache.hadoop.hive.metastore.HiveMetaStore; //导入方法依赖的package包/类
/**
* Start all required mini clusters.
*/
@BeforeClass
public static void setUpClass() throws Exception {
// Conf dir
if (!new File(confDir).mkdirs()) {
fail("Failed to create config directories.");
}
// HDFS
File minidfsDir = new File("target/minidfs").getAbsoluteFile();
if (!minidfsDir.exists()) {
Assert.assertTrue(minidfsDir.mkdirs());
}
Set<PosixFilePermission> set = new HashSet<>();
set.add(PosixFilePermission.OWNER_EXECUTE);
set.add(PosixFilePermission.OWNER_READ);
set.add(PosixFilePermission.OWNER_WRITE);
set.add(PosixFilePermission.OTHERS_READ);
java.nio.file.Files.setPosixFilePermissions(minidfsDir.toPath(), set);
System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
final Configuration conf = new HdfsConfiguration();
conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
miniDFS = new MiniDFSCluster.Builder(conf).build();
miniDFS.getFileSystem().setPermission(new Path("/"), FsPermission.createImmutable((short)0777));
miniMR = MiniMRClientClusterFactory.create(BaseHiveIT.class, 1, conf);
writeConfiguration(miniMR.getConfig(), confDir + "/core-site.xml");
writeConfiguration(miniMR.getConfig(), confDir + "/hdfs-site.xml");
writeConfiguration(miniMR.getConfig(), confDir + "/mapred-site.xml");
writeConfiguration(miniMR.getConfig(), confDir + "/yarn-site.xml");
// Configuration for both HMS and HS2
METASTORE_PORT = NetworkUtils.getRandomPort();
HIVE_SERVER_PORT = NetworkUtils.getRandomPort();
final HiveConf hiveConf = new HiveConf(miniDFS.getConfiguration(0), HiveConf.class);
hiveConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, "jdbc:derby:;databaseName=target/metastore_db;create=true");
hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, Utils.format("thrift://{}:{}", HOSTNAME, METASTORE_PORT));
hiveConf.set(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST.varname, "localhost");
hiveConf.set("org.jpox.autoCreateSchema", "true");
hiveConf.set("datanucleus.schema.autoCreateTables", "true");
hiveConf.set("hive.metastore.schema.verification", "false");
hiveConf.setInt(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT.varname, HIVE_SERVER_PORT);
// Hive metastore
Callable<Void> metastoreService = () -> {
try {
HiveMetaStore.startMetaStore(METASTORE_PORT, ShimLoader.getHadoopThriftAuthBridge(), hiveConf);
while(true);
} catch (Throwable e) {
throw new Exception("Error starting metastore", e);
}
};
hiveMetastoreExecutor.submit(metastoreService);
NetworkUtils.waitForStartUp(HOSTNAME, METASTORE_PORT, MINICLUSTER_BOOT_RETRY, MINICLUSTER_BOOT_SLEEP);
// HiveServer 2
hiveServer2 = new HiveServer2();
hiveServer2.init(hiveConf);
hiveServer2.start();
writeConfiguration(hiveServer2.getHiveConf(), confDir + "/hive-site.xml");
NetworkUtils.waitForStartUp(HOSTNAME, HIVE_SERVER_PORT, MINICLUSTER_BOOT_RETRY, MINICLUSTER_BOOT_SLEEP);
// JDBC Connection to Hive
Class.forName(HIVE_JDBC_DRIVER);
hiveConnection = HiveMetastoreUtil.getHiveConnection(
getHiveJdbcUrl(),
HadoopSecurityUtil.getLoginUser(conf),
Collections.emptyList()
);
// And finally we're initialized
isHiveInitialized = true;
}