This article collects typical usage examples of the SessionState.get method from the Java class org.apache.hadoop.hive.ql.session.SessionState. If you have been wondering what SessionState.get does, how to call it, or what real-world usage looks like, the curated method examples below may help. You can also explore further usage examples of the containing class, org.apache.hadoop.hive.ql.session.SessionState.
The following presents 15 code examples of the SessionState.get method, sorted by popularity by default. You can upvote any example you like or find useful; your feedback helps the system recommend better Java code examples.
Example 1: getResourceFiles
import org.apache.hadoop.hive.ql.session.SessionState; // import the package/class the method depends on
public static String getResourceFiles(Configuration conf, SessionState.ResourceType t) {
  // fill in local files to be added to the task environment
  SessionState ss = SessionState.get();
  Set<String> files = (ss == null) ? null : ss.list_resource(t, null);
  if (files != null) {
    List<String> realFiles = new ArrayList<String>(files.size());
    for (String one : files) {
      try {
        realFiles.add(realFile(one, conf));
      } catch (IOException e) {
        throw new RuntimeException("Cannot validate file " + one + " due to exception: "
            + e.getMessage(), e);
      }
    }
    return StringUtils.join(realFiles, ",");
  } else {
    return "";
  }
}
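A minimal usage sketch for the method above: a hypothetical caller pushes the comma-separated list into a job configuration. The "tmpfiles" key mirrors the property Hadoop reads for files to ship with a job, but it is an assumption here, not part of the original example.
Configuration conf = new Configuration();
String files = getResourceFiles(conf, SessionState.ResourceType.FILE);
if (!files.isEmpty()) {
  conf.set("tmpfiles", files); // assumed key; verify against your Hadoop version
}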
Example 2: getSessionSpecifiedClassLoader
import org.apache.hadoop.hive.ql.session.SessionState; // import the package/class the method depends on
/**
 * Get the session-specified class loader, or fall back to the current
 * thread-based class loader if the session is unavailable.
 *
 * @return the class loader to use
 */
public static ClassLoader getSessionSpecifiedClassLoader() {
  SessionState state = SessionState.get();
  if (state == null || state.getConf() == null) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Hive Conf not found or Session not initiated, use thread based class loader instead");
    }
    return JavaUtils.getClassLoader();
  }
  ClassLoader sessionCL = state.getConf().getClassLoader();
  if (sessionCL != null) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("Use session specified class loader"); // the normal case
    }
    return sessionCL;
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Session specified class loader not found, use thread based class loader");
  }
  return JavaUtils.getClassLoader();
}
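As a usage sketch, a caller would typically install this loader on the current thread while resolving classes from session-added jars, and restore the previous loader afterwards (Example 5 below shows the restore counterpart); the surrounding code is hypothetical:
ClassLoader prev = Thread.currentThread().getContextClassLoader();
try {
  Thread.currentThread().setContextClassLoader(getSessionSpecifiedClassLoader());
  // ... resolve UDF or SerDe classes from session-added jars here ...
} finally {
  Thread.currentThread().setContextClassLoader(prev);
}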
Example 3: analyzeGrantRevokeRole
import org.apache.hadoop.hive.ql.session.SessionState; // import the package/class the method depends on
private Task<? extends Serializable> analyzeGrantRevokeRole(boolean isGrant, ASTNode ast,
    HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs) throws SemanticException {
  List<PrincipalDesc> principalDesc = analyzePrincipalListDef(
      (ASTNode) ast.getChild(0));
  List<String> roles = new ArrayList<String>();
  for (int i = 1; i < ast.getChildCount(); i++) {
    roles.add(BaseSemanticAnalyzer.unescapeIdentifier(ast.getChild(i).getText()));
  }
  String roleOwnerName = "";
  if (SessionState.get() != null
      && SessionState.get().getAuthenticator() != null) {
    roleOwnerName = SessionState.get().getAuthenticator().getUserName();
  }
  for (PrincipalDesc princ : principalDesc) {
    if (princ.getType() != PrincipalType.GROUP) {
      String msg = SentryHiveConstants.GRANT_REVOKE_NOT_SUPPORTED_ON_OBJECT + princ.getType();
      throw new SemanticException(msg);
    }
  }
  GrantRevokeRoleDDL grantRevokeRoleDDL = new GrantRevokeRoleDDL(isGrant,
      roles, principalDesc, roleOwnerName, PrincipalType.USER, false);
  return createTask(new DDLWork(inputs, outputs, grantRevokeRoleDDL));
}
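The null-guarded lookup of the current user recurs across these Sentry examples (Example 13 repeats it almost verbatim); extracted as a hypothetical helper for clarity:
// Sketch only: resolve the session user, or "" when no session/authenticator exists.
static String currentUserOrEmpty() {
  SessionState ss = SessionState.get();
  return (ss != null && ss.getAuthenticator() != null)
      ? ss.getAuthenticator().getUserName() : "";
}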
Example 4: HiveAuthzBindingHook
import org.apache.hadoop.hive.ql.session.SessionState; // import the package/class the method depends on
public HiveAuthzBindingHook() throws Exception {
  SessionState session = SessionState.get();
  if (session == null) {
    throw new IllegalStateException("Session has not been started");
  }
  // HACK: set a random classname to force the Auth V2 in Hive
  SessionState.get().setAuthorizer(null);
  HiveConf hiveConf = session.getConf();
  if (hiveConf == null) {
    throw new IllegalStateException("Session HiveConf is null");
  }
  authzConf = loadAuthzConf(hiveConf);
  hiveAuthzBinding = new HiveAuthzBinding(hiveConf, authzConf);
  String serdeWhiteLists = authzConf.get(HiveAuthzConf.HIVE_SENTRY_SERDE_WHITELIST,
      HiveAuthzConf.HIVE_SENTRY_SERDE_WHITELIST_DEFAULT);
  serdeWhiteList = Arrays.asList(serdeWhiteLists.split(","));
  serdeURIPrivilegesEnabled = authzConf.getBoolean(HiveAuthzConf.HIVE_SENTRY_SERDE_URI_PRIVILIEGES_ENABLED,
      HiveAuthzConf.HIVE_SENTRY_SERDE_URI_PRIVILIEGES_ENABLED_DEFAULT);
  FunctionRegistry.setupPermissionsForBuiltinUDFs("", HiveAuthzConf.HIVE_UDF_BLACK_LIST);
}
Example 5: restoreSessionSpecifiedClassLoader
import org.apache.hadoop.hive.ql.session.SessionState; // import the package/class the method depends on
public static void restoreSessionSpecifiedClassLoader(ClassLoader prev) {
  SessionState state = SessionState.get();
  if (state != null && state.getConf() != null) {
    ClassLoader current = state.getConf().getClassLoader();
    if (current != prev && JavaUtils.closeClassLoadersTo(current, prev)) {
      Thread.currentThread().setContextClassLoader(prev);
      state.getConf().setClassLoader(prev);
    }
  }
}
Example 6: SessionStateLite
import org.apache.hadoop.hive.ql.session.SessionState; // import the package/class the method depends on
/**
 * Creates a lightweight representation of the session state.
 *
 * @param plan The Hive query plan
 */
public SessionStateLite(QueryPlan plan) {
  SessionState sessionState = SessionState.get();
  this.conf = new HiveConf(sessionState.getConf());
  this.cmd = plan.getQueryStr();
  this.commandType = plan.getOperationName();
  this.queryId = plan.getQueryId();
  this.mapRedStats = new HashMap<>(sessionState.getMapRedStats());
}
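Note that, unlike most examples on this page, this constructor dereferences SessionState.get() without a null check, so it must run inside an active Hive session. A hypothetical guard a caller might add (plan is assumed to be in scope):
if (SessionState.get() == null) {
  throw new IllegalStateException("No active Hive session");
}
SessionStateLite lite = new SessionStateLite(plan);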
Example 7: getTableQualifiedName
import org.apache.hadoop.hive.ql.session.SessionState; // import the package/class the method depends on
/**
 * Construct the qualified name used to uniquely identify a Table instance in Atlas.
 * @param clusterName Name of the cluster to which the Hive component belongs
 * @param dbName Name of the Hive database to which the Table belongs
 * @param tableName Name of the Hive table
 * @return Unique qualified name to identify the Table instance in Atlas.
 */
public static String getTableQualifiedName(String clusterName, String dbName, String tableName, boolean isTemporaryTable) {
  String tableTempName = tableName;
  if (isTemporaryTable) {
    if (SessionState.get() != null && SessionState.get().getSessionId() != null) {
      tableTempName = tableName + TEMP_TABLE_PREFIX + SessionState.get().getSessionId();
    } else {
      tableTempName = tableName + TEMP_TABLE_PREFIX + RandomStringUtils.random(10);
    }
  }
  return String.format("%s.%s@%s", dbName.toLowerCase(), tableTempName.toLowerCase(), clusterName);
}
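For illustration, a hypothetical call for a permanent table (names made up; for temporary tables the session id or a random suffix is appended via TEMP_TABLE_PREFIX, whose value is defined elsewhere in the hook):
String qn = getTableQualifiedName("primary", "Sales", "Customers", false);
// qn == "sales.customers@primary"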
Example 8: train
import org.apache.hadoop.hive.ql.session.SessionState; // import the package/class the method depends on
public String train(String table, String algorithm, String[] args) throws LensException {
  MLAlgo algo = getAlgoForName(algorithm);
  String modelId = UUID.randomUUID().toString();
  log.info("Begin training model " + modelId + ", algo=" + algorithm + ", table=" + table + ", params="
      + Arrays.toString(args));
  String database = null;
  if (SessionState.get() != null) {
    database = SessionState.get().getCurrentDatabase();
  } else {
    database = "default";
  }
  MLModel model = algo.train(toLensConf(conf), database, table, modelId, args);
  log.info("Done training model: " + modelId);
  model.setCreatedAt(new Date());
  model.setAlgoName(algorithm);
  Path modelLocation = null;
  try {
    modelLocation = persistModel(model);
    log.info("Model saved: " + modelId + ", algo: " + algorithm + ", path: " + modelLocation);
    return model.getId();
  } catch (IOException e) {
    throw new LensException("Error saving model " + modelId + " for algo " + algorithm, e);
  }
}
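The current-database fallback seen here also appears in Example 9 (which additionally guards against getCurrentDatabase() returning null); as a hypothetical helper combining both checks:
// Sketch only: SessionState is thread-local, so outside a session use "default".
static String currentDatabaseOrDefault() {
  SessionState ss = SessionState.get();
  String db = (ss == null) ? null : ss.getCurrentDatabase();
  return (db == null) ? "default" : db;
}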
Example 9: AbstractQueryContext
import org.apache.hadoop.hive.ql.session.SessionState; // import the package/class the method depends on
protected AbstractQueryContext(final String query, final String user, final LensConf qconf, final Configuration conf,
    final Collection<LensDriver> drivers, boolean mergeDriverConf) {
  if (conf.getBoolean(LensConfConstants.ENABLE_QUERY_METRICS, LensConfConstants.DEFAULT_ENABLE_QUERY_METRICS)) {
    UUID metricId = UUID.randomUUID();
    conf.set(LensConfConstants.QUERY_METRIC_UNIQUE_ID_CONF_KEY, metricId.toString());
    log.info("Generated metric id: {} for query: {}", metricId, query);
  }
  driverContext = new DriverSelectorQueryContext(query, conf, drivers, mergeDriverConf);
  userQuery = query;
  phase1RewrittenQuery = query;
  this.lensConf = qconf;
  this.conf = conf;
  this.submittedUser = user;
  // selectedDriverQuery is set to the user query only when there is exactly one driver;
  // with more than one driver, the driver query is set after driver selection
  if (drivers != null && drivers.size() == 1) {
    this.selectedDriverQuery = query;
    setSelectedDriver(drivers.iterator().next());
  }
  // If this is created under an 'acquire', the current db would be set
  if (SessionState.get() != null) {
    String currDb = SessionState.get().getCurrentDatabase();
    database = currDb == null ? "default" : currDb;
  } else {
    database = "default";
  }
}
Example 10: get
import org.apache.hadoop.hive.ql.session.SessionState; // import the package/class the method depends on
public static CommandProcessor get(String cmd, HiveConf conf) {
  String cmdl = cmd.toLowerCase();
  if ("set".equals(cmdl)) {
    return new SetProcessor();
  } else if ("reset".equals(cmdl)) {
    return new ResetProcessor();
  } else if ("dfs".equals(cmdl)) {
    SessionState ss = SessionState.get();
    return new DfsProcessor(ss.getConf());
  } else if ("add".equals(cmdl)) {
    return new AddResourceProcessor();
  } else if ("delete".equals(cmdl)) {
    return new DeleteResourceProcessor();
  } else if (!isBlank(cmd)) {
    if (conf == null) {
      return new SkinDriver();
    }
    SkinDriver drv = (SkinDriver) mapDrivers.get(conf);
    if (drv == null) {
      drv = new SkinDriver();
      mapDrivers.put(conf, drv);
    }
    drv.init();
    return drv;
  }
  return null;
}
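A usage sketch of the dispatch above: built-in commands map to dedicated processors, while any other non-blank command falls through to a SkinDriver cached per HiveConf. Following the Hive CLI convention, run() receives the command text after the first token; this caller is hypothetical:
CommandProcessor proc = get("set", new HiveConf());
proc.run("hive.exec.parallel=true"); // older Hive versions declare CommandNeedRetryException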
Example 11: restoreSession
import org.apache.hadoop.hive.ql.session.SessionState; // import the package/class the method depends on
public void restoreSession(QueryState qs) {
  SessionState ss = SessionState.get();
  if (ss != null && qs != null && qs.isInitialized()) {
    ss.setCmd(qs.getCmd());
    ss.setCommandType(qs.getOp());
  }
}
Example 12: launchTask
import org.apache.hadoop.hive.ql.session.SessionState; // import the package/class the method depends on
/**
 * Launches a new task.
 *
 * @param tsk task being launched
 * @param queryId Id of the query containing the task
 * @param noName whether the task has a name set
 * @param running map from taskresults to taskrunners
 * @param jobname name of the task, if it is a map-reduce job
 * @param jobs number of map-reduce jobs
 * @param cxt the driver context
 */
public void launchTask(Task<? extends Serializable> tsk, String queryId, boolean noName,
    Map<TaskResult, TaskRunner> running, String jobname, int jobs, DriverContext cxt) {
  if (SessionState.get() != null) {
    SessionState.get().getHiveHistory().startTask(queryId, tsk, tsk.getClass().getName());
  }
  if (tsk.isMapRedTask() && !(tsk instanceof ConditionalTask)) {
    if (noName) {
      conf.setVar(HiveConf.ConfVars.HADOOPJOBNAME, jobname + "(" + tsk.getId() + ")");
    }
    conf.set("mapreduce.workflow.node.name", tsk.getId());
    Utilities.setWorkflowAdjacencies(conf, plan);
    cxt.incCurJobNo(1);
    console.printInfo("Launching Job " + cxt.getCurJobNo() + " out of " + jobs);
  }
  tsk.initialize(conf, plan, cxt);
  TaskResult tskRes = new TaskResult();
  TaskRunner tskRun = new TaskRunner(tsk, tskRes);
  // Launch Task
  if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.EXECPARALLEL) && tsk.isMapRedTask()) {
    // Launch it in the parallel mode, as a separate thread only for MR tasks
    tskRun.start();
  } else {
    tskRun.runSequential();
  }
  running.put(tskRes, tskRun);
  return;
}
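The final branch keys off hive.exec.parallel: only map-reduce tasks are started on their own thread, everything else runs sequentially. A small sketch of enabling the flag programmatically via the standard HiveConf API:
HiveConf conf = new HiveConf();
// Corresponds to the hive.exec.parallel property read above.
conf.setBoolVar(HiveConf.ConfVars.EXECPARALLEL, true);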
Example 13: createGrantTask
import org.apache.hadoop.hive.ql.session.SessionState; // import the package/class the method depends on
@Override
public Task<? extends Serializable> createGrantTask(ASTNode ast, HashSet<ReadEntity> inputs,
    HashSet<WriteEntity> outputs) throws SemanticException {
  List<PrivilegeDesc> privilegeDesc = analyzePrivilegeListDef(
      (ASTNode) ast.getChild(0));
  List<PrincipalDesc> principalDesc = analyzePrincipalListDef(
      (ASTNode) ast.getChild(1));
  SentryHivePrivilegeObjectDesc privilegeObj = null;
  boolean grantOption = false;
  if (ast.getChildCount() > 2) {
    for (int i = 2; i < ast.getChildCount(); i++) {
      ASTNode astChild = (ASTNode) ast.getChild(i);
      if (astChild.getType() == HiveParser.TOK_GRANT_WITH_OPTION) {
        grantOption = true;
      } else if (astChild.getType() == HiveParser.TOK_PRIV_OBJECT) {
        privilegeObj = analyzePrivilegeObject(astChild);
      }
    }
  }
  String userName = null;
  if (SessionState.get() != null
      && SessionState.get().getAuthenticator() != null) {
    userName = SessionState.get().getAuthenticator().getUserName();
  }
  Preconditions.checkNotNull(privilegeObj, "privilegeObj is null for " + ast.dump());
  if (privilegeObj.getPartSpec() != null) {
    throw new SemanticException(SentryHiveConstants.PARTITION_PRIVS_NOT_SUPPORTED);
  }
  for (PrincipalDesc princ : principalDesc) {
    if (princ.getType() != PrincipalType.ROLE) {
      String msg = SentryHiveConstants.GRANT_REVOKE_NOT_SUPPORTED_FOR_PRINCIPAL + princ.getType();
      throw new SemanticException(msg);
    }
  }
  GrantDesc grantDesc = new GrantDesc(privilegeObj, privilegeDesc,
      principalDesc, userName, PrincipalType.USER, grantOption);
  return createTask(new DDLWork(inputs, outputs, grantDesc));
}
Example 14: getCurrentHiveStmtOp
import org.apache.hadoop.hive.ql.session.SessionState; // import the package/class the method depends on
private HiveOperation getCurrentHiveStmtOp() {
  SessionState sessState = SessionState.get();
  if (sessState == null) {
    // TODO: Warn
    return null;
  }
  return sessState.getHiveOperation();
}
Example 15: HiveAuthzBindingHookV2
import org.apache.hadoop.hive.ql.session.SessionState; // import the package/class the method depends on
public HiveAuthzBindingHookV2() throws Exception {
  SessionState session = SessionState.get();
  if (session == null) {
    throw new IllegalStateException("Session has not been started");
  }
  HiveConf hiveConf = session.getConf();
  if (hiveConf == null) {
    throw new IllegalStateException("Session HiveConf is null");
  }
  authzConf = HiveAuthzBindingHook.loadAuthzConf(hiveConf);
  hiveAuthzBinding = new HiveAuthzBinding(hiveConf, authzConf);
}