This article collects typical usage examples of the Java method org.apache.hadoop.security.UserGroupInformation.getCurrentUser. If you are wondering what UserGroupInformation.getCurrentUser does, how to call it, or where to find it used in real code, the selected samples below should help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.security.UserGroupInformation.
The sections below show 15 code examples of the UserGroupInformation.getCurrentUser method, sorted by popularity by default.
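Before the examples, here is a minimal, self-contained sketch of the usual call pattern: obtain the caller's UserGroupInformation, read its short user name, and run privileged work through doAs. UserGroupInformation.getCurrentUser, getShortUserName, doAs, FileSystem.get, and FileSystem.exists are real Hadoop APIs; the CurrentUserSketch class name, the printed messages, and the /tmp path are illustrative assumptions only, not taken from the examples that follow.

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class CurrentUserSketch {
  public static void main(String[] args) throws Exception {
    // getCurrentUser() resolves the UGI of the current caller; it declares
    // IOException, which is why many examples below wrap it in try/catch.
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    System.out.println("Current user: " + ugi.getShortUserName());

    // A common follow-up: perform Hadoop work as that user via doAs().
    // The path checked here is purely illustrative.
    boolean exists = ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
      @Override
      public Boolean run() throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        return fs.exists(new Path("/tmp"));
      }
    });
    System.out.println("/tmp exists under the default FileSystem: " + exists);
  }
}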
Example 1: testInheritedQueueAcls
import org.apache.hadoop.security.UserGroupInformation; // import the package/class this method depends on
@Test
public void testInheritedQueueAcls() throws IOException {
  UserGroupInformation user = UserGroupInformation.getCurrentUser();

  LeafQueue a = stubLeafQueue((LeafQueue)queues.get(A));
  LeafQueue b = stubLeafQueue((LeafQueue)queues.get(B));
  ParentQueue c = (ParentQueue)queues.get(C);
  LeafQueue c1 = stubLeafQueue((LeafQueue)queues.get(C1));

  assertFalse(root.hasAccess(QueueACL.SUBMIT_APPLICATIONS, user));
  assertTrue(a.hasAccess(QueueACL.SUBMIT_APPLICATIONS, user));
  assertTrue(b.hasAccess(QueueACL.SUBMIT_APPLICATIONS, user));
  assertFalse(c.hasAccess(QueueACL.SUBMIT_APPLICATIONS, user));
  assertFalse(c1.hasAccess(QueueACL.SUBMIT_APPLICATIONS, user));

  assertTrue(hasQueueACL(
      a.getQueueUserAclInfo(user), QueueACL.SUBMIT_APPLICATIONS));
  assertTrue(hasQueueACL(
      b.getQueueUserAclInfo(user), QueueACL.SUBMIT_APPLICATIONS));
  assertFalse(hasQueueACL(
      c.getQueueUserAclInfo(user), QueueACL.SUBMIT_APPLICATIONS));
  assertFalse(hasQueueACL(
      c1.getQueueUserAclInfo(user), QueueACL.SUBMIT_APPLICATIONS));
}
Example 2: killTaskAttempt
import org.apache.hadoop.security.UserGroupInformation; // import the package/class this method depends on
@SuppressWarnings("unchecked")
@Override
public KillTaskAttemptResponse killTaskAttempt(
    KillTaskAttemptRequest request) throws IOException {
  TaskAttemptId taskAttemptId = request.getTaskAttemptId();
  UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser();
  String message = "Kill task attempt " + taskAttemptId
      + " received from " + callerUGI + " at "
      + Server.getRemoteAddress();
  LOG.info(message);
  verifyAndGetAttempt(taskAttemptId, JobACL.MODIFY_JOB);
  appContext.getEventHandler().handle(
      new TaskAttemptDiagnosticsUpdateEvent(taskAttemptId, message));
  appContext.getEventHandler().handle(
      new TaskAttemptEvent(taskAttemptId,
          TaskAttemptEventType.TA_KILL));
  KillTaskAttemptResponse response =
      recordFactory.newRecordInstance(KillTaskAttemptResponse.class);
  return response;
}
Example 3: checkReservationACLs
import org.apache.hadoop.security.UserGroupInformation; // import the package/class this method depends on
private String checkReservationACLs(String queueName, String auditConstant)
    throws YarnException {
  UserGroupInformation callerUGI;
  try {
    callerUGI = UserGroupInformation.getCurrentUser();
  } catch (IOException ie) {
    RMAuditLogger.logFailure("UNKNOWN", auditConstant, queueName,
        "ClientRMService", "Error getting UGI");
    throw RPCUtil.getRemoteException(ie);
  }

  // Check if user has access on the managed queue
  if (!queueACLsManager.checkAccess(callerUGI, QueueACL.SUBMIT_APPLICATIONS,
      queueName)) {
    RMAuditLogger.logFailure(
        callerUGI.getShortUserName(),
        auditConstant,
        "User doesn't have permissions to "
            + QueueACL.SUBMIT_APPLICATIONS.toString(), "ClientRMService",
        AuditConstants.UNAUTHORIZED_USER);
    throw RPCUtil.getRemoteException(new AccessControlException("User "
        + callerUGI.getShortUserName() + " cannot perform operation "
        + QueueACL.SUBMIT_APPLICATIONS.name() + " on queue " + queueName));
  }
  return callerUGI.getShortUserName();
}
Example 4: testCompareGridmixJob
import org.apache.hadoop.security.UserGroupInformation; // import the package/class this method depends on
@Test (timeout=30000)
public void testCompareGridmixJob() throws Exception {
  Configuration conf = new Configuration();
  Path outRoot = new Path("target");
  JobStory jobDesc = mock(JobStory.class);
  when(jobDesc.getName()).thenReturn("JobName");
  when(jobDesc.getJobConf()).thenReturn(new JobConf(conf));
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

  GridmixJob j1 = new LoadJob(conf, 1000L, jobDesc, outRoot, ugi, 0);
  GridmixJob j2 = new LoadJob(conf, 1000L, jobDesc, outRoot, ugi, 0);
  GridmixJob j3 = new LoadJob(conf, 1000L, jobDesc, outRoot, ugi, 1);
  GridmixJob j4 = new LoadJob(conf, 1000L, jobDesc, outRoot, ugi, 1);

  assertTrue(j1.equals(j2));
  assertEquals(0, j1.compareTo(j2));
  // Only one parameter matters
  assertFalse(j1.equals(j3));
  // compare id and submissionMillis
  assertEquals(-1, j1.compareTo(j3));
  assertEquals(-1, j1.compareTo(j4));
}
Example 5: checkRpcAdminAccess
import org.apache.hadoop.security.UserGroupInformation; // import the package/class this method depends on
@Override
protected void checkRpcAdminAccess() throws IOException, AccessControlException {
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  UserGroupInformation zkfcUgi = UserGroupInformation.getLoginUser();
  if (adminAcl.isUserAllowed(ugi) ||
      ugi.getShortUserName().equals(zkfcUgi.getShortUserName())) {
    LOG.info("Allowed RPC access from " + ugi + " at " + Server.getRemoteAddress());
    return;
  }
  String msg = "Disallowed RPC access from " + ugi + " at " +
      Server.getRemoteAddress() + ". Not listed in " + DFSConfigKeys.DFS_ADMIN;
  LOG.warn(msg);
  throw new AccessControlException(msg);
}
Example 6: connectToServerAndGetDelegationToken
import org.apache.hadoop.security.UserGroupInformation; // import the package/class this method depends on
void connectToServerAndGetDelegationToken(
    final Configuration conf, final InetSocketAddress addr) throws IOException {
  MiniProtocol client = null;
  try {
    UserGroupInformation current = UserGroupInformation.getCurrentUser();
    UserGroupInformation proxyUserUgi =
        UserGroupInformation.createProxyUserForTesting(
            MINI_USER, current, GROUP_NAMES);

    try {
      client = proxyUserUgi.doAs(new PrivilegedExceptionAction<MiniProtocol>() {
        @Override
        public MiniProtocol run() throws IOException {
          MiniProtocol p = RPC.getProxy(MiniProtocol.class,
              MiniProtocol.versionID, addr, conf);
          Token<TestDelegationTokenIdentifier> token;
          token = p.getDelegationToken(new Text(RENEWER));
          currentUgi = UserGroupInformation.createUserForTesting(MINI_USER,
              GROUP_NAMES);
          SecurityUtil.setTokenService(token, addr);
          currentUgi.addToken(token);
          return p;
        }
      });
    } catch (InterruptedException e) {
      Assert.fail(Arrays.toString(e.getStackTrace()));
    }
  } finally {
    RPC.stopProxy(client);
  }
}
Example 7: getQueueUserAclInfo
import org.apache.hadoop.security.UserGroupInformation; // import the package/class this method depends on
@Override
public List<QueueUserACLInfo> getQueueUserAclInfo() {
  UserGroupInformation user;
  try {
    user = UserGroupInformation.getCurrentUser();
  } catch (IOException ioe) {
    return new ArrayList<QueueUserACLInfo>();
  }
  return queueMgr.getRootQueue().getQueueUserAclInfo(user);
}
Example 8: serviceStart
import org.apache.hadoop.security.UserGroupInformation; // import the package/class this method depends on
@Override
protected void serviceStart() throws Exception {
  if (UserGroupInformation.isSecurityEnabled()) {
    loginUGI = UserGroupInformation.getLoginUser();
  } else {
    loginUGI = UserGroupInformation.getCurrentUser();
  }
  clientRpcServer.start();
}
Example 9: checkAccess
import org.apache.hadoop.security.UserGroupInformation; // import the package/class this method depends on
private void checkAccess(Job job, JobACL jobOperation)
    throws IOException {
  UserGroupInformation callerUGI;
  callerUGI = UserGroupInformation.getCurrentUser();
  if (!job.checkAccess(callerUGI, jobOperation)) {
    throw new IOException(new AccessControlException("User "
        + callerUGI.getShortUserName() + " cannot perform operation "
        + jobOperation.name() + " on " + job.getID()));
  }
}
Example 10: getApplicationAttempts
import org.apache.hadoop.security.UserGroupInformation; // import the package/class this method depends on
@Override
public GetApplicationAttemptsResponse getApplicationAttempts(
    GetApplicationAttemptsRequest request) throws YarnException, IOException {
  ApplicationId appId = request.getApplicationId();
  UserGroupInformation callerUGI;
  try {
    callerUGI = UserGroupInformation.getCurrentUser();
  } catch (IOException ie) {
    LOG.info("Error getting UGI ", ie);
    throw RPCUtil.getRemoteException(ie);
  }
  RMApp application = this.rmContext.getRMApps().get(appId);
  if (application == null) {
    // If the RM doesn't have the application, throw
    // ApplicationNotFoundException and let the client handle it.
    throw new ApplicationNotFoundException("Application with id '" + appId
        + "' doesn't exist in RM.");
  }
  boolean allowAccess = checkAccess(callerUGI, application.getUser(),
      ApplicationAccessType.VIEW_APP, application);
  GetApplicationAttemptsResponse response = null;
  if (allowAccess) {
    Map<ApplicationAttemptId, RMAppAttempt> attempts = application
        .getAppAttempts();
    List<ApplicationAttemptReport> listAttempts =
        new ArrayList<ApplicationAttemptReport>();
    Iterator<Map.Entry<ApplicationAttemptId, RMAppAttempt>> iter = attempts
        .entrySet().iterator();
    while (iter.hasNext()) {
      listAttempts.add(iter.next().getValue()
          .createApplicationAttemptReport());
    }
    response = GetApplicationAttemptsResponse.newInstance(listAttempts);
  } else {
    throw new YarnException("User " + callerUGI.getShortUserName()
        + " does not have privilege to see this application " + appId);
  }
  return response;
}
Example 11: release
import org.apache.hadoop.security.UserGroupInformation; // import the package/class this method depends on
@Override
public ReleaseSharedCacheResourceResponse release(
    ReleaseSharedCacheResourceRequest request) throws YarnException,
    IOException {
  ReleaseSharedCacheResourceResponse response =
      recordFactory
          .newRecordInstance(ReleaseSharedCacheResourceResponse.class);
  UserGroupInformation callerUGI;
  try {
    callerUGI = UserGroupInformation.getCurrentUser();
  } catch (IOException ie) {
    LOG.info("Error getting UGI ", ie);
    throw RPCUtil.getRemoteException(ie);
  }
  boolean removed =
      this.store.removeResourceReference(
          request.getResourceKey(),
          new SharedCacheResourceReference(request.getAppId(), callerUGI
              .getShortUserName()), true);
  if (removed) {
    this.metrics.incCacheRelease();
  }
  return response;
}
Example 12: forceKillApplication
import org.apache.hadoop.security.UserGroupInformation; // import the package/class this method depends on
@SuppressWarnings("unchecked")
@Override
public KillApplicationResponse forceKillApplication(
    KillApplicationRequest request) throws YarnException {
  ApplicationId applicationId = request.getApplicationId();
  UserGroupInformation callerUGI;
  try {
    callerUGI = UserGroupInformation.getCurrentUser();
  } catch (IOException ie) {
    LOG.info("Error getting UGI ", ie);
    RMAuditLogger.logFailure("UNKNOWN", AuditConstants.KILL_APP_REQUEST,
        "UNKNOWN", "ClientRMService", "Error getting UGI",
        applicationId);
    throw RPCUtil.getRemoteException(ie);
  }

  RMApp application = this.rmContext.getRMApps().get(applicationId);
  if (application == null) {
    RMAuditLogger.logFailure(callerUGI.getUserName(),
        AuditConstants.KILL_APP_REQUEST, "UNKNOWN", "ClientRMService",
        "Trying to kill an absent application", applicationId);
    throw new ApplicationNotFoundException("Trying to kill an absent"
        + " application " + applicationId);
  }

  if (!checkAccess(callerUGI, application.getUser(),
      ApplicationAccessType.MODIFY_APP, application)) {
    RMAuditLogger.logFailure(callerUGI.getShortUserName(),
        AuditConstants.KILL_APP_REQUEST,
        "User doesn't have permissions to "
            + ApplicationAccessType.MODIFY_APP.toString(), "ClientRMService",
        AuditConstants.UNAUTHORIZED_USER, applicationId);
    throw RPCUtil.getRemoteException(new AccessControlException("User "
        + callerUGI.getShortUserName() + " cannot perform operation "
        + ApplicationAccessType.MODIFY_APP.name() + " on " + applicationId));
  }

  if (application.isAppFinalStateStored()) {
    RMAuditLogger.logSuccess(callerUGI.getShortUserName(),
        AuditConstants.KILL_APP_REQUEST, "ClientRMService", applicationId);
    return KillApplicationResponse.newInstance(true);
  }

  this.rmContext.getDispatcher().getEventHandler()
      .handle(new RMAppEvent(applicationId, RMAppEventType.KILL));

  // For UnmanagedAMs, return true so they don't retry
  return KillApplicationResponse.newInstance(
      application.getApplicationSubmissionContext().getUnmanagedAM());
}
Example 13: InodeTree
import org.apache.hadoop.security.UserGroupInformation; // import the package/class this method depends on
/**
 * Create an Inode Tree from the mount table specified in the config.
 * @param config - the mount table keys are prefixed with
 *       FsConstants.CONFIG_VIEWFS_PREFIX
 * @param viewName - the name of the mount table - if null, use the default
 *       mount table name
 * @throws UnsupportedFileSystemException
 * @throws URISyntaxException
 * @throws FileAlreadyExistsException
 * @throws IOException
 */
protected InodeTree(final Configuration config, final String viewName)
    throws UnsupportedFileSystemException, URISyntaxException,
    FileAlreadyExistsException, IOException {
  String vName = viewName;
  if (vName == null) {
    vName = Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE;
  }
  homedirPrefix = ConfigUtil.getHomeDirValue(config, vName);
  root = new INodeDir<T>("/", UserGroupInformation.getCurrentUser());
  root.InodeDirFs = getTargetFileSystem(root);
  root.isRoot = true;

  final String mtPrefix = Constants.CONFIG_VIEWFS_PREFIX + "." +
      vName + ".";
  final String linkPrefix = Constants.CONFIG_VIEWFS_LINK + ".";
  final String linkMergePrefix = Constants.CONFIG_VIEWFS_LINK_MERGE + ".";
  boolean gotMountTableEntry = false;
  final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  for (Entry<String, String> si : config) {
    final String key = si.getKey();
    if (key.startsWith(mtPrefix)) {
      gotMountTableEntry = true;
      boolean isMergeLink = false;
      String src = key.substring(mtPrefix.length());
      if (src.startsWith(linkPrefix)) {
        src = src.substring(linkPrefix.length());
      } else if (src.startsWith(linkMergePrefix)) { // A merge link
        isMergeLink = true;
        src = src.substring(linkMergePrefix.length());
      } else if (src.startsWith(Constants.CONFIG_VIEWFS_HOMEDIR)) {
        // ignore - we set home dir from config
        continue;
      } else {
        throw new IOException(
            "ViewFs: Cannot initialize: Invalid entry in Mount table in config: " +
            src);
      }
      final String target = si.getValue(); // link or merge link
      createLink(src, target, isMergeLink, ugi);
    }
  }
  if (!gotMountTableEntry) {
    throw new IOException(
        "ViewFs: Cannot initialize: Empty Mount table in config for " +
        "viewfs://" + vName + "/");
  }
}
Example 14: InodeTree
import org.apache.hadoop.security.UserGroupInformation; // import the package/class this method depends on
/**
 * Create an Inode Tree from the mount table specified in the config.
 * @param config - the mount table keys are prefixed with
 *       FsConstants.CONFIG_VIEWFS_PREFIX
 * @param viewName - the name of the mount table - if null, use the default
 *       mount table name
 * @throws UnsupportedFileSystemException
 * @throws URISyntaxException
 * @throws FileAlreadyExistsException
 * @throws IOException
 */
protected InodeTree(final Configuration config, final String viewName)
    throws UnsupportedFileSystemException, URISyntaxException,
    FileAlreadyExistsException, IOException {
  String vName = viewName;
  if (vName == null) {
    vName = Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE;
  }
  homedirPrefix = ConfigUtil.getHomeDirValue(config, vName);
  root = new INodeDir<T>("/", UserGroupInformation.getCurrentUser());
  root.InodeDirFs = getTargetFileSystem(root);
  root.isRoot = true;

  final String mtPrefix = Constants.CONFIG_VIEWFS_PREFIX + "." +
      vName + ".";
  final String linkPrefix = Constants.CONFIG_VIEWFS_LINK + ".";
  final String linkMergePrefix = Constants.CONFIG_VIEWFS_LINK_MERGE + ".";
  boolean gotMountTableEntry = false;
  final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  for (Entry<String, String> si : config) {
    final String key = si.getKey();
    if (key.startsWith(mtPrefix)) {
      gotMountTableEntry = true;
      boolean isMergeLink = false;
      String src = key.substring(mtPrefix.length());
      if (src.startsWith(linkPrefix)) {
        src = src.substring(linkPrefix.length());
      } else if (src.startsWith(linkMergePrefix)) { // A merge link
        isMergeLink = true;
        src = src.substring(linkMergePrefix.length());
      } else if (src.startsWith(Constants.CONFIG_VIEWFS_HOMEDIR)) {
        // ignore - we set home dir from config
        continue;
      } else {
        throw new IOException(
            "ViewFs: Cannot initialize: Invalid entry in Mount table in config: " +
            src);
      }
      final String target = si.getValue(); // link or merge link
      createLink(src, target, isMergeLink, ugi);
    }
  }
  if (!gotMountTableEntry) {
    throw new IOException(
        "ViewFs: Cannot initialize: Empty Mount table in config for " +
        (vName == null ? "viewfs:///" : ("viewfs://" + vName + "/")));
  }
}
Example 15: UserProvider
import org.apache.hadoop.security.UserGroupInformation; // import the package/class this method depends on
private UserProvider() throws IOException {
  user = UserGroupInformation.getCurrentUser();
  credentials = user.getCredentials();
}