本文整理汇总了Java中org.apache.hadoop.security.UserGroupInformation.doAs方法的典型用法代码示例。如果您正苦于以下问题:Java UserGroupInformation.doAs方法的具体用法?Java UserGroupInformation.doAs怎么用?Java UserGroupInformation.doAs使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.security.UserGroupInformation的用法示例。
在下文中一共展示了UserGroupInformation.doAs方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getDelegationToken
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
/**
 * Obtains an RM delegation token on behalf of {@code loggedInUser}.
 *
 * @param loggedInUser the user whose security context the RPC runs in
 * @param clientRMService client protocol used to reach the ResourceManager
 * @param renewerString principal allowed to renew the returned token
 * @return the RM delegation token issued to the logged-in user
 * @throws IOException on RPC failure
 * @throws InterruptedException if the doAs call is interrupted
 */
private org.apache.hadoop.yarn.api.records.Token getDelegationToken(
    final UserGroupInformation loggedInUser,
    final ApplicationClientProtocol clientRMService, final String renewerString)
    throws IOException, InterruptedException {
  // Run the request inside the user's context so the token is issued to
  // that user rather than to the process principal.
  return loggedInUser
      .doAs(new PrivilegedExceptionAction<org.apache.hadoop.yarn.api.records.Token>() {
        @Override
        public org.apache.hadoop.yarn.api.records.Token run()
            throws YarnException, IOException {
          GetDelegationTokenRequest req =
              Records.newRecord(GetDelegationTokenRequest.class);
          req.setRenewer(renewerString);
          return clientRMService.getDelegationToken(req).getRMDelegationToken();
        }
      });
}
示例2: instantiateHistoryProxy
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
/**
 * Creates an {@link MRClientProtocol} proxy to the JobHistory server as the
 * current user, or returns {@code null} when no history address is configured.
 *
 * @return a history-server proxy, or {@code null} if the address is unset
 * @throws IOException if the current user cannot be determined
 */
protected MRClientProtocol instantiateHistoryProxy()
    throws IOException {
  final String serviceAddr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS);
  if (StringUtils.isEmpty(serviceAddr)) {
    // No history server configured; callers must handle a null proxy.
    return null;
  }
  LOG.debug("Connecting to HistoryServer at: " + serviceAddr);
  final YarnRPC rpc = YarnRPC.create(conf);
  UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
  MRClientProtocol proxy =
      currentUser.doAs(new PrivilegedAction<MRClientProtocol>() {
        @Override
        public MRClientProtocol run() {
          return (MRClientProtocol) rpc.getProxy(HSClientProtocol.class,
              NetUtils.createSocketAddr(serviceAddr), conf);
        }
      });
  // Fixed: this was previously logged BEFORE the proxy was created, which
  // was misleading; log success only after the doAs has completed.
  LOG.debug("Connected to HistoryServer at: " + serviceAddr);
  return proxy;
}
示例3: verifyNewVersionToken
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
/**
 * Checks that a client-to-AM token whose identifier carries an extra
 * (newer-version) field is still accepted by the AM's RPC server.
 */
private void verifyNewVersionToken(final Configuration conf, final CustomAM am,
    Token<ClientToAMTokenIdentifier> token, MockRM rm) throws IOException,
    InterruptedException {
  final UserGroupInformation ugi = UserGroupInformation.createRemoteUser("me");
  // Re-sign the decoded identifier with an extended payload, keeping the
  // original service so the RPC layer still routes it to the AM.
  Token<ClientToAMTokenIdentifier> extendedToken =
      new Token<ClientToAMTokenIdentifier>(
          new ClientToAMTokenIdentifierForTest(token.decodeIdentifier(), "message"),
          am.getClientToAMTokenSecretManager());
  extendedToken.setService(token.getService());
  ugi.addToken(extendedToken);
  ugi.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      CustomProtocol client =
          (CustomProtocol) RPC.getProxy(CustomProtocol.class, 1L, am.address,
              conf);
      client.ping();
      Assert.assertTrue(am.pinged);
      return null;
    }
  });
}
示例4: getKeysMetadata
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
/**
 * REST endpoint returning metadata for the requested key names as JSON.
 * Access is checked against the GET_METADATA ACL before the provider is
 * queried, and the query itself runs as the authenticated caller.
 */
@GET
@Path(KMSRESTConstants.KEYS_METADATA_RESOURCE)
@Produces(MediaType.APPLICATION_JSON)
public Response getKeysMetadata(@QueryParam(KMSRESTConstants.KEY)
    List<String> keyNamesList) throws Exception {
  KMSWebApp.getAdminCallsMeter().mark();
  UserGroupInformation user = HttpUserGroupInformation.get();
  final String[] names =
      keyNamesList.toArray(new String[keyNamesList.size()]);
  // ACL check happens before touching the provider.
  assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_KEYS_METADATA);
  final KeyProvider.Metadata[] metadata = user.doAs(
      new PrivilegedExceptionAction<KeyProvider.Metadata[]>() {
        @Override
        public KeyProvider.Metadata[] run() throws Exception {
          return provider.getKeysMetadata(names);
        }
      });
  Object json = KMSServerJSONUtils.toJSON(names, metadata);
  kmsAudit.ok(user, KMSOp.GET_KEYS_METADATA, "");
  return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
}
示例5: testUGILogin
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
/**
 * Logs in the ZooKeeper principal from its keytab, verifies security is
 * enabled and the login is keytab-based, then builds and registers a SASL
 * ACL for the logged-in user.
 */
@Test
public void testUGILogin() throws Throwable {
  UserGroupInformation ugi = loginUGI(ZOOKEEPER, keytab_zk);
  RegistrySecurity.UgiInfo ugiInfo =
      new RegistrySecurity.UgiInfo(ugi);
  LOG.info("logged in as: {}", ugiInfo);
  assertTrue("security is not enabled: " + ugiInfo,
      UserGroupInformation.isSecurityEnabled());
  // Fixed message: it is printed when the assertion FAILS, so it must state
  // the failure condition (previously read "login is keytab based").
  assertTrue("login is not keytab based: " + ugiInfo,
      ugi.isFromKeytab());
  // now we are here, build a SASL ACL
  ACL acl = ugi.doAs(new PrivilegedExceptionAction<ACL>() {
    @Override
    public ACL run() throws Exception {
      return registrySecurity.createSaslACLFromCurrentUser(0);
    }
  });
  assertEquals(ZOOKEEPER_REALM, acl.getId().getId());
  assertEquals(ZookeeperConfigOptions.SCHEME_SASL, acl.getId().getScheme());
  registrySecurity.addSystemACL(acl);
}
示例6: getContainerMgrProxy
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
/**
 * Builds a {@link ContainerManagementProtocol} proxy to the node hosting the
 * master container, authenticated with an NM token minted for the
 * container's application attempt.
 */
protected ContainerManagementProtocol getContainerMgrProxy(
    final ContainerId containerId) {
  final NodeId node = masterContainer.getNodeId();
  final InetSocketAddress cmAddress =
      NetUtils.createSocketAddrForHost(node.getHost(), node.getPort());
  final YarnRPC rpc = YarnRPC.create(conf); // TODO: Don't create again and again.
  UserGroupInformation proxyUser = UserGroupInformation
      .createRemoteUser(containerId.getApplicationAttemptId().toString());
  String appUser = rmContext.getRMApps()
      .get(containerId.getApplicationAttemptId().getApplicationId())
      .getUser();
  // Mint an NM token for this attempt/node and attach it to the remote UGI
  // so the RPC below authenticates as the application attempt.
  org.apache.hadoop.yarn.api.records.Token nmToken =
      rmContext.getNMTokenSecretManager().createNMToken(
          containerId.getApplicationAttemptId(), node, appUser);
  proxyUser.addToken(ConverterUtils.convertFromYarn(nmToken, cmAddress));
  return proxyUser.doAs(new PrivilegedAction<ContainerManagementProtocol>() {
    @Override
    public ContainerManagementProtocol run() {
      return (ContainerManagementProtocol) rpc.getProxy(
          ContainerManagementProtocol.class, cmAddress, conf);
    }
  });
}
示例7: submit
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
/**
 * Submits an Angel application as the current user. When a PyAngel python
 * file is configured, the submit class is overridden with the Python runner
 * before the submitter implementation is loaded reflectively.
 *
 * @param conf job configuration; also names the submitter class
 * @throws Exception if submission fails or the class cannot be loaded
 */
public static void submit(Configuration conf) throws Exception {
  LOG.info("angel python file: " + conf.get("angel.pyangel.pyfile"));
  if (null != conf.get("angel.pyangel.pyfile")) {
    conf.set(AngelConf.ANGEL_APP_SUBMIT_CLASS, "com.tencent.angel.api.python.PythonRunner");
  }
  // Resolve the submitter implementation class (configurable, with default).
  final String submitClassName =
      conf.get(AngelConf.ANGEL_APP_SUBMIT_CLASS, AngelConf.DEFAULT_ANGEL_APP_SUBMIT_CLASS);
  UserGroupInformation ugi = UGITools.getCurrentUser(conf);
  ugi.doAs(new PrivilegedExceptionAction<String>() {
    @Override public String run() throws Exception {
      final AppSubmitter submitter;
      try {
        Class<?> submitClass = Class.forName(submitClassName);
        submitter = (AppSubmitter) submitClass.newInstance();
        LOG.info("submitClass: " + submitClass.getName());
      } catch (Exception x) {
        String message = "load submit class failed " + x.getMessage();
        LOG.fatal(message, x);
        throw new InvalidParameterException(message);
      }
      submitter.submit(conf);
      return "OK";
    }
  });
}
示例8: testDelegationTokenWithDoAs
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
/**
 * Verifies delegation-token renew and cancel under doAs for both the long
 * (principal) and short forms of the renewer's name.
 */
@Test(timeout = 300000)
public void testDelegationTokenWithDoAs() throws Exception {
  final Token<DelegationTokenIdentifier> token =
      getDelegationToken(fs, "JobTracker");
  final UserGroupInformation longUgi = UserGroupInformation
      .createRemoteUser("JobTracker/[email protected]");
  final UserGroupInformation shortUgi = UserGroupInformation
      .createRemoteUser("JobTracker");
  longUgi.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      // try renew with long name
      token.renew(conf);
      return null;
    }
  });
  shortUgi.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      // renew must also succeed with the short name
      token.renew(conf);
      return null;
    }
  });
  longUgi.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      // Fixed: removed stray double semicolon after cancel.
      token.cancel(conf);
      return null;
    }
  });
}
示例9: initAndStartAggregation
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
/**
 * Initializes and starts the log-aggregation handler while impersonating the
 * application user.
 *
 * @param conf configuration passed to the handler's init()
 * @param appUser user the aggregation runs as
 * @param aggregateHandler handler to init and start
 * @throws IOException on doAs failure
 * @throws InterruptedException if the doAs call is interrupted
 */
private static void initAndStartAggregation(final Configuration conf,
    String appUser, final HPCLogAggregateHandler aggregateHandler)
    throws IOException, InterruptedException {
  UserGroupInformation appUgi =
      UserGroupInformation.createRemoteUser(appUser);
  appUgi.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      aggregateHandler.init(conf);
      aggregateHandler.start();
      return null;
    }
  });
}
示例10: getSplitsWithUGI
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
/**
 * Computes input splits while impersonating the query user via a proxy UGI.
 *
 * @throws ExecutionSetupException declared for callers; failures are wrapped
 *         in DrillRuntimeException below
 */
private void getSplitsWithUGI() throws ExecutionSetupException {
  final UserGroupInformation ugi = ImpersonationUtil.createProxyUgi(getUserName());
  try {
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override // added: was missing, unlike the other actions in this file
      public Void run() throws Exception {
        getSplits();
        return null;
      }
    });
  } catch (final InterruptedException | IOException e) {
    if (e instanceof InterruptedException) {
      // Restore the interrupt status instead of swallowing it.
      Thread.currentThread().interrupt();
    }
    final String errMsg = String.format("Failed to create input splits: %s", e.getMessage());
    logger.error(errMsg, e);
    throw new DrillRuntimeException(errMsg, e);
  }
}
示例11: getDatasetInternal
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
// Builds a DatasetConfig for a file/folder selection: samples the schema as
// the process user, merges it with any previously stored schema, and fills
// in path, type, and format settings.
// NOTE(review): schema sampling runs as the PROCESS user (not the query
// user) — presumably intentional for metadata access; confirm with callers.
protected DatasetConfig getDatasetInternal(final FileSystemWrapper fs, final FileSelection selection, List<String> tableSchemaPath) {
  final UserGroupInformation processUGI = ImpersonationUtil.getProcessUserUGI();
  try {
    // Sample the on-disk data to infer a schema, timing the operation.
    BatchSchema newSchema = processUGI.doAs(
        new PrivilegedExceptionAction<BatchSchema>() {
          @Override
          public BatchSchema run() throws Exception {
            final Stopwatch watch = Stopwatch.createStarted();
            try {
              return getBatchSchema(selection, fs);
            } finally {
              logger.debug("Took {} ms to sample the schema of table located at: {}",
                  watch.elapsed(TimeUnit.MILLISECONDS), selection.getSelectionRoot());
            }
          }
        }
    );
    // A directory selection is a folder dataset; a single file otherwise.
    DatasetType type = fs.isDirectory(new Path(selection.getSelectionRoot())) ?
        DatasetType.PHYSICAL_DATASET_SOURCE_FOLDER :
        DatasetType.PHYSICAL_DATASET_SOURCE_FILE;
    // Merge sampled schema into the existing one, if one already exists
    BatchSchema schema = newSchema;
    if (oldConfig != null && DatasetHelper.getSchemaBytes(oldConfig) != null) {
      schema = BatchSchema.fromDataset(oldConfig).merge(newSchema);
    }
    // Last path component is the dataset's display name.
    return new DatasetConfig()
        .setName(tableSchemaPath.get(tableSchemaPath.size() - 1))
        .setType(type)
        .setFullPathList(tableSchemaPath)
        .setSchemaVersion(DatasetHelper.CURRENT_VERSION)
        .setRecordSchema(schema.toByteString())
        .setPhysicalDataset(new PhysicalDataset()
            .setFormatSettings(toFileFormat(formatPlugin).asFileConfig().setLocation(selection.getSelectionRoot())));
  } catch (Exception e) {
    // Wrap everything (including checked exceptions from doAs) unchecked,
    // preserving the cause.
    throw new RuntimeException(e);
  }
}
示例12: testFsCache
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
/**
 * Verifies FileSystem caching is per-UGI: different remote users must get
 * distinct FileSystem instances for the same configuration. Then re-runs the
 * cache test against the default NameNode port (skipped if bound) and an
 * ephemeral port.
 */
public void testFsCache() throws Exception {
  {
    // Fixed: removed unused local `long now = System.currentTimeMillis();`.
    String[] users = new String[]{"foo","bar"};
    final Configuration conf = new Configuration();
    FileSystem[] fs = new FileSystem[users.length];
    for(int i = 0; i < users.length; i++) {
      UserGroupInformation ugi = UserGroupInformation.createRemoteUser(users[i]);
      fs[i] = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
        @Override // added: was missing, unlike other actions in this file
        public FileSystem run() throws IOException {
          return FileSystem.get(conf);
        }});
      // The cache keys on UGI, so no two users may share an instance.
      for(int j = 0; j < i; j++) {
        assertFalse(fs[j] == fs[i]);
      }
    }
    FileSystem.closeAll();
  }
  {
    try {
      runTestCache(NameNode.DEFAULT_PORT);
    } catch(java.net.BindException be) {
      // Default port may already be in use on the test host; skip that case.
      LOG.warn("Cannot test NameNode.DEFAULT_PORT (="
          + NameNode.DEFAULT_PORT + ")", be);
    }
    runTestCache(0);
  }
}
示例13: checkTokenIdentifier
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
/**
 * Decodes the token's identifier by hand, checks it has a valid password in
 * the secret manager, renews it directly, then renews and cancels it through
 * the token API while impersonating the given user.
 */
@SuppressWarnings("unchecked")
private void checkTokenIdentifier(UserGroupInformation ugi, final Token<?> token)
    throws Exception {
  Assert.assertNotNull(token);
  // should be able to use token.decodeIdentifier() but webhdfs isn't
  // registered with the service loader for token decoding
  DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
  try (DataInputStream in = new DataInputStream(
      new ByteArrayInputStream(token.getIdentifier()))) {
    identifier.readFields(in);
  }
  Assert.assertNotNull(identifier);
  LOG.info("A valid token should have non-null password, and should be renewed successfully");
  Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
  dtSecretManager.renewToken((Token<DelegationTokenIdentifier>) token, "JobTracker");
  ugi.doAs(
      new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          token.renew(config);
          token.cancel(config);
          return null;
        }
      });
}
示例14: delete
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
/**
 * Handle HTTP DELETE request.
 *
 * Binds query parameters, initializes per-request state via init(), then
 * dispatches the actual delete as the (possibly proxied) caller's UGI.
 * The inner delete(...) overload performs the filesystem operation; reset()
 * is always invoked afterwards to clear request-scoped state.
 */
@DELETE
@Path("{" + UriFsPathParam.NAME + ":.*}")
@Produces(MediaType.APPLICATION_JSON)
public Response delete(
    @Context final UserGroupInformation ugi,
    @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
        final DelegationParam delegation,
    @QueryParam(UserParam.NAME) @DefaultValue(UserParam.DEFAULT)
        final UserParam username,
    @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT)
        final DoAsParam doAsUser,
    @PathParam(UriFsPathParam.NAME) final UriFsPathParam path,
    @QueryParam(DeleteOpParam.NAME) @DefaultValue(DeleteOpParam.DEFAULT)
        final DeleteOpParam op,
    @QueryParam(RecursiveParam.NAME) @DefaultValue(RecursiveParam.DEFAULT)
        final RecursiveParam recursive,
    @QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT)
        final SnapshotNameParam snapshotName
    ) throws IOException, InterruptedException {
  // Validates parameters and records request context before dispatch.
  init(ugi, delegation, username, doAsUser, path, op, recursive, snapshotName);
  return ugi.doAs(new PrivilegedExceptionAction<Response>() {
    @Override
    public Response run() throws IOException {
      try {
        // Delegate to the overload that performs the actual operation.
        return delete(ugi, delegation, username, doAsUser,
            path.getAbsolutePath(), op, recursive, snapshotName);
      } finally {
        // Always clear request-scoped state, even on failure.
        reset();
      }
    }
  });
}
示例15: newInstance
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
/**
 * Returns the FileSystem for this URI's scheme and authority and the
 * passed user. Internally invokes {@link #newInstance(URI, Configuration)}
 * under the user's UGI.
 * @param uri of the filesystem
 * @param conf the configuration to use
 * @param user to perform the get as
 * @return filesystem instance
 * @throws IOException
 * @throws InterruptedException
 */
public static FileSystem newInstance(final URI uri, final Configuration conf,
    final String user) throws IOException, InterruptedException {
  // Pick the best UGI for this user, preferring a configured Kerberos
  // ticket cache when available.
  final String ticketCache =
      conf.get(CommonConfigurationKeys.KERBEROS_TICKET_CACHE_PATH);
  final UserGroupInformation ugi =
      UserGroupInformation.getBestUGI(ticketCache, user);
  PrivilegedExceptionAction<FileSystem> createFs =
      new PrivilegedExceptionAction<FileSystem>() {
        @Override
        public FileSystem run() throws IOException {
          return newInstance(uri, conf);
        }
      };
  return ugi.doAs(createFs);
}