本文整理汇总了Java中org.apache.hadoop.security.HadoopKerberosName类的典型用法代码示例。如果您正苦于以下问题:Java HadoopKerberosName类的具体用法?Java HadoopKerberosName怎么用?Java HadoopKerberosName使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
HadoopKerberosName类属于org.apache.hadoop.security包,在下文中一共展示了HadoopKerberosName类的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getSaslClientProperties
import org.apache.hadoop.security.HadoopKerberosName; //导入依赖的package包/类
/**
 * Builds the SASL client properties for connecting to the given remote drillbit.
 *
 * When the login user authenticated via Kerberos, a SERVICE_PRINCIPAL property is
 * derived from the login principal: either the login principal itself (when
 * {@code useLoginPrincipal} is set) or a principal with the same primary and realm
 * but the remote endpoint's address as the instance. Caller-supplied overrides are
 * merged on top.
 *
 * @param remoteEndpoint drillbit being connected to
 * @param overrides properties that take precedence over the derived ones
 * @return the resulting properties as a string map
 * @throws IOException if the login user or its Kerberos name cannot be resolved
 */
public Map<String, ?> getSaslClientProperties(final DrillbitEndpoint remoteEndpoint,
    final Map<String, String> overrides) throws IOException {
  final DrillProperties props = DrillProperties.createEmpty();
  final UserGroupInformation login = UserGroupInformation.getLoginUser();
  if (login.getAuthenticationMethod() == UserGroupInformation.AuthenticationMethod.KERBEROS) {
    final HadoopKerberosName principal = new HadoopKerberosName(login.getUserName());
    if (useLoginPrincipal) {
      props.setProperty(DrillProperties.SERVICE_PRINCIPAL, principal.toString());
    } else {
      // Assume the remote drillbit runs under the same primary/realm, with its
      // own address as the instance part of the service principal.
      props.setProperty(DrillProperties.SERVICE_PRINCIPAL,
          KerberosUtil.getPrincipalFromParts(principal.getShortName(),
              remoteEndpoint.getAddress(),
              principal.getRealm()));
    }
  }
  props.merge(overrides);
  return props.stringPropertiesAsMap();
}
示例2: obtainTokensForNamenodesInternal
import org.apache.hadoop.security.HadoopKerberosName; //导入依赖的package包/类
/**
* get delegation tokens for a specific FS
* @param fs
* @param credentials
* @param p
* @param conf
* @throws IOException
*/
/**
 * Obtains delegation tokens for a specific file system and adds them to the
 * supplied credentials, after merging in any binary tokens from the
 * configuration. The JobTracker's Kerberos short name is used as the token
 * renewer so the JobTracker can later renew the tokens.
 *
 * @param fs file system to request delegation tokens from
 * @param credentials credential store the tokens are added to
 * @param conf configuration providing the JobTracker principal name
 * @throws IOException if the renewer name cannot be resolved or token
 *         retrieval fails
 */
private static void obtainTokensForNamenodesInternal(FileSystem fs,
    Credentials credentials, Configuration conf) throws IOException {
  HadoopKerberosName jtKrbName = new HadoopKerberosName(conf.get(JobTracker.JT_USER_NAME, ""));
  String delegTokenRenewer = jtKrbName.getShortName();
  mergeBinaryTokens(credentials, conf);
  final Token<?>[] tokens = fs.addDelegationTokens(delegTokenRenewer,
      credentials);
  if (tokens != null) {
    for (Token<?> token : tokens) {
      LOG.info("Got dt for " + fs.getUri() + "; " + token);
    }
  }
}
示例3: AbstractDelegationTokenIdentifier
import org.apache.hadoop.security.HadoopKerberosName; //导入依赖的package包/类
/**
 * Creates a delegation token identifier with the given owner, renewer, and
 * real user. Null arguments are stored as empty {@link Text}. The renewer is
 * normalized to its Kerberos short name so later comparisons against short
 * user names succeed; a failure to resolve the short name is rethrown as a
 * {@link RuntimeException}. Issue and max dates start at 0.
 */
public AbstractDelegationTokenIdentifier(Text owner, Text renewer, Text realUser) {
  this.owner = (owner == null) ? new Text() : owner;
  if (renewer == null) {
    this.renewer = new Text();
  } else {
    try {
      HadoopKerberosName krbName = new HadoopKerberosName(renewer.toString());
      this.renewer = new Text(krbName.getShortName());
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
  this.realUser = (realUser == null) ? new Text() : realUser;
  issueDate = 0;
  maxDate = 0;
}
示例4: setRenewer
import org.apache.hadoop.security.HadoopKerberosName; //导入依赖的package包/类
/**
 * Sets the token renewer, normalizing a Kerberos principal to its short name.
 * A null renewer is stored as an empty {@link Text}; a failure to resolve the
 * short name is rethrown as a {@link RuntimeException}.
 *
 * @param renewer renewer principal, possibly null
 */
public void setRenewer(Text renewer) {
  if (renewer == null) {
    this.renewer = new Text();
    return;
  }
  try {
    HadoopKerberosName krbName = new HadoopKerberosName(renewer.toString());
    this.renewer = new Text(krbName.getShortName());
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
示例5: testParseTimelineDelegationTokenIdentifierRenewer
import org.apache.hadoop.security.HadoopKerberosName; //导入依赖的package包/类
/**
 * Verifies that TimelineDelegationTokenIdentifier applies the configured
 * auth_to_local rules to the renewer principal: "rm/localhost@EXAMPLE.COM"
 * must be reduced to the local short name "yarn".
 */
@Test
public void testParseTimelineDelegationTokenIdentifierRenewer() throws IOException {
  // Server side when generating a timeline DT.
  Configuration conf = new YarnConfiguration();
  // Rule: any 2-component nm/rm principal in *EXAMPLE.COM maps to local user "yarn".
  // (The original snippet had the rule and principal mangled by e-mail obfuscation.)
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
      "RULE:[2:$1@$0]([nr]m@.*EXAMPLE.COM)s/.*/yarn/");
  HadoopKerberosName.setConfiguration(conf);
  Text owner = new Text("owner");
  Text renewer = new Text("rm/localhost@EXAMPLE.COM");
  Text realUser = new Text("realUser");
  TimelineDelegationTokenIdentifier token =
      new TimelineDelegationTokenIdentifier(owner, renewer, realUser);
  Assert.assertEquals(new Text("yarn"), token.getRenewer());
}
示例6: testValidKerberosName
import org.apache.hadoop.security.HadoopKerberosName; //导入依赖的package包/类
/**
 * Checks that the sample Zookeeper principals can be parsed to a short name
 * without throwing. Only parseability is asserted; the resulting short names
 * are not inspected.
 */
@Test
public void testValidKerberosName() throws Throwable {
new HadoopKerberosName(ZOOKEEPER).getShortName();
new HadoopKerberosName(ZOOKEEPER_LOCALHOST).getShortName();
new HadoopKerberosName(ZOOKEEPER_REALM).getShortName();
// standard rules don't pick this up
// new HadoopKerberosName(ZOOKEEPER_LOCALHOST_REALM).getShortName();
}
示例7: finalizeSaslSession
import org.apache.hadoop.security.HadoopKerberosName; //导入依赖的package包/类
/**
 * Completes SASL negotiation by checking that the remote drillbit's service
 * principal has the same primary (Kerberos short name) as this drillbit's
 * login user; mismatches are rejected with a {@link SaslException}.
 */
@Override
public void finalizeSaslSession() throws IOException {
  final String authorizationID = getSaslServer().getAuthorizationID();
  final String remote = new HadoopKerberosName(authorizationID).getShortName();
  final String local = UserGroupInformation.getLoginUser().getShortUserName();
  if (!local.equals(remote)) {
    throw new SaslException(String.format("'primary' part of remote drillbit's service principal " +
        "does not match with this drillbit's. Expected: '%s' Actual: '%s'", local, remote));
  }
  getLogger().debug("Authenticated connection for {}", authorizationID);
}
示例8: finalizeSaslSession
import org.apache.hadoop.security.HadoopKerberosName; //导入依赖的package包/类
/**
 * Completes SASL negotiation: resolves the authorization ID to its Kerberos
 * short name and finalizes the session under that user name.
 */
@Override
public void finalizeSaslSession() throws IOException {
  final String authorizationID = getSaslServer().getAuthorizationID();
  final String shortName = new HadoopKerberosName(authorizationID).getShortName();
  logger.debug("Created session for {}", shortName);
  finalizeSession(shortName);
}
示例9: main
import org.apache.hadoop.security.HadoopKerberosName; //导入依赖的package包/类
/**
 * Starts a Kerberos-secured Thrift server exposing HdfsService: logs in from a
 * keytab, wraps the service processor in SASL/GSSAPI transports that run each
 * RPC as the authenticated caller, and serves on the configured port.
 */
public static void main(String[] args) throws Exception {
Opts opts = new Opts();
opts.parseArgs(Server.class, args);
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
// Parse out the primary and instance from the principal (primary/instance@REALM)
String principal = SecurityUtil.getServerPrincipal(opts.principal, InetAddress.getLocalHost().getCanonicalHostName());
HadoopKerberosName name = new HadoopKerberosName(principal);
String primary = name.getServiceName();
String instance = name.getHostName();
// Log in using the keytab
UserGroupInformation.loginUserFromKeytab(principal, opts.keytab);
// Get the info from our login
UserGroupInformation serverUser = UserGroupInformation.getLoginUser();
log.info("Current user: {}", serverUser);
// Open the server using the provided port
TServerSocket serverTransport = new TServerSocket(opts.port);
// Wrap our implementation with the interface's processor
HdfsService.Processor<Iface> processor = new HdfsService.Processor<Iface>(new HdfsServiceImpl(fs));
// Use authorization and confidentiality
Map<String,String> saslProperties = new HashMap<String,String>();
saslProperties.put(Sasl.QOP, "auth-conf");
// Creating the server definition
TSaslServerTransport.Factory saslTransportFactory = new TSaslServerTransport.Factory();
saslTransportFactory.addServerDefinition("GSSAPI", // tell SASL to use GSSAPI, which supports Kerberos
primary, // kerberos primary for server - "myprincipal" in myprincipal/my.server.com@EXAMPLE.COM
instance, // kerberos instance for server - "my.server.com" in myprincipal/my.server.com@EXAMPLE.COM
saslProperties, // Properties set, above
new SaslRpcServer.SaslGssCallbackHandler()); // Ensures that authenticated user is the same as the authorized user
// Make sure the TTransportFactory is performing a UGI.doAs
TTransportFactory ugiTransportFactory = new TUGIAssumingTransportFactory(saslTransportFactory, serverUser);
// Processor which takes the UGI for the RPC call, proxy that user on the server login, and then run as the proxied user
TUGIAssumingProcessor ugiProcessor = new TUGIAssumingProcessor(processor);
// Make a simple TThreadPoolServer with the processor and transport factory
TServer server = new TThreadPoolServer(new TThreadPoolServer.Args(serverTransport).transportFactory(ugiTransportFactory).processor(ugiProcessor));
// Start the thrift server
server.serve();
}