This article collects typical usage examples of the Java class org.apache.hadoop.security.token.Token. If you are unsure what the Token class does or how to use it, the curated examples below should help.
The Token class belongs to the org.apache.hadoop.security.token package. Fifteen code examples are presented below, sorted by popularity.
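Before the examples, here is a minimal sketch (not taken from the examples below; names such as identifierBytes and "SOME_KIND" are placeholders) of the Token basics they all build on: a token bundles an identifier, a password, a kind, and a service, and round-trips through a URL-safe string.
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

static void tokenBasics(byte[] identifierBytes, byte[] passwordBytes) throws IOException {
  // Build a token from its four parts: identifier, password, kind, and service.
  Token<TokenIdentifier> token = new Token<TokenIdentifier>(
      identifierBytes, passwordBytes, new Text("SOME_KIND"), new Text("host:8020"));

  // Tokens serialize to a URL-safe string, convenient for configs or HTTP transport...
  String encoded = token.encodeToUrlString();

  // ...and deserialize back into an empty token on the receiving side.
  Token<TokenIdentifier> copy = new Token<TokenIdentifier>();
  copy.decodeFromUrlString(encoded);
}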
Example 1: testDelegationTokenSecretManager
import org.apache.hadoop.security.token.Token; // import the required package/class
@Test
public void testDelegationTokenSecretManager() throws Exception {
  Token<DelegationTokenIdentifier> token = generateDelegationToken(
      "SomeUser", "JobTracker");
  // Fake renewer should not be able to renew
  try {
    dtSecretManager.renewToken(token, "FakeRenewer");
    Assert.fail("should have failed");
  } catch (AccessControlException ace) {
    // PASS
  }
  dtSecretManager.renewToken(token, "JobTracker");
  DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
  byte[] tokenId = token.getIdentifier();
  identifier.readFields(new DataInputStream(
      new ByteArrayInputStream(tokenId)));
  Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
  LOG.info("Sleep to expire the token");
  Thread.sleep(6000);
  // Token should be expired
  try {
    dtSecretManager.retrievePassword(identifier);
    // Should not come here
    Assert.fail("Token should have expired");
  } catch (InvalidToken e) {
    // Success
  }
  dtSecretManager.renewToken(token, "JobTracker");
  LOG.info("Sleep beyond the max lifetime");
  Thread.sleep(5000);
  try {
    dtSecretManager.renewToken(token, "JobTracker");
    Assert.fail("should have been expired");
  } catch (InvalidToken it) {
    // PASS
  }
}
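Two things are worth noting about this test. The short sleeps only expire the token because the secret manager under test (constructed elsewhere) uses a renew interval and max lifetime of just a few seconds. Also, the identifier-deserialization dance recurs in several examples below; a generic helper with hypothetical naming might look like this:
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

// Hypothetical helper: fill an empty identifier from a token's identifier bytes.
static <T extends TokenIdentifier> T readIdentifier(Token<T> token, T emptyId)
    throws IOException {
  emptyId.readFields(new DataInputStream(
      new ByteArrayInputStream(token.getIdentifier())));
  return emptyId;
}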
Example 2: checkAccess
import org.apache.hadoop.security.token.Token; // import the required package/class
/** Check if access should be allowed. userId is not checked if null. */
public void checkAccess(Token<BlockTokenIdentifier> token, String userId,
    ExtendedBlock block, AccessMode mode) throws InvalidToken {
  BlockTokenIdentifier id = new BlockTokenIdentifier();
  try {
    id.readFields(new DataInputStream(new ByteArrayInputStream(token
        .getIdentifier())));
  } catch (IOException e) {
    throw new InvalidToken(
        "Unable to de-serialize block token identifier for user=" + userId
            + ", block=" + block + ", access mode=" + mode);
  }
  checkAccess(id, userId, block, mode);
  if (!Arrays.equals(retrievePassword(id), token.getPassword())) {
    throw new InvalidToken("Block token with " + id.toString()
        + " doesn't have the correct token password");
  }
}
Example 3: renew
import org.apache.hadoop.security.token.Token; // import the required package/class
@SuppressWarnings("unchecked")
@Override
public long renew(Token<?> token, Configuration conf) throws IOException,
    InterruptedException {
  final ApplicationClientProtocol rmClient = getRmClient(token, conf);
  if (rmClient != null) {
    // Remote case: ask the ResourceManager to renew the token over RPC.
    try {
      RenewDelegationTokenRequest request =
          Records.newRecord(RenewDelegationTokenRequest.class);
      request.setDelegationToken(convertToProtoToken(token));
      return rmClient.renewDelegationToken(request).getNextExpirationTime();
    } catch (YarnException e) {
      throw new IOException(e);
    } finally {
      RPC.stopProxy(rmClient);
    }
  } else {
    // Local case: no RM proxy, so renew directly against the local secret manager.
    return localSecretManager.renewToken(
        (Token<RMDelegationTokenIdentifier>) token, getRenewer(token));
  }
}
Example 4: createBinaryTokenFile
import org.apache.hadoop.security.token.Token; // import the required package/class
private static void createBinaryTokenFile(Configuration conf) {
  // Fetch delegation tokens and store in binary token file.
  try {
    Credentials cred1 = new Credentials();
    Credentials cred2 = new Credentials();
    TokenCache.obtainTokensForNamenodesInternal(cred1, new Path[] { p1 },
        conf);
    for (Token<? extends TokenIdentifier> t : cred1.getAllTokens()) {
      cred2.addToken(new Text(DELEGATION_TOKEN_KEY), t);
    }
    DataOutputStream os = new DataOutputStream(new FileOutputStream(
        binaryTokenFileName.toString()));
    try {
      cred2.writeTokenStorageToStream(os);
    } finally {
      os.close();
    }
  } catch (IOException e) {
    Assert.fail("Exception " + e);
  }
}
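The counterpart read path is symmetric; a sketch, assuming the same binaryTokenFileName and conf as above:
private static void readBinaryTokenFile(Configuration conf) throws IOException {
  // Load the token file written above back into a Credentials object.
  Credentials read = Credentials.readTokenStorageFile(
      new Path(binaryTokenFileName.toString()), conf);
  for (Token<? extends TokenIdentifier> t : read.getAllTokens()) {
    System.out.println("Loaded token kind=" + t.getKind() + ", service=" + t.getService());
  }
}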
Example 5: selectToken
import org.apache.hadoop.security.token.Token; // import the required package/class
@SuppressWarnings("unchecked")
public Token<AMRMTokenIdentifier> selectToken(Text service,
    Collection<Token<? extends TokenIdentifier>> tokens) {
  if (service == null) {
    return null;
  }
  LOG.debug("Looking for a token with service " + service.toString());
  for (Token<? extends TokenIdentifier> token : tokens) {
    LOG.debug("Token kind is " + token.getKind().toString()
        + " and the token's service name is " + token.getService());
    if (AMRMTokenIdentifier.KIND_NAME.equals(token.getKind())
        && checkService(service, token)) {
      return (Token<AMRMTokenIdentifier>) token;
    }
  }
  return null;
}
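This method matches YARN's AMRMTokenSelector. A typical caller feeds it the current user's tokens; a hedged usage sketch (findAmrmToken is an illustrative name):
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
import org.apache.hadoop.yarn.security.AMRMTokenSelector;

static Token<AMRMTokenIdentifier> findAmrmToken(Text service) throws IOException {
  // Scan the current user's credentials for an AMRM token addressed to this service.
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  return new AMRMTokenSelector().selectToken(service, ugi.getTokens());
}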
Example 6: doRenewOrCancel
import org.apache.hadoop.security.token.Token; // import the required package/class
private static void doRenewOrCancel(
    final Token<DelegationTokenIdentifier> token, final Configuration conf,
    final TokenTestAction action)
    throws IOException, InterruptedException {
  UserGroupInformation.createRemoteUser("JobTracker").doAs(
      new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          switch (action) {
            case RENEW:
              token.renew(conf);
              break;
            case CANCEL:
              token.cancel(conf);
              break;
            default:
              fail("bad action:" + action);
          }
          return null;
        }
      });
}
Example 7: initialize
import org.apache.hadoop.security.token.Token; // import the required package/class
@Before
public void initialize() throws Exception {
  startHACluster(0, false, false, true);
  attemptId = this.cluster.createFakeApplicationAttemptId();
  amClient = ClientRMProxy
      .createRMProxy(this.conf, ApplicationMasterProtocol.class);
  // Create an AMRM token for the fake attempt and point it at the RM's token service.
  Token<AMRMTokenIdentifier> appToken =
      this.cluster.getResourceManager().getRMContext()
          .getAMRMTokenSecretManager().createAndGetAMRMToken(attemptId);
  appToken.setService(ClientRMProxy.getAMRMTokenService(conf));
  // Attach the token to the current user so subsequent AM-RM calls authenticate.
  UserGroupInformation.setLoginUser(UserGroupInformation
      .createRemoteUser(UserGroupInformation.getCurrentUser()
          .getUserName()));
  UserGroupInformation.getCurrentUser().addToken(appToken);
  syncToken(appToken);
}
Example 8: testGetDelegationTokensWithCredentials
import org.apache.hadoop.security.token.Token; // import the required package/class
@Test
public void testGetDelegationTokensWithCredentials() throws IOException {
  Credentials credentials = new Credentials();
  List<Token<?>> delTokens =
      Arrays.asList(fsView.addDelegationTokens("sanjay", credentials));
  int expectedTokenCount = getExpectedDelegationTokenCountWithCredentials();
  Assert.assertEquals(expectedTokenCount, delTokens.size());
  Credentials newCredentials = new Credentials();
  for (int i = 0; i < expectedTokenCount / 2; i++) {
    Token<?> token = delTokens.get(i);
    newCredentials.addToken(token.getService(), token);
  }
  List<Token<?>> delTokens2 =
      Arrays.asList(fsView.addDelegationTokens("sanjay", newCredentials));
  Assert.assertEquals((expectedTokenCount + 1) / 2, delTokens2.size());
}
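The halved count in the final assertion follows from the contract of addDelegationTokens: tokens already present in the passed Credentials are not fetched again. A minimal sketch of that contract against any FileSystem (fs and "renewer" are illustrative):
static void demoTokenCaching(FileSystem fs) throws IOException {
  Credentials creds = new Credentials();
  // First call fetches the needed tokens and caches them in creds.
  Token<?>[] first = fs.addDelegationTokens("renewer", creds);
  // Second call sees every token already cached and fetches nothing new.
  Token<?>[] second = fs.addDelegationTokens("renewer", creds);
  assert second.length == 0;
}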
Example 9: renewDelegationToken
import org.apache.hadoop.security.token.Token; // import the required package/class
@SuppressWarnings("unchecked")
@Override
public long renewDelegationToken(final Token<?> token) throws IOException {
  // update the kerberos credentials, if they are coming from a keytab
  UserGroupInformation connectUgi = ugi.getRealUser();
  if (connectUgi == null) {
    connectUgi = ugi;
  }
  try {
    return connectUgi.doAs(new PrivilegedExceptionAction<Long>() {
      @Override
      public Long run() throws Exception {
        InetSocketAddress serviceAddr = SecurityUtil
            .getTokenServiceAddr(token);
        return DelegationTokenFetcher.renewDelegationToken(connectionFactory,
            DFSUtil.createUri(getUnderlyingProtocol(), serviceAddr),
            (Token<DelegationTokenIdentifier>) token);
      }
    });
  } catch (InterruptedException e) {
    throw new IOException(e);
  }
}
Example 10: renewDelegationToken
import org.apache.hadoop.security.token.Token; // import the required package/class
@Override
public RenewDelegationTokenResponse renewDelegationToken(
    RenewDelegationTokenRequest request) throws IOException {
  if (!isAllowedDelegationTokenOp()) {
    throw new IOException(
        "Delegation Token can be renewed only with kerberos authentication");
  }
  // Rebuild a typed Token from the protobuf representation in the request.
  org.apache.hadoop.yarn.api.records.Token protoToken = request.getDelegationToken();
  Token<MRDelegationTokenIdentifier> token =
      new Token<MRDelegationTokenIdentifier>(
          protoToken.getIdentifier().array(), protoToken.getPassword()
              .array(), new Text(protoToken.getKind()), new Text(
              protoToken.getService()));
  // Renew on behalf of the calling user and report the new expiration time.
  String user = UserGroupInformation.getCurrentUser().getShortUserName();
  long nextExpTime = jhsDTSecretManager.renewToken(token, user);
  RenewDelegationTokenResponse renewResponse = Records
      .newRecord(RenewDelegationTokenResponse.class);
  renewResponse.setNextExpirationTime(nextExpTime);
  return renewResponse;
}
Example 11: createMockDatanode
import org.apache.hadoop.security.token.Token; // import the required package/class
private static Server createMockDatanode(BlockTokenSecretManager sm,
    Token<BlockTokenIdentifier> token, Configuration conf)
    throws IOException, ServiceException {
  ClientDatanodeProtocolPB mockDN = mock(ClientDatanodeProtocolPB.class);
  // Deserialize the block token's identifier so the mock can answer for it.
  BlockTokenIdentifier id = sm.createIdentifier();
  id.readFields(new DataInputStream(new ByteArrayInputStream(token
      .getIdentifier())));
  doAnswer(new GetLengthAnswer(sm, id)).when(mockDN)
      .getReplicaVisibleLength(any(RpcController.class),
          any(GetReplicaVisibleLengthRequestProto.class));
  // Expose the mock over a real RPC server secured by the block token secret manager.
  RPC.setProtocolEngine(conf, ClientDatanodeProtocolPB.class,
      ProtobufRpcEngine.class);
  BlockingService service = ClientDatanodeProtocolService
      .newReflectiveBlockingService(mockDN);
  return new RPC.Builder(conf).setProtocol(ClientDatanodeProtocolPB.class)
      .setInstance(service).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(5).setVerbose(true).setSecretManager(sm).build();
}
Example 12: getAuthToken
import org.apache.hadoop.security.token.Token; // import the required package/class
/**
 * Get the authentication token of the user for the cluster specified in the configuration.
 * @return null if the user does not have the token, otherwise the auth token for the cluster.
 */
private static Token<AuthenticationTokenIdentifier> getAuthToken(Configuration conf, User user)
    throws IOException, InterruptedException {
  ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "TokenUtil-getAuthToken", null);
  try {
    String clusterId = ZKClusterId.readClusterIdZNode(zkw);
    if (clusterId == null) {
      throw new IOException("Failed to get cluster ID");
    }
    return new AuthenticationTokenSelector().selectToken(new Text(clusterId), user.getTokens());
  } catch (KeeperException e) {
    throw new IOException(e);
  } finally {
    zkw.close();
  }
}
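A caller typically uses this lookup to avoid an unnecessary round trip: reuse the cached token if present, otherwise obtain a fresh one. A sketch, assuming the obtainToken(Configuration) overload that shipped with the same (ZooKeeperWatcher-era) HBase TokenUtil:
private static Token<AuthenticationTokenIdentifier> getOrObtainAuthToken(
    Configuration conf, User user) throws IOException, InterruptedException {
  Token<AuthenticationTokenIdentifier> token = getAuthToken(conf, user);
  if (token == null) {
    // Assumption: TokenUtil.obtainToken(Configuration) exists in this HBase version.
    token = TokenUtil.obtainToken(conf);
    user.addToken(token);
  }
  return token;
}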
Example 13: cancelDelegationToken
import org.apache.hadoop.security.token.Token; // import the required package/class
@Override
public CancelDelegationTokenResponse cancelDelegationToken(
    CancelDelegationTokenRequest request) throws YarnException {
  try {
    if (!isAllowedDelegationTokenOp()) {
      throw new IOException(
          "Delegation Token can be cancelled only with kerberos authentication");
    }
    // Rebuild a typed Token from the protobuf representation, then cancel it
    // on behalf of the calling user.
    org.apache.hadoop.yarn.api.records.Token protoToken = request.getDelegationToken();
    Token<RMDelegationTokenIdentifier> token = new Token<RMDelegationTokenIdentifier>(
        protoToken.getIdentifier().array(), protoToken.getPassword().array(),
        new Text(protoToken.getKind()), new Text(protoToken.getService()));
    String user = UserGroupInformation.getCurrentUser().getUserName();
    rmDTSecretManager.cancelToken(token, user);
    return Records.newRecord(CancelDelegationTokenResponse.class);
  } catch (IOException e) {
    throw RPCUtil.getRemoteException(e);
  }
}
Example 14: addDelegationTokens
import org.apache.hadoop.security.token.Token; // import the required package/class
@Override
public Token<?>[] addDelegationTokens(
    final String renewer, Credentials credentials) throws IOException {
  Token<?>[] tokens = super.addDelegationTokens(renewer, credentials);
  if (dfs.isHDFSEncryptionEnabled()) {
    // With HDFS encryption enabled, also collect tokens for the key provider
    // and merge them with the filesystem's own tokens.
    KeyProviderDelegationTokenExtension keyProviderDelegationTokenExtension =
        KeyProviderDelegationTokenExtension.
            createKeyProviderDelegationTokenExtension(dfs.getKeyProvider());
    Token<?>[] kpTokens = keyProviderDelegationTokenExtension.
        addDelegationTokens(renewer, credentials);
    if (tokens != null && kpTokens != null) {
      Token<?>[] all = new Token<?>[tokens.length + kpTokens.length];
      System.arraycopy(tokens, 0, all, 0, tokens.length);
      System.arraycopy(kpTokens, 0, all, tokens.length, kpTokens.length);
      tokens = all;
    } else {
      tokens = (tokens != null) ? tokens : kpTokens;
    }
  }
  return tokens;
}
Example 15: transferRbw
import org.apache.hadoop.security.token.Token; // import the required package/class
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b,
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));
  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
      new StorageType[]{StorageType.DEFAULT});
  out.flush();
  return BlockOpResponseProto.parseDelimitedFrom(in);
}