This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager.retrievePassword. If you are unsure what DelegationTokenSecretManager.retrievePassword does or how to call it, the hand-picked code examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager.
Three code examples of DelegationTokenSecretManager.retrievePassword are shown below, sorted by popularity by default.
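Before the full test examples, here is a minimal sketch of the call pattern they all share. It assumes you already hold a DelegationTokenSecretManager (for example obtained from the NameNode's FSNamesystem, as in the examples below) and a Token<DelegationTokenIdentifier>; the class and method names of the sketch itself are illustrative, not part of Hadoop.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.security.token.Token;

public class RetrievePasswordSketch {
  /**
   * Rebuilds the identifier carried inside a delegation token and asks the
   * secret manager for the matching password bytes. retrievePassword throws
   * SecretManager.InvalidToken (an IOException) when the token is unknown,
   * cancelled or expired.
   */
  static byte[] passwordFor(DelegationTokenSecretManager dtSecretManager,
      Token<DelegationTokenIdentifier> token) throws IOException {
    DelegationTokenIdentifier id = new DelegationTokenIdentifier();
    id.readFields(new DataInputStream(
        new ByteArrayInputStream(token.getIdentifier())));
    return dtSecretManager.retrievePassword(id);
  }
}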
Example 1: testDelegationTokenSecretManager
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager; // import the package/class this method depends on
@Test
public void testDelegationTokenSecretManager() throws Exception {
  DelegationTokenSecretManager dtSecretManager = cluster.getNameNode()
      .getNamesystem().getDelegationTokenSecretManager();
  Token<DelegationTokenIdentifier> token = generateDelegationToken(
      "SomeUser", "JobTracker");
  // Fake renewer should not be able to renew
  try {
    dtSecretManager.renewToken(token, "FakeRenewer");
    Assert.fail("should have failed");
  } catch (AccessControlException ace) {
    // PASS
  }
  dtSecretManager.renewToken(token, "JobTracker");
  DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
  byte[] tokenId = token.getIdentifier();
  identifier.readFields(new DataInputStream(
      new ByteArrayInputStream(tokenId)));
  Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
  LOG.info("Sleep to expire the token");
  Thread.sleep(6000);
  // Token should be expired
  try {
    dtSecretManager.retrievePassword(identifier);
    // Should not come here
    Assert.fail("Token should have expired");
  } catch (InvalidToken e) {
    // Success
  }
  dtSecretManager.renewToken(token, "JobTracker");
  LOG.info("Sleep beyond the max lifetime");
  Thread.sleep(5000);
  try {
    dtSecretManager.renewToken(token, "JobTracker");
    Assert.fail("should have been expired");
  } catch (InvalidToken it) {
    // PASS
  }
}
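The Thread.sleep(6000) and Thread.sleep(5000) calls above only make sense because the test cluster is started with very short delegation-token timers; that setup code is not shown on this page. A hedged sketch of what such a JUnit setup typically looks like follows, where the concrete millisecond values and class name are illustrative assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Before;

public class ShortTokenLifetimeSetupSketch {
  private MiniDFSCluster cluster;

  @Before
  public void setUp() throws Exception {
    Configuration config = new HdfsConfiguration();
    // Illustrative values: make the renew interval and the maximum token
    // lifetime short enough that a few seconds of sleeping expires the token.
    config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY, 10000);
    config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
    config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(0).build();
    cluster.waitActive();
  }
}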
Example 2: testDelegationTokenSecretManager
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager; // import the package/class this method depends on
@Test
public void testDelegationTokenSecretManager() throws Exception {
  DelegationTokenSecretManager dtSecretManager = cluster.getNamesystem()
      .getDelegationTokenSecretManager();
  Token<DelegationTokenIdentifier> token = generateDelegationToken(
      "SomeUser", "JobTracker");
  // Fake renewer should not be able to renew
  try {
    dtSecretManager.renewToken(token, "FakeRenewer");
    Assert.fail("should have failed");
  } catch (AccessControlException ace) {
    // PASS
  }
  dtSecretManager.renewToken(token, "JobTracker");
  DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
  byte[] tokenId = token.getIdentifier();
  identifier.readFields(new DataInputStream(
      new ByteArrayInputStream(tokenId)));
  Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
  LOG.info("Sleep to expire the token");
  Thread.sleep(6000);
  // Token should be expired
  try {
    dtSecretManager.retrievePassword(identifier);
    // Should not come here
    Assert.fail("Token should have expired");
  } catch (InvalidToken e) {
    // Success
  }
  dtSecretManager.renewToken(token, "JobTracker");
  LOG.info("Sleep beyond the max lifetime");
  Thread.sleep(5000);
  try {
    dtSecretManager.renewToken(token, "JobTracker");
    Assert.fail("should have been expired");
  } catch (InvalidToken it) {
    // PASS
  }
}
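Both tests rely on a generateDelegationToken helper that this page does not show. Below is a hedged sketch of what such a helper commonly looks like: it builds a DelegationTokenIdentifier for the given owner and renewer and lets the same secret manager sign it, so that retrievePassword can later find it. In the tests the secret manager would normally be a field rather than a parameter; it is passed explicitly here only to keep the sketch self-contained, and the body is an assumption.

import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;

public class GenerateDelegationTokenSketch {
  /** Creates an identifier for owner/renewer and has the secret manager sign it. */
  static Token<DelegationTokenIdentifier> generateDelegationToken(
      DelegationTokenSecretManager dtSecretManager, String owner, String renewer) {
    DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(
        new Text(owner), new Text(renewer), null);
    // The Token(identifier, secretManager) constructor asks the secret manager
    // to compute the password, which also registers the token for later
    // lookup via retrievePassword().
    return new Token<DelegationTokenIdentifier>(dtId, dtSecretManager);
  }
}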
Example 3: testClientFailoverWhenStandbyNNHasStaleCredentials
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager; // import the package/class this method depends on
@Test
public void testClientFailoverWhenStandbyNNHasStaleCredentials()
    throws IOException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  conf.setBoolean(DFSConfigKeys
      .DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();
    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();

    fs = (WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf);

    cluster.transitionToActive(0);
    Token<?> token = fs.getDelegationToken(null);
    final DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
    identifier.readFields(
        new DataInputStream(new ByteArrayInputStream(token.getIdentifier())));
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);

    final DelegationTokenSecretManager secretManager =
        NameNodeAdapter.getDtSecretManager(cluster.getNamesystem(0));

    ExceptionHandler eh = new ExceptionHandler();
    eh.initResponse(mock(HttpServletResponse.class));
    Response resp = null;
    try {
      secretManager.retrievePassword(identifier);
    } catch (IOException e) {
      // Mimic the server-side UserProvider logic by throwing
      // SecurityException here
      Assert.assertTrue(e instanceof SecretManager.InvalidToken);
      resp = eh.toResponse(new SecurityException(e));
    }
    // The Response (resp) below is what the server will send to the client.
    //
    // BEFORE the HDFS-6475 fix, resp.entity is
    //   {"RemoteException":{"exception":"SecurityException",
    //    "javaClassName":"java.lang.SecurityException",
    //    "message":"Failed to obtain user group information:
    //      org.apache.hadoop.security.token.SecretManager$InvalidToken:
    //      StandbyException"}}
    // AFTER the fix, resp.entity is
    //   {"RemoteException":{"exception":"StandbyException",
    //    "javaClassName":"org.apache.hadoop.ipc.StandbyException",
    //    "message":"Operation category READ is not supported in
    //      state standby"}}
    //
    // Mimic the client-side logic by parsing the response from the server.
    Map<?, ?> m = (Map<?, ?>) JSON.parse(resp.getEntity().toString());
    RemoteException re = JsonUtilClient.toRemoteException(m);
    Exception unwrapped = re.unwrapRemoteException(StandbyException.class);
    Assert.assertTrue(unwrapped instanceof StandbyException);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
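Example 3 is, in effect, a regression test for the HDFS-6475 behaviour described in its comments: when retrievePassword is invoked on a NameNode that has gone standby and holds stale credentials, the WebHDFS ExceptionHandler must surface the underlying StandbyException rather than a generic SecurityException, so that a WebHDFS client can unwrap the RemoteException and fail over to the active NameNode.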