This article collects typical usages of the Java method org.apache.hadoop.security.token.Token.getPassword. If you are puzzling over what Token.getPassword does, how to use it, or what it looks like in practice, the curated code samples below should help. You can also explore further usage examples of the class this method belongs to, org.apache.hadoop.security.token.Token.
The following shows 4 code examples of the Token.getPassword method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
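Before the examples, a quick orientation: a Token carries four raw fields, and getPassword() returns the shared-secret bytes that the issuing SecretManager derived from the token's identifier. Below is a minimal sketch that just reads those fields; the class and method names are our own, not taken from the examples that follow.

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class TokenFieldsSketch {
  // Dump the four raw fields of a token. The password is a shared secret,
  // so real code should avoid logging it; we only print its length.
  static <T extends TokenIdentifier> void describe(Token<T> token) {
    byte[] identifier = token.getIdentifier(); // serialized TokenIdentifier
    byte[] password = token.getPassword();     // secret bytes, keep private
    Text kind = token.getKind();               // token type, e.g. a delegation token
    Text service = token.getService();         // address of the issuing service
    System.out.println(kind + " for " + service + ": identifier="
        + identifier.length + " bytes, password=" + password.length + " bytes");
  }
}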
Example 1: testRollMasterKey
import org.apache.hadoop.security.token.Token; // import the package/class the method depends on

@Test(timeout = 10000)
public void testRollMasterKey() throws Exception {
  TestDelegationTokenSecretManager dtSecretManager =
      new TestDelegationTokenSecretManager(800, 800, 1 * 1000, 3600000);
  try {
    dtSecretManager.startThreads();
    // generate a token and store the password
    Token<TestDelegationTokenIdentifier> token = generateDelegationToken(
        dtSecretManager, "SomeUser", "JobTracker");
    byte[] oldPasswd = token.getPassword();
    // store the length of the keys list
    int prevNumKeys = dtSecretManager.getAllKeys().length;
    dtSecretManager.rollMasterKey();
    Assert.assertTrue(dtSecretManager.isStoreNewMasterKeyCalled);
    // after rolling, the length of the keys list must increase
    int currNumKeys = dtSecretManager.getAllKeys().length;
    Assert.assertTrue(currNumKeys - prevNumKeys >= 1);
    // after rolling, the token that was generated earlier must
    // still be valid (retrievePassword will fail if the token
    // is not valid)
    ByteArrayInputStream bi =
        new ByteArrayInputStream(token.getIdentifier());
    TestDelegationTokenIdentifier identifier =
        dtSecretManager.createIdentifier();
    identifier.readFields(new DataInputStream(bi));
    byte[] newPasswd =
        dtSecretManager.retrievePassword(identifier);
    // compare the passwords element-wise (they are byte arrays)
    Assert.assertArrayEquals(oldPasswd, newPasswd);
    // wait for keys to expire
    while (!dtSecretManager.isRemoveStoredMasterKeyCalled) {
      Thread.sleep(200);
    }
  } finally {
    dtSecretManager.stopThreads();
  }
}
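A small aside on the comparison above: the passwords are byte arrays, so the test must compare them element-wise (assertArrayEquals), not by reference. Outside of test code, where an attacker could time the comparison, a constant-time check is the safer pattern; a minimal sketch with a helper name of our own choosing:

import java.security.MessageDigest;

// Hypothetical helper, not part of the Hadoop test: compare two token
// passwords without leaking, via early exit, how many leading bytes match.
static boolean samePassword(byte[] a, byte[] b) {
  return MessageDigest.isEqual(a, b); // constant-time, unlike Arrays.equals
}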
Example 2: verifyTamperedToken
import org.apache.hadoop.security.token.Token; // import the package/class the method depends on

private void verifyTamperedToken(final Configuration conf, final CustomAM am,
    Token<ClientToAMTokenIdentifier> token, UserGroupInformation ugi,
    ClientToAMTokenIdentifier maliciousID) {
  Token<ClientToAMTokenIdentifier> maliciousToken =
      new Token<ClientToAMTokenIdentifier>(maliciousID.getBytes(),
          token.getPassword(), token.getKind(),
          token.getService());
  ugi.addToken(maliciousToken);
  try {
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        try {
          CustomProtocol client =
              (CustomProtocol) RPC.getProxy(CustomProtocol.class, 1L,
                  am.address, conf);
          client.ping();
          fail("Connection initiation with illegally modified "
              + "tokens is expected to fail.");
          return null;
        } catch (YarnException ex) {
          fail("Cannot get a YARN remote exception as "
              + "it will indicate RPC success");
          throw ex;
        }
      }
    });
  } catch (Exception e) {
    Assert.assertEquals(RemoteException.class.getName(), e.getClass()
        .getName());
    e = ((RemoteException) e).unwrapRemoteException();
    Assert.assertEquals(SaslException.class.getCanonicalName(),
        e.getClass().getCanonicalName());
    Assert.assertTrue(e.getMessage().contains(
        "DIGEST-MD5: digest response format violation. "
        + "Mismatched response."));
    Assert.assertFalse(am.pinged);
  }
}
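Why does this fail at the SASL layer? The server never trusts the password a client claims to hold; it re-derives the expected password from the identifier it receives. Swapping in maliciousID without recomputing the password therefore guarantees a mismatch. A purely illustrative sketch of that server-side idea (the helper name and boolean shape are ours; the real check happens inside the DIGEST-MD5 handshake, not as a direct comparison):

import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

// Illustrative only: re-derive the password from the received identifier
// and compare with what the client holds. A token whose identifier was
// swapped while the password was kept cannot pass this check.
static <T extends TokenIdentifier> boolean wouldAuthenticate(
    SecretManager<T> secretManager, T receivedIdentifier,
    Token<T> clientToken) throws InvalidToken {
  byte[] expected = secretManager.retrievePassword(receivedIdentifier);
  return java.security.MessageDigest.isEqual(expected,
      clientToken.getPassword());
}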
Example 3: configureTask
import org.apache.hadoop.security.token.Token; // import the package/class the method depends on

private static void configureTask(JobConf job, Task task,
    Credentials credentials, Token<JobTokenIdentifier> jt) throws IOException {
  job.setCredentials(credentials);
  ApplicationAttemptId appAttemptId =
      ConverterUtils.toContainerId(
          System.getenv(Environment.CONTAINER_ID.name()))
          .getApplicationAttemptId();
  LOG.debug("APPLICATION_ATTEMPT_ID: " + appAttemptId);
  // Set it in conf, so that it can be used by the OutputCommitter.
  job.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID,
      appAttemptId.getAttemptId());
  // set tcp nodelay
  job.setBoolean("ipc.client.tcpnodelay", true);
  job.setClass(MRConfig.TASK_LOCAL_OUTPUT_CLASS,
      YarnOutputFiles.class, MapOutputFile.class);
  // set the jobToken and shuffle secrets into the task
  task.setJobTokenSecret(
      JobTokenSecretManager.createSecretKey(jt.getPassword()));
  byte[] shuffleSecret = TokenCache.getShuffleSecretKey(credentials);
  if (shuffleSecret == null) {
    LOG.warn("Shuffle secret missing from task credentials."
        + " Using job token secret as shuffle secret.");
    shuffleSecret = jt.getPassword();
  }
  task.setShuffleSecret(
      JobTokenSecretManager.createSecretKey(shuffleSecret));
  // set up the child's MRConfig.LOCAL_DIR
  configureLocalDirs(task, job);
  // set up the child's attempt directories;
  // do the task-type specific localization
  task.localizeConfiguration(job);
  // set up the DistributedCache related configs
  MRApps.setupDistributedCacheLocal(job);
  // Overwrite the localized task jobconf which is linked to in the current
  // work-dir.
  Path localTaskFile = new Path(MRJobConfig.JOB_CONF_FILE);
  writeLocalJobFile(localTaskFile, job);
  task.setJobFile(localTaskFile.toString());
  task.setConf(job);
}
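The getPassword() calls here feed JobTokenSecretManager.createSecretKey, which turns the raw password bytes into a javax.crypto SecretKey suitable for HMAC use. A rough sketch of the equivalent wrapping; we assume Hadoop's historical HmacSHA1 default, so treat the algorithm name as an assumption:

import javax.crypto.SecretKey;
import javax.crypto.spec.SecretKeySpec;

// Approximately what createSecretKey(byte[]) amounts to: wrap the token
// password so javax.crypto.Mac can sign with it. "HmacSHA1" is assumed.
static SecretKey toSecretKey(byte[] password) {
  return new SecretKeySpec(password, "HmacSHA1");
}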
Example 4: Application
import org.apache.hadoop.security.token.Token; // import the package/class the method depends on

/**
 * Start the child process to handle the task for us.
 * @param conf the task's configuration
 * @param recordReader the fake record reader to update progress with
 * @param output the collector to send output to
 * @param reporter the reporter for the task
 * @param outputKeyClass the class of the output keys
 * @param outputValueClass the class of the output values
 * @throws IOException
 * @throws InterruptedException
 */
Application(JobConf conf,
    RecordReader<FloatWritable, NullWritable> recordReader,
    OutputCollector<K2, V2> output, Reporter reporter,
    Class<? extends K2> outputKeyClass,
    Class<? extends V2> outputValueClass
    ) throws IOException, InterruptedException {
  serverSocket = new ServerSocket(0);
  Map<String, String> env = new HashMap<String, String>();
  // add TMPDIR environment variable with the value of java.io.tmpdir
  env.put("TMPDIR", System.getProperty("java.io.tmpdir"));
  env.put(Submitter.PORT,
      Integer.toString(serverSocket.getLocalPort()));
  // add the token to the environment if security is enabled
  Token<JobTokenIdentifier> jobToken = TokenCache.getJobToken(conf
      .getCredentials());
  // This password is used as the shared secret key between this application
  // and the child pipes process.
  byte[] password = jobToken.getPassword();
  String localPasswordFile = new File(".") + Path.SEPARATOR
      + "jobTokenPassword";
  writePasswordToLocalFile(localPasswordFile, password, conf);
  env.put("hadoop.pipes.shared.secret.location", localPasswordFile);
  List<String> cmd = new ArrayList<String>();
  String interpretor = conf.get(Submitter.INTERPRETOR);
  if (interpretor != null) {
    cmd.add(interpretor);
  }
  String executable = DistributedCache.getLocalCacheFiles(conf)[0].toString();
  if (!FileUtil.canExecute(new File(executable))) {
    // LinuxTaskController sets +x permissions on all distcache files already.
    // In case of DefaultTaskController, set permissions here.
    FileUtil.chmod(executable, "u+x");
  }
  cmd.add(executable);
  // wrap the command in a stdout/stderr capture;
  // we are starting the map/reduce task of the pipes job, not a cleanup
  // attempt
  TaskAttemptID taskid =
      TaskAttemptID.forName(conf.get(MRJobConfig.TASK_ATTEMPT_ID));
  File stdout = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDOUT);
  File stderr = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDERR);
  long logLength = TaskLog.getTaskLogLength(conf);
  cmd = TaskLog.captureOutAndError(null, cmd, stdout, stderr, logLength,
      false);
  process = runClient(cmd, env);
  clientSocket = serverSocket.accept();
  String challenge = getSecurityChallenge();
  String digestToSend = createDigest(password, challenge);
  String digestExpected = createDigest(password, digestToSend);
  handler = new OutputHandler<K2, V2>(output, reporter, recordReader,
      digestExpected);
  K2 outputKey = (K2)
      ReflectionUtils.newInstance(outputKeyClass, conf);
  V2 outputValue = (V2)
      ReflectionUtils.newInstance(outputValueClass, conf);
  downlink = new BinaryProtocol<K1, V1, K2, V2>(clientSocket, handler,
      outputKey, outputValue, conf);
  downlink.authenticate(digestToSend, challenge);
  waitForAuthentication();
  LOG.debug("Authentication succeeded");
  downlink.start();
  downlink.setJobConf(conf);
}
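The mutual authentication at the end turns entirely on createDigest(password, data): the parent sends createDigest(password, challenge) to prove it knows the job-token password, and expects the child to answer with createDigest(password, digestToSend). A minimal sketch of such a digest function, assuming an HMAC over the data keyed by the token password; the real helper may choose a different algorithm or encoding:

import java.nio.charset.StandardCharsets;
import java.util.Base64;
import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;

// Sketch, assuming HmacSHA1 and Base64 output: both sides can compute this,
// but only a process holding the job-token password gets the same answer.
static String createDigest(byte[] password, String data) throws Exception {
  Mac mac = Mac.getInstance("HmacSHA1");
  mac.init(new SecretKeySpec(password, "HmacSHA1"));
  byte[] out = mac.doFinal(data.getBytes(StandardCharsets.UTF_8));
  return Base64.getEncoder().encodeToString(out);
}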