This article collects typical usage examples of the Java method org.apache.hadoop.mapreduce.security.TokenCache.getJobToken. If you are wondering what TokenCache.getJobToken does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also read more about the enclosing class, org.apache.hadoop.mapreduce.security.TokenCache, for further context.
Five code examples of TokenCache.getJobToken are shown below, ordered by popularity by default.
Example 1: createJobToken
import org.apache.hadoop.mapreduce.security.TokenCache; // import the package/class this method depends on
/**
 * Obtain JobToken, which we'll use as a credential for SASL authentication
 * when connecting to other Giraph BSPWorkers.
 *
 * @param conf Configuration
 * @return a JobToken containing username and password so that client can
 *         authenticate with a server.
 */
private Token<JobTokenIdentifier> createJobToken(Configuration conf)
    throws IOException {
  String localJobTokenFile = System.getenv().get(
      UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
  if (localJobTokenFile != null) {
    JobConf jobConf = new JobConf(conf);
    Credentials credentials =
        TokenCache.loadTokens(localJobTokenFile, jobConf);
    return TokenCache.getJobToken(credentials);
  } else {
    throw new IOException("createJobToken: Cannot obtain authentication " +
        "credentials for job: file: '" +
        UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION + "' not found");
  }
}
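This example relies on the token file that the framework publishes through the HADOOP_TOKEN_FILE_LOCATION environment variable. The sketch below shows the same lookup as a self-contained helper; it is illustrative only (the class name JobTokenLookup is invented here), and it uses Credentials.readTokenStorageFile, which in current Hadoop releases plays the role of the older TokenCache.loadTokens call used above.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

public class JobTokenLookup {
  /**
   * Illustrative helper (not part of Hadoop): read the credentials file
   * exposed via HADOOP_TOKEN_FILE_LOCATION and return the job token it
   * contains, or null if no file / no job token is present.
   */
  public static Token<JobTokenIdentifier> lookupJobToken(Configuration conf)
      throws IOException {
    String tokenFile =
        System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
    if (tokenFile == null) {
      return null;  // security is off or we are not running inside a task
    }
    // Equivalent of TokenCache.loadTokens(tokenFile, conf) in newer releases.
    Credentials credentials =
        Credentials.readTokenStorageFile(new File(tokenFile), conf);
    return TokenCache.getJobToken(credentials);
  }
}

Either way, TokenCache.getJobToken simply looks the job token up in the Credentials object and can return null when none was shipped, so callers should be prepared for that case (Example 5 below checks for it explicitly).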
Example 2: Application
import org.apache.hadoop.mapreduce.security.TokenCache; // import the package/class this method depends on
/**
 * Start the child process to handle the task for us.
 * @param conf the task's configuration
 * @param recordReader the fake record reader to update progress with
 * @param output the collector to send output to
 * @param reporter the reporter for the task
 * @param outputKeyClass the class of the output keys
 * @param outputValueClass the class of the output values
 * @throws IOException
 * @throws InterruptedException
 */
Application(JobConf conf,
            RecordReader<FloatWritable, NullWritable> recordReader,
            OutputCollector<K2,V2> output, Reporter reporter,
            Class<? extends K2> outputKeyClass,
            Class<? extends V2> outputValueClass
            ) throws IOException, InterruptedException {
  serverSocket = new ServerSocket(0);
  Map<String, String> env = new HashMap<String,String>();
  // add TMPDIR environment variable with the value of java.io.tmpdir
  env.put("TMPDIR", System.getProperty("java.io.tmpdir"));
  env.put(Submitter.PORT,
          Integer.toString(serverSocket.getLocalPort()));
  //Add token to the environment if security is enabled
  Token<JobTokenIdentifier> jobToken = TokenCache.getJobToken(conf
      .getCredentials());
  // This password is used as shared secret key between this application and
  // child pipes process
  byte[] password = jobToken.getPassword();
  String localPasswordFile = new File(".") + Path.SEPARATOR
      + "jobTokenPassword";
  writePasswordToLocalFile(localPasswordFile, password, conf);
  env.put("hadoop.pipes.shared.secret.location", localPasswordFile);
  List<String> cmd = new ArrayList<String>();
  String interpretor = conf.get(Submitter.INTERPRETOR);
  if (interpretor != null) {
    cmd.add(interpretor);
  }
  String executable = DistributedCache.getLocalCacheFiles(conf)[0].toString();
  if (!FileUtil.canExecute(new File(executable))) {
    // LinuxTaskController sets +x permissions on all distcache files already.
    // In case of DefaultTaskController, set permissions here.
    FileUtil.chmod(executable, "u+x");
  }
  cmd.add(executable);
  // wrap the command in a stdout/stderr capture
  // we are starting map/reduce task of the pipes job. this is not a cleanup
  // attempt.
  TaskAttemptID taskid =
      TaskAttemptID.forName(conf.get(MRJobConfig.TASK_ATTEMPT_ID));
  File stdout = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDOUT);
  File stderr = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDERR);
  long logLength = TaskLog.getTaskLogLength(conf);
  cmd = TaskLog.captureOutAndError(null, cmd, stdout, stderr, logLength,
      false);
  process = runClient(cmd, env);
  clientSocket = serverSocket.accept();
  String challenge = getSecurityChallenge();
  String digestToSend = createDigest(password, challenge);
  String digestExpected = createDigest(password, digestToSend);
  handler = new OutputHandler<K2, V2>(output, reporter, recordReader,
      digestExpected);
  K2 outputKey = (K2)
      ReflectionUtils.newInstance(outputKeyClass, conf);
  V2 outputValue = (V2)
      ReflectionUtils.newInstance(outputValueClass, conf);
  downlink = new BinaryProtocol<K1, V1, K2, V2>(clientSocket, handler,
      outputKey, outputValue, conf);
  downlink.authenticate(digestToSend, challenge);
  waitForAuthentication();
  LOG.debug("Authentication succeeded");
  downlink.start();
  downlink.setJobConf(conf);
}
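Everything after serverSocket.accept() is a challenge/response handshake: the parent generates a challenge, the child must answer with a digest keyed on the job-token password, and the parent compares the answer against the digest it pre-computed (digestExpected), so the password itself never crosses the socket. The helpers getSecurityChallenge and createDigest are not shown in the snippet; the sketch below illustrates the general pattern with a plain HMAC, which is an assumption for illustration rather than the exact digest routine Hadoop Pipes uses, and the class name SharedSecretHandshake is made up.

import java.nio.charset.StandardCharsets;
import java.security.GeneralSecurityException;
import java.security.SecureRandom;
import java.util.Base64;

import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;

/** Illustrative HMAC-based handshake; the actual Pipes digest routine may differ. */
public class SharedSecretHandshake {
  private static final SecureRandom RANDOM = new SecureRandom();

  /** Random challenge the parent sends to the child process. */
  public static String newChallenge() {
    byte[] nonce = new byte[16];
    RANDOM.nextBytes(nonce);
    return Base64.getEncoder().encodeToString(nonce);
  }

  /** Digest of a message keyed with the shared secret (the job-token password). */
  public static String digest(byte[] sharedSecret, String message)
      throws GeneralSecurityException {
    Mac mac = Mac.getInstance("HmacSHA1");
    mac.init(new SecretKeySpec(sharedSecret, "HmacSHA1"));
    byte[] raw = mac.doFinal(message.getBytes(StandardCharsets.UTF_8));
    return Base64.getEncoder().encodeToString(raw);
  }

  /** Parent-side check: the child's answer must equal the digest we computed ourselves. */
  public static boolean verify(byte[] sharedSecret, String expectedInput, String answer)
      throws GeneralSecurityException {
    return digest(sharedSecret, expectedInput).equals(answer);
  }
}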
Example 3: Application
import org.apache.hadoop.mapreduce.security.TokenCache; // import the package/class this method depends on
/**
 * Start the child process to handle the task for us.
 * @param conf the task's configuration
 * @param recordReader the fake record reader to update progress with
 * @param output the collector to send output to
 * @param reporter the reporter for the task
 * @param outputKeyClass the class of the output keys
 * @param outputValueClass the class of the output values
 * @throws IOException
 * @throws InterruptedException
 */
Application(JobConf conf,
            RecordReader<FloatWritable, NullWritable> recordReader,
            OutputCollector<K2,V2> output, Reporter reporter,
            Class<? extends K2> outputKeyClass,
            Class<? extends V2> outputValueClass
            ) throws IOException, InterruptedException {
  serverSocket = new ServerSocket(0);
  Map<String, String> env = new HashMap<String,String>();
  // add TMPDIR environment variable with the value of java.io.tmpdir
  env.put("TMPDIR", System.getProperty("java.io.tmpdir"));
  env.put("hadoop.pipes.command.port",
          Integer.toString(serverSocket.getLocalPort()));
  //Add token to the environment if security is enabled
  Token<JobTokenIdentifier> jobToken = TokenCache.getJobToken(conf
      .getCredentials());
  // This password is used as shared secret key between this application and
  // child pipes process
  byte[] password = jobToken.getPassword();
  String localPasswordFile = new File(".") + Path.SEPARATOR
      + "jobTokenPassword";
  writePasswordToLocalFile(localPasswordFile, password, conf);
  env.put("hadoop.pipes.shared.secret.location", localPasswordFile);
  List<String> cmd = new ArrayList<String>();
  String interpretor = conf.get("hadoop.pipes.executable.interpretor");
  if (interpretor != null) {
    cmd.add(interpretor);
  }
  String executable = DistributedCache.getLocalCacheFiles(conf)[0].toString();
  if (!new File(executable).canExecute()) {
    // LinuxTaskController sets +x permissions on all distcache files already.
    // In case of DefaultTaskController, set permissions here.
    FileUtil.chmod(executable, "u+x");
  }
  cmd.add(executable);
  // wrap the command in a stdout/stderr capture
  TaskAttemptID taskid = TaskAttemptID.forName(conf.get("mapred.task.id"));
  // we are starting map/reduce task of the pipes job. this is not a cleanup
  // attempt.
  File stdout = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDOUT);
  File stderr = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDERR);
  long logLength = TaskLog.getTaskLogLength(conf);
  cmd = TaskLog.captureOutAndError(null, cmd, stdout, stderr, logLength,
      false);
  process = runClient(cmd, env);
  clientSocket = serverSocket.accept();
  String challenge = getSecurityChallenge();
  String digestToSend = createDigest(password, challenge);
  String digestExpected = createDigest(password, digestToSend);
  handler = new OutputHandler<K2, V2>(output, reporter, recordReader,
      digestExpected);
  K2 outputKey = (K2)
      ReflectionUtils.newInstance(outputKeyClass, conf);
  V2 outputValue = (V2)
      ReflectionUtils.newInstance(outputValueClass, conf);
  downlink = new BinaryProtocol<K1, V1, K2, V2>(clientSocket, handler,
      outputKey, outputValue, conf);
  downlink.authenticate(digestToSend, challenge);
  waitForAuthentication();
  LOG.debug("Authentication succeeded");
  downlink.start();
  downlink.setJobConf(conf);
}
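This older variant differs from Example 2 mainly in the configuration keys it reads ("hadoop.pipes.command.port", "hadoop.pipes.executable.interpretor", "mapred.task.id" instead of the Submitter/MRJobConfig constants). In both versions the job-token password is handed to the child via a local file written by writePasswordToLocalFile, which the snippet does not show. Below is a minimal sketch of what such a helper might look like, assuming the standard Hadoop local FileSystem API and owner-only permissions; the class name PasswordFileWriter is invented for illustration.

import java.io.IOException;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.mapred.JobConf;

/** Sketch only; the real helper used by the example above is not shown in this page. */
public class PasswordFileWriter {
  /**
   * Write the shared secret to a local file that only the task's user can read,
   * so the child pipes process can pick it up via the
   * hadoop.pipes.shared.secret.location environment variable.
   */
  static void writePasswordToLocalFile(String localPasswordFile,
      byte[] password, JobConf conf) throws IOException {
    FileSystem localFs = FileSystem.getLocal(conf);
    Path localPath = new Path(localPasswordFile);
    // 0400: readable by the owner only.
    FSDataOutputStream out =
        FileSystem.create(localFs, localPath, new FsPermission((short) 0400));
    try {
      out.write(password);
    } finally {
      out.close();
    }
  }
}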
Example 4: Application
import org.apache.hadoop.mapreduce.security.TokenCache; // import the package/class this method depends on
/**
 * Start the child process to handle the task for us.
 * @param conf the task's configuration
 * @param recordReader the fake record reader to update progress with
 * @param output the collector to send output to
 * @param reporter the reporter for the task
 * @param outputKeyClass the class of the output keys
 * @param outputValueClass the class of the output values
 * @throws IOException
 * @throws InterruptedException
 */
Application(JobConf conf,
            RecordReader<FloatWritable, NullWritable> recordReader,
            OutputCollector<K2,V2> output, Reporter reporter,
            Class<? extends K2> outputKeyClass,
            Class<? extends V2> outputValueClass
            ) throws IOException, InterruptedException {
  serverSocket = new ServerSocket(0);
  Map<String, String> env = new HashMap<String,String>();
  // add TMPDIR environment variable with the value of java.io.tmpdir
  env.put("TMPDIR", System.getProperty("java.io.tmpdir"));
  env.put(Submitter.PORT,
          Integer.toString(serverSocket.getLocalPort()));
  //Add token to the environment if security is enabled
  Token<JobTokenIdentifier> jobToken = TokenCache.getJobToken(conf
      .getCredentials());
  // This password is used as shared secret key between this application and
  // child pipes process
  byte[] password = jobToken.getPassword();
  String localPasswordFile = new File(".") + Path.SEPARATOR
      + "jobTokenPassword";
  writePasswordToLocalFile(localPasswordFile, password, conf);
  env.put("hadoop.pipes.shared.secret.location", localPasswordFile);
  List<String> cmd = new ArrayList<String>();
  String interpretor = conf.get(Submitter.INTERPRETOR);
  if (interpretor != null) {
    cmd.add(interpretor);
  }
  String executable = DistributedCache.getLocalCacheFiles(conf)[0].toString();
  if (!new File(executable).canExecute()) {
    // LinuxTaskController sets +x permissions on all distcache files already.
    // In case of DefaultTaskController, set permissions here.
    FileUtil.chmod(executable, "u+x");
  }
  cmd.add(executable);
  // wrap the command in a stdout/stderr capture
  // we are starting map/reduce task of the pipes job. this is not a cleanup
  // attempt.
  TaskAttemptID taskid =
      TaskAttemptID.forName(conf.get(MRJobConfig.TASK_ATTEMPT_ID));
  File stdout = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDOUT);
  File stderr = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDERR);
  long logLength = TaskLog.getTaskLogLength(conf);
  cmd = TaskLog.captureOutAndError(null, cmd, stdout, stderr, logLength,
      false);
  process = runClient(cmd, env);
  clientSocket = serverSocket.accept();
  String challenge = getSecurityChallenge();
  String digestToSend = createDigest(password, challenge);
  String digestExpected = createDigest(password, digestToSend);
  handler = new OutputHandler<K2, V2>(output, reporter, recordReader,
      digestExpected);
  K2 outputKey = (K2)
      ReflectionUtils.newInstance(outputKeyClass, conf);
  V2 outputValue = (V2)
      ReflectionUtils.newInstance(outputValueClass, conf);
  downlink = new BinaryProtocol<K1, V1, K2, V2>(clientSocket, handler,
      outputKey, outputValue, conf);
  downlink.authenticate(digestToSend, challenge);
  waitForAuthentication();
  LOG.debug("Authentication succeeded");
  downlink.start();
  downlink.setJobConf(conf);
}
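The part relevant to TokenCache.getJobToken is the same in this variant: the job token's password becomes the shared secret, and its location on disk is published to the child through the hadoop.pipes.shared.secret.location environment variable. For completeness, here is a hypothetical child-side reader of that contract; the real Pipes client is a C++ program, so the Java class below (PipesChildSecret, an invented name) is purely illustrative.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;

/** Purely illustrative; the real Pipes child process is implemented in C++. */
public class PipesChildSecret {
  /** Read the shared secret from the path the parent published in the environment. */
  public static byte[] readSharedSecret() throws IOException {
    String location = System.getenv("hadoop.pipes.shared.secret.location");
    if (location == null) {
      throw new IOException("hadoop.pipes.shared.secret.location not set; "
          + "was the task launched with security enabled?");
    }
    return Files.readAllBytes(Paths.get(location));
  }
}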
Example 5: localizeJobFiles
import org.apache.hadoop.mapreduce.security.TokenCache; // import the package/class this method depends on
/**
 * Localize the job on this tasktracker. Specifically
 * <ul>
 * <li>Cleanup and create job directories on all disks</li>
 * <li>Download the job config file job.xml from the FS</li>
 * <li>Create the job work directory and set {@link TaskTracker#JOB_LOCAL_DIR}
 * in the configuration.
 * <li>Download the job jar file job.jar from the FS, unjar it and set jar
 * file in the configuration.</li>
 * </ul>
 *
 * @param t task whose job has to be localized on this TT
 * @return the modified job configuration to be used for all the tasks of this
 *         job as a starting point.
 * @throws IOException
 */
JobConf localizeJobFiles(Task t, RunningJob rjob)
    throws IOException, InterruptedException {
  JobID jobId = t.getJobID();
  String userName = t.getUser();
  // Initialize the job directories
  FileSystem localFs = FileSystem.getLocal(fConf);
  getLocalizer().initializeJobDirs(userName, jobId);
  // save local copy of JobToken file
  String localJobTokenFile = localizeJobTokenFile(t.getUser(), jobId);
  rjob.ugi = UserGroupInformation.createRemoteUser(t.getUser());
  Credentials ts = TokenCache.loadTokens(localJobTokenFile, fConf);
  Token<JobTokenIdentifier> jt = TokenCache.getJobToken(ts);
  if (jt != null) { //could be null in the case of some unit tests
    getJobTokenSecretManager().addTokenForJob(jobId.toString(), jt);
  }
  for (Token<? extends TokenIdentifier> token : ts.getAllTokens()) {
    rjob.ugi.addToken(token);
  }
  // Download the job.xml for this job from the system FS
  Path localJobFile =
      localizeJobConfFile(new Path(t.getJobFile()), userName, jobId);
  JobConf localJobConf = new JobConf(localJobFile);
  //WE WILL TRUST THE USERNAME THAT WE GOT FROM THE JOBTRACKER
  //AS PART OF THE TASK OBJECT
  localJobConf.setUser(userName);
  // set the location of the token file into jobConf to transfer
  // the name to TaskRunner
  localJobConf.set(TokenCache.JOB_TOKENS_FILENAME,
      localJobTokenFile);
  // create the 'job-work' directory: job-specific shared directory for use as
  // scratch space by all tasks of the same job running on this TaskTracker.
  Path workDir =
      lDirAlloc.getLocalPathForWrite(getJobWorkDir(userName, jobId
          .toString()), fConf);
  if (!localFs.mkdirs(workDir)) {
    throw new IOException("Mkdirs failed to create "
        + workDir.toString());
  }
  System.setProperty(JOB_LOCAL_DIR, workDir.toUri().getPath());
  localJobConf.set(JOB_LOCAL_DIR, workDir.toUri().getPath());
  // Download the job.jar for this job from the system FS
  localizeJobJarFile(userName, jobId, localFs, localJobConf);
  return localJobConf;
}
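In this example the job token is only one of possibly several tokens shipped in the credentials file: the job token is registered with the JobTokenSecretManager, while every token in the Credentials (including any delegation tokens) is attached to the job's UGI. The sketch below walks the same Credentials API to show that distinction; the class and method names (JobCredentialsInspector, attachAndDescribe) are invented for illustration.

import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

/** Illustrative only; not part of the example above. */
public class JobCredentialsInspector {
  /**
   * List the tokens shipped with a job and attach them to the given UGI,
   * mirroring what localizeJobFiles does with rjob.ugi.
   */
  public static void attachAndDescribe(Credentials creds, UserGroupInformation taskUgi) {
    Token<JobTokenIdentifier> jobToken = TokenCache.getJobToken(creds);
    if (jobToken == null) {
      System.out.println("no job token present (possible in some unit tests)");
    } else {
      System.out.println("job token for service: " + jobToken.getService());
    }
    for (Token<? extends TokenIdentifier> token : creds.getAllTokens()) {
      System.out.println("token kind=" + token.getKind()
          + " service=" + token.getService());
      taskUgi.addToken(token); // includes the job token and any delegation tokens
    }
  }
}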