

Java TokenCache.getJobToken Method Code Examples

This article collects typical usage examples of the Java method TokenCache.getJobToken from org.apache.hadoop.mapreduce.security.TokenCache. If you are wondering what exactly TokenCache.getJobToken does, how to call it, or what real-world uses look like, the curated method examples below may help. You can also explore further usage examples of the containing class, org.apache.hadoop.mapreduce.security.TokenCache.


Below are 5 code examples of the TokenCache.getJobToken method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
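For orientation, here is a minimal, self-contained sketch of the typical call pattern before the full examples: load a Credentials object from the task's token file and extract the job token from it. This sketch is not taken from any of the projects below; the class name JobTokenLookup, the lookup method, and the null return when no token file is present are illustrative assumptions.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

public class JobTokenLookup {
  /**
   * Reads the token file named by HADOOP_TOKEN_FILE_LOCATION (set inside a
   * running task) and returns the job token, or null if no token file is
   * present. The null handling is an illustrative choice, not library behavior.
   */
  public static Token<JobTokenIdentifier> lookup(Configuration conf)
      throws IOException {
    String tokenFile =
        System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
    if (tokenFile == null) {
      return null; // not running inside a secured task
    }
    // Load all credentials from the localized token file, then pick out
    // the job token that getJobToken registers under its internal alias.
    Credentials credentials = TokenCache.loadTokens(tokenFile, new JobConf(conf));
    return TokenCache.getJobToken(credentials);
  }
}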

Example 1: createJobToken

import org.apache.hadoop.mapreduce.security.TokenCache; // import the package/class this method depends on
/**
 * Obtain JobToken, which we'll use as a credential for SASL authentication
 * when connecting to other Giraph BSPWorkers.
 *
 * @param conf Configuration
 * @return a JobToken containing username and password so that client can
 * authenticate with a server.
 */
private Token<JobTokenIdentifier> createJobToken(Configuration conf)
  throws IOException {
  String localJobTokenFile = System.getenv().get(
      UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
  if (localJobTokenFile != null) {
    JobConf jobConf = new JobConf(conf);
    Credentials credentials =
        TokenCache.loadTokens(localJobTokenFile, jobConf);
    return TokenCache.getJobToken(credentials);
  } else {
    throw new IOException("createJobToken: Cannot obtain authentication " +
        "credentials for job: file: '" +
        UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION + "' not found");
  }
}
 
Developer ID: renato2099, Project: giraph-gora, Lines of code: 24, Source file: SaslNettyClient.java

Example 2: Application

import org.apache.hadoop.mapreduce.security.TokenCache; // import the package/class this method depends on
/**
 * Start the child process to handle the task for us.
 * @param conf the task's configuration
 * @param recordReader the fake record reader to update progress with
 * @param output the collector to send output to
 * @param reporter the reporter for the task
 * @param outputKeyClass the class of the output keys
 * @param outputValueClass the class of the output values
 * @throws IOException
 * @throws InterruptedException
 */
Application(JobConf conf, 
            RecordReader<FloatWritable, NullWritable> recordReader, 
            OutputCollector<K2,V2> output, Reporter reporter,
            Class<? extends K2> outputKeyClass,
            Class<? extends V2> outputValueClass
            ) throws IOException, InterruptedException {
  serverSocket = new ServerSocket(0);
  Map<String, String> env = new HashMap<String,String>();
  // add TMPDIR environment variable with the value of java.io.tmpdir
  env.put("TMPDIR", System.getProperty("java.io.tmpdir"));
  env.put(Submitter.PORT, 
          Integer.toString(serverSocket.getLocalPort()));
  
  //Add token to the environment if security is enabled
  Token<JobTokenIdentifier> jobToken = TokenCache.getJobToken(conf
      .getCredentials());
  // This password is used as shared secret key between this application and
  // child pipes process
  byte[]  password = jobToken.getPassword();
  String localPasswordFile = new File(".") + Path.SEPARATOR
      + "jobTokenPassword";
  writePasswordToLocalFile(localPasswordFile, password, conf);
  env.put("hadoop.pipes.shared.secret.location", localPasswordFile);
 
  List<String> cmd = new ArrayList<String>();
  String interpretor = conf.get(Submitter.INTERPRETOR);
  if (interpretor != null) {
    cmd.add(interpretor);
  }
  String executable = DistributedCache.getLocalCacheFiles(conf)[0].toString();
  if (!FileUtil.canExecute(new File(executable))) {
    // LinuxTaskController sets +x permissions on all distcache files already.
    // In case of DefaultTaskController, set permissions here.
    FileUtil.chmod(executable, "u+x");
  }
  cmd.add(executable);
  // wrap the command in a stdout/stderr capture
  // we are starting map/reduce task of the pipes job. this is not a cleanup
  // attempt. 
  TaskAttemptID taskid = 
    TaskAttemptID.forName(conf.get(MRJobConfig.TASK_ATTEMPT_ID));
  File stdout = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDOUT);
  File stderr = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDERR);
  long logLength = TaskLog.getTaskLogLength(conf);
  cmd = TaskLog.captureOutAndError(null, cmd, stdout, stderr, logLength,
                                   false);
  
  process = runClient(cmd, env);
  clientSocket = serverSocket.accept();
  
  String challenge = getSecurityChallenge();
  String digestToSend = createDigest(password, challenge);
  String digestExpected = createDigest(password, digestToSend);
  
  handler = new OutputHandler<K2, V2>(output, reporter, recordReader, 
      digestExpected);
  K2 outputKey = (K2)
    ReflectionUtils.newInstance(outputKeyClass, conf);
  V2 outputValue = (V2) 
    ReflectionUtils.newInstance(outputValueClass, conf);
  downlink = new BinaryProtocol<K1, V1, K2, V2>(clientSocket, handler, 
                                outputKey, outputValue, conf);
  
  downlink.authenticate(digestToSend, challenge);
  waitForAuthentication();
  LOG.debug("Authentication succeeded");
  downlink.start();
  downlink.setJobConf(conf);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 81, Source file: Application.java
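Examples 2 through 4 use the job token's password as a shared secret between the parent task and the child pipes process: the parent sends a challenge, expects back a keyed digest of it, and in turn proves itself by answering the child's challenge. As a rough, hedged illustration of that handshake, the sketch below computes such digests with plain javax.crypto HMAC-SHA1 plus Base64; the real Hadoop code goes through JobTokenSecretManager and SecureShuffleUtils, so treat the class name PipesDigestSketch and this createDigest as an approximation, not the library implementation.

import java.nio.charset.StandardCharsets;
import java.util.Base64;

import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;

public class PipesDigestSketch {
  /** Returns Base64(HMAC-SHA1(key = password, message = data)). */
  public static String createDigest(byte[] password, String data)
      throws Exception {
    Mac mac = Mac.getInstance("HmacSHA1");
    mac.init(new SecretKeySpec(password, "HmacSHA1"));
    byte[] raw = mac.doFinal(data.getBytes(StandardCharsets.UTF_8));
    return Base64.getEncoder().encodeToString(raw);
  }

  public static void main(String[] args) throws Exception {
    // The password would come from TokenCache.getJobToken(...).getPassword();
    // here it is a stand-in value for illustration only.
    byte[] password = "job-token-password".getBytes(StandardCharsets.UTF_8);
    String challenge = "1234567890";                    // sent to the child
    String digestToSend = createDigest(password, challenge);
    // The parent then expects the child to answer with a digest of digestToSend.
    String digestExpected = createDigest(password, digestToSend);
    System.out.println(digestToSend + " / " + digestExpected);
  }
}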

Example 3: Application

import org.apache.hadoop.mapreduce.security.TokenCache; // import the package/class this method depends on
/**
 * Start the child process to handle the task for us.
 * @param conf the task's configuration
 * @param recordReader the fake record reader to update progress with
 * @param output the collector to send output to
 * @param reporter the reporter for the task
 * @param outputKeyClass the class of the output keys
 * @param outputValueClass the class of the output values
 * @throws IOException
 * @throws InterruptedException
 */
Application(JobConf conf, 
            RecordReader<FloatWritable, NullWritable> recordReader, 
            OutputCollector<K2,V2> output, Reporter reporter,
            Class<? extends K2> outputKeyClass,
            Class<? extends V2> outputValueClass
            ) throws IOException, InterruptedException {
  serverSocket = new ServerSocket(0);
  Map<String, String> env = new HashMap<String,String>();
  // add TMPDIR environment variable with the value of java.io.tmpdir
  env.put("TMPDIR", System.getProperty("java.io.tmpdir"));
  env.put("hadoop.pipes.command.port", 
          Integer.toString(serverSocket.getLocalPort()));
  
  //Add token to the environment if security is enabled
  Token<JobTokenIdentifier> jobToken = TokenCache.getJobToken(conf
      .getCredentials());
  // This password is used as shared secret key between this application and
  // child pipes process
  byte[]  password = jobToken.getPassword();
  String localPasswordFile = new File(".") + Path.SEPARATOR
      + "jobTokenPassword";
  writePasswordToLocalFile(localPasswordFile, password, conf);
  env.put("hadoop.pipes.shared.secret.location", localPasswordFile);
 
  List<String> cmd = new ArrayList<String>();
  String interpretor = conf.get("hadoop.pipes.executable.interpretor");
  if (interpretor != null) {
    cmd.add(interpretor);
  }

  String executable = DistributedCache.getLocalCacheFiles(conf)[0].toString();
  if (!new File(executable).canExecute()) {
    // LinuxTaskController sets +x permissions on all distcache files already.
    // In case of DefaultTaskController, set permissions here.
    FileUtil.chmod(executable, "u+x");
  }
  cmd.add(executable);
  // wrap the command in a stdout/stderr capture
  TaskAttemptID taskid = TaskAttemptID.forName(conf.get("mapred.task.id"));
  // we are starting map/reduce task of the pipes job. this is not a cleanup
  // attempt. 
  File stdout = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDOUT);
  File stderr = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDERR);
  long logLength = TaskLog.getTaskLogLength(conf);
  cmd = TaskLog.captureOutAndError(null, cmd, stdout, stderr, logLength,
      false);

  process = runClient(cmd, env);
  clientSocket = serverSocket.accept();
  
  String challenge = getSecurityChallenge();
  String digestToSend = createDigest(password, challenge);
  String digestExpected = createDigest(password, digestToSend);
  
  handler = new OutputHandler<K2, V2>(output, reporter, recordReader, 
      digestExpected);
  K2 outputKey = (K2)
    ReflectionUtils.newInstance(outputKeyClass, conf);
  V2 outputValue = (V2) 
    ReflectionUtils.newInstance(outputValueClass, conf);
  downlink = new BinaryProtocol<K1, V1, K2, V2>(clientSocket, handler, 
                                outputKey, outputValue, conf);
  
  downlink.authenticate(digestToSend, challenge);
  waitForAuthentication();
  LOG.debug("Authentication succeeded");
  downlink.start();
  downlink.setJobConf(conf);
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 81, Source file: Application.java

Example 4: Application

import org.apache.hadoop.mapreduce.security.TokenCache; // import the package/class this method depends on
/**
 * Start the child process to handle the task for us.
 * @param conf the task's configuration
 * @param recordReader the fake record reader to update progress with
 * @param output the collector to send output to
 * @param reporter the reporter for the task
 * @param outputKeyClass the class of the output keys
 * @param outputValueClass the class of the output values
 * @throws IOException
 * @throws InterruptedException
 */
Application(JobConf conf, 
            RecordReader<FloatWritable, NullWritable> recordReader, 
            OutputCollector<K2,V2> output, Reporter reporter,
            Class<? extends K2> outputKeyClass,
            Class<? extends V2> outputValueClass
            ) throws IOException, InterruptedException {
  serverSocket = new ServerSocket(0);
  Map<String, String> env = new HashMap<String,String>();
  // add TMPDIR environment variable with the value of java.io.tmpdir
  env.put("TMPDIR", System.getProperty("java.io.tmpdir"));
  env.put(Submitter.PORT, 
          Integer.toString(serverSocket.getLocalPort()));
  
  //Add token to the environment if security is enabled
  Token<JobTokenIdentifier> jobToken = TokenCache.getJobToken(conf
      .getCredentials());
  // This password is used as shared secret key between this application and
  // child pipes process
  byte[]  password = jobToken.getPassword();
  String localPasswordFile = new File(".") + Path.SEPARATOR
      + "jobTokenPassword";
  writePasswordToLocalFile(localPasswordFile, password, conf);
  env.put("hadoop.pipes.shared.secret.location", localPasswordFile);
 
  List<String> cmd = new ArrayList<String>();
  String interpretor = conf.get(Submitter.INTERPRETOR);
  if (interpretor != null) {
    cmd.add(interpretor);
  }

  String executable = DistributedCache.getLocalCacheFiles(conf)[0].toString();
  if (!new File(executable).canExecute()) {
    // LinuxTaskController sets +x permissions on all distcache files already.
    // In case of DefaultTaskController, set permissions here.
    FileUtil.chmod(executable, "u+x");
  }
  cmd.add(executable);
  // wrap the command in a stdout/stderr capture
  // we are starting map/reduce task of the pipes job. this is not a cleanup
  // attempt. 
  TaskAttemptID taskid = 
    TaskAttemptID.forName(conf.get(MRJobConfig.TASK_ATTEMPT_ID));
  File stdout = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDOUT);
  File stderr = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDERR);
  long logLength = TaskLog.getTaskLogLength(conf);
  cmd = TaskLog.captureOutAndError(null, cmd, stdout, stderr, logLength,
                                   false);

  process = runClient(cmd, env);
  clientSocket = serverSocket.accept();
  
  String challenge = getSecurityChallenge();
  String digestToSend = createDigest(password, challenge);
  String digestExpected = createDigest(password, digestToSend);
  
  handler = new OutputHandler<K2, V2>(output, reporter, recordReader, 
      digestExpected);
  K2 outputKey = (K2)
    ReflectionUtils.newInstance(outputKeyClass, conf);
  V2 outputValue = (V2) 
    ReflectionUtils.newInstance(outputValueClass, conf);
  downlink = new BinaryProtocol<K1, V1, K2, V2>(clientSocket, handler, 
                                outputKey, outputValue, conf);
  
  downlink.authenticate(digestToSend, challenge);
  waitForAuthentication();
  LOG.debug("Authentication succeeded");
  downlink.start();
  downlink.setJobConf(conf);
}
 
Developer ID: rekhajoshm, Project: mapreduce-fork, Lines of code: 82, Source file: Application.java

Example 5: localizeJobFiles

import org.apache.hadoop.mapreduce.security.TokenCache; // import the package/class this method depends on
/**
 * Localize the job on this tasktracker. Specifically
 * <ul>
 * <li>Cleanup and create job directories on all disks</li>
 * <li>Download the job config file job.xml from the FS</li>
 * <li>Create the job work directory and set {@link TaskTracker#JOB_LOCAL_DIR}
 * in the configuration.
 * <li>Download the job jar file job.jar from the FS, unjar it and set jar
 * file in the configuration.</li>
 * </ul>
 * 
 * @param t task whose job has to be localized on this TT
 * @return the modified job configuration to be used for all the tasks of this
 *         job as a starting point.
 * @throws IOException
 */
JobConf localizeJobFiles(Task t, RunningJob rjob)
    throws IOException, InterruptedException {
  JobID jobId = t.getJobID();
  String userName = t.getUser();

  // Initialize the job directories
  FileSystem localFs = FileSystem.getLocal(fConf);
  getLocalizer().initializeJobDirs(userName, jobId);
  // save local copy of JobToken file
  String localJobTokenFile = localizeJobTokenFile(t.getUser(), jobId);
  rjob.ugi = UserGroupInformation.createRemoteUser(t.getUser());

  Credentials ts = TokenCache.loadTokens(localJobTokenFile, fConf);
  Token<JobTokenIdentifier> jt = TokenCache.getJobToken(ts);
  if (jt != null) { //could be null in the case of some unit tests
    getJobTokenSecretManager().addTokenForJob(jobId.toString(), jt);
  }
  for (Token<? extends TokenIdentifier> token : ts.getAllTokens()) {
    rjob.ugi.addToken(token);
  }
  // Download the job.xml for this job from the system FS
  Path localJobFile =
      localizeJobConfFile(new Path(t.getJobFile()), userName, jobId);

  JobConf localJobConf = new JobConf(localJobFile);
  //WE WILL TRUST THE USERNAME THAT WE GOT FROM THE JOBTRACKER
  //AS PART OF THE TASK OBJECT
  localJobConf.setUser(userName);
  
  // set the location of the token file into jobConf to transfer 
  // the name to TaskRunner
  localJobConf.set(TokenCache.JOB_TOKENS_FILENAME,
      localJobTokenFile);
  

  // create the 'job-work' directory: job-specific shared directory for use as
  // scratch space by all tasks of the same job running on this TaskTracker. 
  Path workDir =
      lDirAlloc.getLocalPathForWrite(getJobWorkDir(userName, jobId
          .toString()), fConf);
  if (!localFs.mkdirs(workDir)) {
    throw new IOException("Mkdirs failed to create " 
                + workDir.toString());
  }
  System.setProperty(JOB_LOCAL_DIR, workDir.toUri().getPath());
  localJobConf.set(JOB_LOCAL_DIR, workDir.toUri().getPath());
  // Download the job.jar for this job from the system FS
  localizeJobJarFile(userName, jobId, localFs, localJobConf);
  
  return localJobConf;
}
 
Developer ID: rekhajoshm, Project: mapreduce-fork, Lines of code: 68, Source file: TaskTracker.java


Note: The org.apache.hadoop.mapreduce.security.TokenCache.getJobToken examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.