当前位置: 首页>>代码示例>>Java>>正文


Java Credentials.getAllTokens方法代码示例

本文整理汇总了Java中org.apache.hadoop.security.Credentials.getAllTokens方法的典型用法代码示例。如果您正苦于以下问题:Java Credentials.getAllTokens方法的具体用法?Java Credentials.getAllTokens怎么用?Java Credentials.getAllTokens使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.security.Credentials的用法示例。


在下文中一共展示了Credentials.getAllTokens方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: parseCredentials

import org.apache.hadoop.security.Credentials; //导入方法依赖的package包/类
private Credentials parseCredentials(ContainerLaunchContext launchContext)
    throws IOException {
  // Deserialize the credentials (delegation tokens + secret keys) carried
  // inside the container launch context. Returns an empty Credentials
  // object when the context carries no token buffer.
  final Credentials credentials = new Credentials();
  final ByteBuffer tokens = launchContext.getTokens();
  if (tokens == null) {
    return credentials;
  }
  final DataInputByteBuffer tokenStream = new DataInputByteBuffer();
  tokens.rewind();
  tokenStream.reset(tokens);
  credentials.readTokenStorageStream(tokenStream);
  if (LOG.isDebugEnabled()) {
    // Dump every parsed token (service -> token) for troubleshooting.
    for (Token<? extends TokenIdentifier> tk : credentials.getAllTokens()) {
      LOG.debug(tk.getService() + " = " + tk.toString());
    }
  }
  return credentials;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:21,代码来源:ContainerManagerImpl.java

示例2: createBinaryTokenFile

import org.apache.hadoop.security.Credentials; //导入方法依赖的package包/类
private static void createBinaryTokenFile(Configuration conf) {
  // Fetch delegation tokens for the test path and persist them into the
  // binary token file under the alias DELEGATION_TOKEN_KEY.
  try {
    Credentials cred1 = new Credentials();
    Credentials cred2 = new Credentials();
    TokenCache.obtainTokensForNamenodesInternal(cred1, new Path[] { p1 },
        conf);
    // NOTE(review): every token is re-keyed under the same alias, so if
    // multiple tokens are fetched only the last one survives — presumably
    // intentional for this test; confirm if more namenodes are added.
    for (Token<? extends TokenIdentifier> t : cred1.getAllTokens()) {
      cred2.addToken(new Text(DELEGATION_TOKEN_KEY), t);
    }
    // try-with-resources replaces the manual try/finally close and also
    // closes the stream if the write itself throws.
    try (DataOutputStream os = new DataOutputStream(new FileOutputStream(
        binaryTokenFileName.toString()))) {
      cred2.writeTokenStorageToStream(os);
    }
  } catch (IOException e) {
    Assert.fail("Exception " + e);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:22,代码来源:TestBinaryTokenFile.java

示例3: addTimelineDelegationToken

import org.apache.hadoop.security.Credentials; //导入方法依赖的package包/类
private void addTimelineDelegationToken(
    ContainerLaunchContext clc) throws YarnException, IOException {
  // Deserialize any credentials already attached to the launch context so
  // existing tokens are preserved when we re-serialize below.
  Credentials credentials = new Credentials();
  DataInputByteBuffer dibb = new DataInputByteBuffer();
  ByteBuffer tokens = clc.getTokens();
  if (tokens != null) {
    dibb.reset(tokens);
    credentials.readTokenStorageStream(dibb);
    // Reading advanced the buffer position; rewind so the original buffer
    // is left in a readable state.
    tokens.rewind();
  }
  // If the timeline delegation token is already in the CLC, no need to add
  // one more
  for (org.apache.hadoop.security.token.Token<? extends TokenIdentifier> token : credentials
      .getAllTokens()) {
    if (token.getKind().equals(TimelineDelegationTokenIdentifier.KIND_NAME)) {
      return;
    }
  }
  org.apache.hadoop.security.token.Token<TimelineDelegationTokenIdentifier>
      timelineDelegationToken = getTimelineDelegationToken();
  // No token available (e.g. timeline service disabled or unreachable):
  // leave the launch context untouched.
  if (timelineDelegationToken == null) {
    return;
  }
  credentials.addToken(timelineService, timelineDelegationToken);
  if (LOG.isDebugEnabled()) {
    // Fixed typo in log message: "timline" -> "timeline".
    LOG.debug("Add timeline delegation token into credentials: "
        + timelineDelegationToken);
  }
  // Re-serialize the augmented credentials back into the launch context.
  DataOutputBuffer dob = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(dob);
  tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  clc.setTokens(tokens);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:34,代码来源:YarnClientImpl.java

示例4: printTokens

import org.apache.hadoop.security.Credentials; //导入方法依赖的package包/类
private void printTokens(JobID jobId,
    Credentials credentials) throws IOException {
  // Log the job id followed by every token being submitted with it.
  LOG.info("Submitting tokens for job: " + jobId);
  for (Token<?> submittedToken : credentials.getAllTokens()) {
    LOG.info(submittedToken);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:8,代码来源:JobSubmitter.java

示例5: runLocalization

import org.apache.hadoop.security.Credentials; //导入方法依赖的package包/类
/**
 * Runs the localizer: reads the localizer token file from the working
 * directory, builds an RPC proxy to the NodeManager as the remote user,
 * then downloads the requested resources under the application user's UGI.
 *
 * @param nmAddr address of the NodeManager's localization protocol endpoint
 * @return 0 on success, -1 on any failure (the stack trace is written to
 *         stdout so the NM can capture it)
 * @throws IOException if directory setup or token-file access fails
 * @throws InterruptedException if interrupted during setup
 */
@SuppressWarnings("deprecation")
public int runLocalization(final InetSocketAddress nmAddr)
    throws IOException, InterruptedException {
  // load credentials
  initDirs(conf, user, appId, lfs, localDirs);
  final Credentials creds = new Credentials();
  DataInputStream credFile = null;
  try {
    // assume credentials in cwd
    // TODO: Fix
    Path tokenPath =
        new Path(String.format(TOKEN_FILE_NAME_FMT, localizerId));
    credFile = lfs.open(tokenPath);
    creds.readTokenStorageStream(credFile);
    // Explicitly deleting token file.
    // Deleted immediately after reading so the on-disk token cannot leak;
    // the parsed credentials live only in memory from here on.
    lfs.delete(tokenPath, false);      
  } finally  {
    if (credFile != null) {
      credFile.close();
    }
  }
  // create localizer context
  // The NM proxy is created under a remote-user UGI that carries only the
  // localizer token, which authenticates this localizer to the NM.
  UserGroupInformation remoteUser =
    UserGroupInformation.createRemoteUser(user);
  remoteUser.addToken(creds.getToken(LocalizerTokenIdentifier.KIND));
  final LocalizationProtocol nodeManager =
      remoteUser.doAs(new PrivilegedAction<LocalizationProtocol>() {
        @Override
        public LocalizationProtocol run() {
          return getProxy(nmAddr);
        }
      });

  // create user context
  // A second UGI gets ALL tokens from the credential file; downloads of
  // app resources run as this user.
  UserGroupInformation ugi =
    UserGroupInformation.createRemoteUser(user);
  for (Token<? extends TokenIdentifier> token : creds.getAllTokens()) {
    ugi.addToken(token);
  }

  ExecutorService exec = null;
  try {
    exec = createDownloadThreadPool();
    CompletionService<Path> ecs = createCompletionService(exec);
    localizeFiles(nodeManager, ecs, ugi);
    return 0;
  } catch (Throwable e) {
    // Print traces to stdout so that they can be logged by the NM address
    // space.
    e.printStackTrace(System.out);
    return -1;
  } finally {
    // Nested finally: the thread pool is stopped first, then cached
    // filesystems are closed even if shutdown/removeContext throws.
    try {
      if (exec != null) {
        exec.shutdownNow();
      }
      LocalDirAllocator.removeContext(appCacheDirContextName);
    } finally {
      closeFileSystems(ugi);
    }
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:63,代码来源:ContainerLocalizer.java

示例6: handleAppSubmitEvent

import org.apache.hadoop.security.Credentials; //导入方法依赖的package包/类
/**
 * Registers an application's delegation tokens for automatic renewal.
 * Each managed token is renewed once up front (to validate it) before any
 * renewal timer is scheduled; if the app carried no HDFS delegation token,
 * a new one is requested on its behalf.
 *
 * @param evt submit event carrying the app id, its credentials, and the
 *            cancel-at-end flag
 * @throws IOException if an initial token renewal fails
 * @throws InterruptedException if interrupted while obtaining a new token
 */
private void handleAppSubmitEvent(DelegationTokenRenewerAppSubmitEvent evt)
    throws IOException, InterruptedException {
  ApplicationId applicationId = evt.getApplicationId();
  Credentials ts = evt.getCredentials();
  boolean shouldCancelAtEnd = evt.shouldCancelAtEnd();
  if (ts == null) {
    return; // nothing to add
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("Registering tokens for renewal for:" +
        " appId = " + applicationId);
  }

  Collection<Token<?>> tokens = ts.getAllTokens();
  long now = System.currentTimeMillis();

  // find tokens for renewal, but don't add timers until we know
  // all renewable tokens are valid
  // At RM restart it is safe to assume that all the previously added tokens
  // are valid
  appTokens.put(applicationId,
    Collections.synchronizedSet(new HashSet<DelegationTokenToRenew>()));
  Set<DelegationTokenToRenew> tokenList = new HashSet<DelegationTokenToRenew>();
  boolean hasHdfsToken = false;
  for (Token<?> token : tokens) {
    // Only managed tokens participate in renewal.
    if (token.isManaged()) {
      if (token.getKind().equals(new Text("HDFS_DELEGATION_TOKEN"))) {
        LOG.info(applicationId + " found existing hdfs token " + token);
        hasHdfsToken = true;
      }

      // Reuse an existing renewal entry if another app already registered
      // this exact token; otherwise create and validate a new one.
      DelegationTokenToRenew dttr = allTokens.get(token);
      if (dttr == null) {
        dttr = new DelegationTokenToRenew(Arrays.asList(applicationId), token,
            getConfig(), now, shouldCancelAtEnd, evt.getUser());
        try {
          renewToken(dttr);
        } catch (IOException ioe) {
          throw new IOException("Failed to renew token: " + dttr.token, ioe);
        }
      }
      tokenList.add(dttr);
    }
  }

  if (!tokenList.isEmpty()) {
    // Renewing token and adding it to timer calls are separated purposefully
    // If user provides incorrect token then it should not be added for
    // renewal.
    for (DelegationTokenToRenew dtr : tokenList) {
      // putIfAbsent resolves the race where two apps submit the same token
      // concurrently: the loser reuses the winner's entry.
      DelegationTokenToRenew currentDtr =
          allTokens.putIfAbsent(dtr.token, dtr);
      if (currentDtr != null) {
        // another job beat us
        currentDtr.referringAppIds.add(applicationId);
        appTokens.get(applicationId).add(currentDtr);
      } else {
        appTokens.get(applicationId).add(dtr);
        setTimerForTokenRenewal(dtr);
      }
    }
  }

  if (!hasHdfsToken) {
    requestNewHdfsDelegationToken(Arrays.asList(applicationId), evt.getUser(),
      shouldCancelAtEnd);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:70,代码来源:DelegationTokenRenewer.java

示例7: testAttemptContainerRequest

import org.apache.hadoop.security.Credentials; //导入方法依赖的package包/类
/**
 * Verifies that a task attempt's container launch context carries all of
 * the attempt's credentials: every token is serialized into the context
 * and the secret key round-trips intact.
 */
@Test
public void testAttemptContainerRequest() throws Exception {
  final Text SECRET_KEY_ALIAS = new Text("secretkeyalias");
  final byte[] SECRET_KEY = ("secretkey").getBytes();
  Map<ApplicationAccessType, String> acls =
      new HashMap<ApplicationAccessType, String>(1);
  acls.put(ApplicationAccessType.VIEW_APP, "otheruser");
  ApplicationId appId = ApplicationId.newInstance(1, 1);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  Path jobFile = mock(Path.class);

  EventHandler eventHandler = mock(EventHandler.class);
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));

  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");

  // setup UGI for security so tokens and keys are preserved
  jobConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(jobConf);

  // Credentials that must survive into the launch context.
  Credentials credentials = new Credentials();
  credentials.addSecretKey(SECRET_KEY_ALIAS, SECRET_KEY);
  Token<JobTokenIdentifier> jobToken = new Token<JobTokenIdentifier>(
      ("tokenid").getBytes(), ("tokenpw").getBytes(),
      new Text("tokenkind"), new Text("tokenservice"));

  TaskAttemptImpl taImpl =
      new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
          mock(TaskSplitMetaInfo.class), jobConf, taListener,
          jobToken, credentials,
          new SystemClock(), null);

  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, taImpl.getID().toString());

  ContainerLaunchContext launchCtx =
      TaskAttemptImpl.createContainerLaunchContext(acls,
          jobConf, jobToken, taImpl.createRemoteTask(),
          TypeConverter.fromYarn(jobId),
          mock(WrappedJvmID.class), taListener,
          credentials);

  Assert.assertEquals("ACLs mismatch", acls, launchCtx.getApplicationACLs());
  // Deserialize the tokens back out of the launch context for comparison.
  Credentials launchCredentials = new Credentials();

  DataInputByteBuffer dibb = new DataInputByteBuffer();
  dibb.reset(launchCtx.getTokens());
  launchCredentials.readTokenStorageStream(dibb);

  // verify all tokens specified for the task attempt are in the launch context
  for (Token<? extends TokenIdentifier> token : credentials.getAllTokens()) {
    Token<? extends TokenIdentifier> launchToken =
        launchCredentials.getToken(token.getService());
    Assert.assertNotNull("Token " + token.getService() + " is missing",
        launchToken);
    Assert.assertEquals("Token " + token.getService() + " mismatch",
        token, launchToken);
  }

  // verify the secret key is in the launch context
  Assert.assertNotNull("Secret key missing",
      launchCredentials.getSecretKey(SECRET_KEY_ALIAS));
  Assert.assertTrue("Secret key mismatch", Arrays.equals(SECRET_KEY,
      launchCredentials.getSecretKey(SECRET_KEY_ALIAS)));
}
 
开发者ID:naver,项目名称:hadoop,代码行数:70,代码来源:TestTaskAttemptContainerRequest.java

示例8: readTokens

import org.apache.hadoop.security.Credentials; //导入方法依赖的package包/类
private static Collection<Token<?>> readTokens(Path file, Configuration conf)
    throws IOException {
  // Load serialized credentials from the given file and surface only the
  // tokens they contain (secret keys are not exposed).
  final Credentials storedCredentials =
      Credentials.readTokenStorageFile(file, conf);
  return storedCredentials.getAllTokens();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:6,代码来源:DelegationTokenFetcher.java


注:本文中的org.apache.hadoop.security.Credentials.getAllTokens方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。