

Java DataInputByteBuffer Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.io.DataInputByteBuffer. If you are wondering what DataInputByteBuffer is for, how to use it, or what real-world code that uses it looks like, the curated class examples below should help.


The DataInputByteBuffer class belongs to the org.apache.hadoop.io package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
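
Before the individual examples, here is a minimal, self-contained sketch (written for this article, not taken from any of the projects below) of the pattern that all of the examples share: data is serialized into an org.apache.hadoop.io.DataOutputBuffer, wrapped as a java.nio.ByteBuffer, and then read back by pointing a DataInputByteBuffer at that buffer via reset(). The port number and service name are made-up illustration values.

import java.nio.ByteBuffer;

import org.apache.hadoop.io.DataInputByteBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class DataInputByteBufferDemo {
  public static void main(String[] args) throws Exception {
    // Serialize an int and a string into a growable output buffer.
    DataOutputBuffer out = new DataOutputBuffer();
    out.writeInt(13562);                 // hypothetical port value
    out.writeUTF("mapreduce_shuffle");   // hypothetical service name

    // Wrap only the valid region of the backing array as a ByteBuffer.
    ByteBuffer buffer = ByteBuffer.wrap(out.getData(), 0, out.getLength());

    // Point a DataInputByteBuffer at the ByteBuffer and read the values back.
    DataInputByteBuffer in = new DataInputByteBuffer();
    in.reset(buffer);
    int port = in.readInt();
    String service = in.readUTF();
    System.out.println(service + " is listening on port " + port);
  }
}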

Example 1: parseCredentials

import org.apache.hadoop.io.DataInputByteBuffer; // import the required package/class
private Credentials parseCredentials(ContainerLaunchContext launchContext)
    throws IOException {
  Credentials credentials = new Credentials();
  // //////////// Parse credentials
  ByteBuffer tokens = launchContext.getTokens();

  if (tokens != null) {
    DataInputByteBuffer buf = new DataInputByteBuffer();
    tokens.rewind();
    buf.reset(tokens);
    credentials.readTokenStorageStream(buf);
    if (LOG.isDebugEnabled()) {
      for (Token<? extends TokenIdentifier> tk : credentials.getAllTokens()) {
        LOG.debug(tk.getService() + " = " + tk.toString());
      }
    }
  }
  // //////////// End of parsing credentials
  return credentials;
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: ContainerManagerImpl.java

Example 2: parseCredentials

import org.apache.hadoop.io.DataInputByteBuffer; // import the required package/class
/**
 * Parses the container launch context and returns a Credentials instance that
 * contains all the tokens from the launch context.
 * @param launchContext
 * @return the credential instance
 * @throws IOException
 */
public static Credentials parseCredentials(
    ContainerLaunchContext launchContext) throws IOException {
  Credentials credentials = new Credentials();
  ByteBuffer tokens = launchContext.getTokens();

  if (tokens != null) {
    DataInputByteBuffer buf = new DataInputByteBuffer();
    tokens.rewind();
    buf.reset(tokens);
    credentials.readTokenStorageStream(buf);
    if (LOG.isDebugEnabled()) {
      for (Token<? extends TokenIdentifier> tk : credentials
          .getAllTokens()) {
        LOG.debug(tk.getService() + " = " + tk.toString());
      }
    }
  }

  return credentials;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 28, Source: YarnServerSecurityUtils.java

Example 3: parseCredentials

import org.apache.hadoop.io.DataInputByteBuffer; // import the required package/class
private Credentials parseCredentials(ContainerLaunchContext launchContext)
    throws YarnException {
  Credentials credentials = new Credentials();
  // //////////// Parse credentials
  ByteBuffer tokens = launchContext.getTokens();

  if (tokens != null) {
    DataInputByteBuffer buf = new DataInputByteBuffer();
    tokens.rewind();
    buf.reset(tokens);
    try {
      credentials.readTokenStorageStream(buf);
      if (LOG.isDebugEnabled()) {
        for (Token<? extends TokenIdentifier> tk : credentials.getAllTokens()) {
          LOG.debug(tk.getService() + " = " + tk.toString());
        }
      }
    } catch (IOException e) {
      throw RPCUtil.getRemoteException(e);
    }
  }
  // //////////// End of parsing credentials
  return credentials;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 25, Source: ContainerManagerImpl.java

Example 4: storeApplicationAttemptStateInternal

import org.apache.hadoop.io.DataInputByteBuffer; // import the required package/class
@Override
public synchronized void storeApplicationAttemptStateInternal(
    ApplicationAttemptId appAttemptId,
    ApplicationAttemptStateDataPBImpl attemptStateData)
    throws Exception {
  Credentials credentials = null;
  if(attemptStateData.getAppAttemptTokens() != null){
    DataInputByteBuffer dibb = new DataInputByteBuffer();
    credentials = new Credentials();
    dibb.reset(attemptStateData.getAppAttemptTokens());
    credentials.readTokenStorageStream(dibb);
  }
  ApplicationAttemptState attemptState =
      new ApplicationAttemptState(appAttemptId,
        attemptStateData.getMasterContainer(), credentials,
        attemptStateData.getStartTime());

  ApplicationState appState = state.getApplicationState().get(
      attemptState.getAttemptId().getApplicationId());
  if (appState == null) {
    throw new YarnRuntimeException("Application doesn't exist");
  }
  appState.attempts.put(attemptState.getAttemptId(), attemptState);
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 25, Source: MemoryRMStateStore.java

Example 5: setupTokens

import org.apache.hadoop.io.DataInputByteBuffer; // import the required package/class
private void setupTokens(
    ContainerLaunchContext container, ContainerId containerID)
    throws IOException {
  Map<String, String> environment = container.getEnvironment();
  environment.put(ApplicationConstants.APPLICATION_WEB_PROXY_BASE_ENV,
      application.getWebProxyBase());
  // Set AppSubmitTime and MaxAppAttempts to be consumable by the AM.
  ApplicationId applicationId =
      application.getAppAttemptId().getApplicationId();
  environment.put(
      ApplicationConstants.APP_SUBMIT_TIME_ENV,
      String.valueOf(rmContext.getRMApps()
          .get(applicationId)
          .getSubmitTime()));
  environment.put(ApplicationConstants.MAX_APP_ATTEMPTS_ENV,
      String.valueOf(rmContext.getRMApps().get(
          applicationId).getMaxAppAttempts()));

  Credentials credentials = new Credentials();
  DataInputByteBuffer dibb = new DataInputByteBuffer();
  if (container.getTokens() != null) {
    // TODO: Don't do this kind of checks everywhere.
    dibb.reset(container.getTokens());
    credentials.readTokenStorageStream(dibb);
  }

  // Add AMRMToken
  Token<AMRMTokenIdentifier> amrmToken = createAndSetAMRMToken();
  if (amrmToken != null) {
    credentials.addToken(amrmToken.getService(), amrmToken);
  }
  DataOutputBuffer dob = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(dob);
  container.setTokens(ByteBuffer.wrap(dob.getData(), 0, dob.getLength()));
}
 
Developer: naver, Project: hadoop, Lines: 36, Source: AMLauncher.java

Example 6: parseCredentials

import org.apache.hadoop.io.DataInputByteBuffer; // import the required package/class
protected Credentials parseCredentials() throws IOException {
  Credentials credentials = new Credentials();
  DataInputByteBuffer dibb = new DataInputByteBuffer();
  ByteBuffer tokens = submissionContext.getAMContainerSpec().getTokens();
  if (tokens != null) {
    dibb.reset(tokens);
    credentials.readTokenStorageStream(dibb);
    tokens.rewind();
  }
  return credentials;
}
 
Developer: naver, Project: hadoop, Lines: 12, Source: RMAppImpl.java

Example 7: parseCredentials

import org.apache.hadoop.io.DataInputByteBuffer; // import the required package/class
protected Credentials parseCredentials(
    ApplicationSubmissionContext application) throws IOException {
  Credentials credentials = new Credentials();
  DataInputByteBuffer dibb = new DataInputByteBuffer();
  ByteBuffer tokens = application.getAMContainerSpec().getTokens();
  if (tokens != null) {
    dibb.reset(tokens);
    credentials.readTokenStorageStream(dibb);
    tokens.rewind();
  }
  return credentials;
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: RMAppManager.java

Example 8: getContainerCredentials

import org.apache.hadoop.io.DataInputByteBuffer; // import the required package/class
public Credentials getContainerCredentials() throws IOException {
  Credentials credentials = new Credentials();
  DataInputByteBuffer buf = new DataInputByteBuffer();
  containerTokens.rewind();
  buf.reset(containerTokens);
  credentials.readTokenStorageStream(buf);
  return credentials;
}
 
Developer: naver, Project: hadoop, Lines: 9, Source: TestAMAuthorization.java

Example 9: addTimelineDelegationToken

import org.apache.hadoop.io.DataInputByteBuffer; // import the required package/class
private void addTimelineDelegationToken(
    ContainerLaunchContext clc) throws YarnException, IOException {
  Credentials credentials = new Credentials();
  DataInputByteBuffer dibb = new DataInputByteBuffer();
  ByteBuffer tokens = clc.getTokens();
  if (tokens != null) {
    dibb.reset(tokens);
    credentials.readTokenStorageStream(dibb);
    tokens.rewind();
  }
  // If the timeline delegation token is already in the CLC, no need to add
  // one more
  for (org.apache.hadoop.security.token.Token<? extends TokenIdentifier> token : credentials
      .getAllTokens()) {
    if (token.getKind().equals(TimelineDelegationTokenIdentifier.KIND_NAME)) {
      return;
    }
  }
  org.apache.hadoop.security.token.Token<TimelineDelegationTokenIdentifier>
      timelineDelegationToken = getTimelineDelegationToken();
  if (timelineDelegationToken == null) {
    return;
  }
  credentials.addToken(timelineService, timelineDelegationToken);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Add timline delegation token into credentials: "
        + timelineDelegationToken);
  }
  DataOutputBuffer dob = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(dob);
  tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  clc.setTokens(tokens);
}
 
Developer: naver, Project: hadoop, Lines: 34, Source: YarnClientImpl.java

Example 10: deserializeMetaData

import org.apache.hadoop.io.DataInputByteBuffer; // import the required package/class
/**
 * A helper function to deserialize the metadata returned by ShuffleHandler.
 * @param meta the metadata returned by the ShuffleHandler
 * @return the port the Shuffle Handler is listening on to serve shuffle data.
 */
public static int deserializeMetaData(ByteBuffer meta) throws IOException {
  //TODO this should be returning a class not just an int
  DataInputByteBuffer in = new DataInputByteBuffer();
  in.reset(meta);
  int port = in.readInt();
  return port;
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: ShuffleHandler.java
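
For context, the serialization counterpart of this helper would write the port into a DataOutputBuffer and wrap the raw bytes as a ByteBuffer. The sketch below is a hedged reconstruction for illustration; the actual serialization code in ShuffleHandler may differ in detail.

/**
 * Hedged sketch of the inverse of deserializeMetaData above: pack the port
 * the Shuffle Handler listens on into a ByteBuffer for the consumer to decode.
 */
static ByteBuffer serializeMetaData(int port) throws IOException {
  DataOutputBuffer portDob = new DataOutputBuffer();
  portDob.writeInt(port);
  return ByteBuffer.wrap(portDob.getData(), 0, portDob.getLength());
}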

Example 11: deserializeServiceData

import org.apache.hadoop.io.DataInputByteBuffer; // import the required package/class
static Token<JobTokenIdentifier> deserializeServiceData(ByteBuffer secret) throws IOException {
  DataInputByteBuffer in = new DataInputByteBuffer();
  in.reset(secret);
  Token<JobTokenIdentifier> jt = new Token<JobTokenIdentifier>();
  jt.readFields(in);
  return jt;
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: ShuffleHandler.java
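
The matching write side would serialize the job token's fields into a DataOutputBuffer before wrapping them. A hedged sketch, assuming the token is written through its Writable interface (this is not copied from ShuffleHandler):

static ByteBuffer serializeServiceData(Token<JobTokenIdentifier> jobToken) throws IOException {
  // Inverse of deserializeServiceData above: write the token's fields and wrap the bytes.
  DataOutputBuffer jobTokenDob = new DataOutputBuffer();
  jobToken.write(jobTokenDob);
  return ByteBuffer.wrap(jobTokenDob.getData(), 0, jobTokenDob.getLength());
}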

Example 12: setupTokens

import org.apache.hadoop.io.DataInputByteBuffer; // import the required package/class
@Private
@VisibleForTesting
protected void setupTokens(
    ContainerLaunchContext container, ContainerId containerID)
    throws IOException {
  Map<String, String> environment = container.getEnvironment();
  environment.put(ApplicationConstants.APPLICATION_WEB_PROXY_BASE_ENV,
      application.getWebProxyBase());
  // Set AppSubmitTime to be consumable by the AM.
  ApplicationId applicationId =
      application.getAppAttemptId().getApplicationId();
  environment.put(
      ApplicationConstants.APP_SUBMIT_TIME_ENV,
      String.valueOf(rmContext.getRMApps()
          .get(applicationId)
          .getSubmitTime()));

  Credentials credentials = new Credentials();
  DataInputByteBuffer dibb = new DataInputByteBuffer();
  ByteBuffer tokens = container.getTokens();
  if (tokens != null) {
    // TODO: Don't do this kind of checks everywhere.
    dibb.reset(tokens);
    credentials.readTokenStorageStream(dibb);
    tokens.rewind();
  }

  // Add AMRMToken
  Token<AMRMTokenIdentifier> amrmToken = createAndSetAMRMToken();
  if (amrmToken != null) {
    credentials.addToken(amrmToken.getService(), amrmToken);
  }
  DataOutputBuffer dob = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(dob);
  container.setTokens(ByteBuffer.wrap(dob.getData(), 0, dob.getLength()));
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 37, Source: AMLauncher.java

Example 13: decodeCredentials

import org.apache.hadoop.io.DataInputByteBuffer; // import the required package/class
/**
 * Decodes {@link Credentials} from the given buffer.
 * If the buffer is null or empty, it returns an empty Credentials.
 */
public static Credentials decodeCredentials(ByteBuffer buffer) throws IOException {
  Credentials credentials = new Credentials();
  if (buffer != null && buffer.hasRemaining()) {
    DataInputByteBuffer in = new DataInputByteBuffer();
    in.reset(buffer);
    credentials.readTokenStorageStream(in);
  }
  return credentials;
}
 
Developer: apache, Project: twill, Lines: 14, Source: YarnUtils.java
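
The encoding direction is the same pattern in reverse: the Credentials are written into a DataOutputBuffer and the valid region is wrapped as a ByteBuffer. This is a hedged sketch of what such an encodeCredentials helper could look like, not necessarily the helper Twill actually ships.

public static ByteBuffer encodeCredentials(Credentials credentials) throws IOException {
  // Write all tokens and secret keys into a growable buffer, then wrap only
  // the valid region so the receiver can decode it with decodeCredentials.
  DataOutputBuffer out = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(out);
  return ByteBuffer.wrap(out.getData(), 0, out.getLength());
}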

Example 14: storeApplicationAttemptStateInternal

import org.apache.hadoop.io.DataInputByteBuffer; // import the required package/class
@Override
public synchronized void storeApplicationAttemptStateInternal(
    ApplicationAttemptId appAttemptId,
    ApplicationAttemptStateData attemptStateData)
    throws Exception {
  Credentials credentials = null;
  if(attemptStateData.getAppAttemptTokens() != null){
    DataInputByteBuffer dibb = new DataInputByteBuffer();
    credentials = new Credentials();
    dibb.reset(attemptStateData.getAppAttemptTokens());
    credentials.readTokenStorageStream(dibb);
  }
  ApplicationAttemptState attemptState =
      new ApplicationAttemptState(appAttemptId,
        attemptStateData.getMasterContainer(), credentials,
        attemptStateData.getStartTime(),
        attemptStateData.getMemorySeconds(),
        attemptStateData.getVcoreSeconds());


  ApplicationState appState = state.getApplicationState().get(
      attemptState.getAttemptId().getApplicationId());
  if (appState == null) {
    throw new YarnRuntimeException("Application doesn't exist");
  }
  appState.attempts.put(attemptState.getAttemptId(), attemptState);
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 28, Source: MemoryRMStateStore.java

Example 15: updateApplicationAttemptStateInternal

import org.apache.hadoop.io.DataInputByteBuffer; // import the required package/class
@Override
public synchronized void updateApplicationAttemptStateInternal(
    ApplicationAttemptId appAttemptId,
    ApplicationAttemptStateData attemptStateData)
    throws Exception {
  Credentials credentials = null;
  if (attemptStateData.getAppAttemptTokens() != null) {
    DataInputByteBuffer dibb = new DataInputByteBuffer();
    credentials = new Credentials();
    dibb.reset(attemptStateData.getAppAttemptTokens());
    credentials.readTokenStorageStream(dibb);
  }
  ApplicationAttemptState updatedAttemptState =
      new ApplicationAttemptState(appAttemptId,
        attemptStateData.getMasterContainer(), credentials,
        attemptStateData.getStartTime(), attemptStateData.getState(),
        attemptStateData.getFinalTrackingUrl(),
        attemptStateData.getDiagnostics(),
        attemptStateData.getFinalApplicationStatus(),
        attemptStateData.getAMContainerExitStatus(),
        attemptStateData.getFinishTime(),
        attemptStateData.getMemorySeconds(),
        attemptStateData.getVcoreSeconds());

  ApplicationState appState =
      state.getApplicationState().get(
        updatedAttemptState.getAttemptId().getApplicationId());
  if (appState == null) {
    throw new YarnRuntimeException("Application doesn't exist");
  }
  LOG.info("Updating final state " + updatedAttemptState.getState()
      + " for attempt: " + updatedAttemptState.getAttemptId());
  appState.attempts.put(updatedAttemptState.getAttemptId(),
    updatedAttemptState);
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 36, Source: MemoryRMStateStore.java


Note: The org.apache.hadoop.io.DataInputByteBuffer class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.