当前位置: 首页>>代码示例>>Java>>正文


Java AccessMode类代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode的典型用法代码示例。如果您正苦于以下问题：Java AccessMode类的具体用法？Java AccessMode怎么用？Java AccessMode使用的例子？那么，这里精选的类代码示例或许可以为您提供帮助。


AccessMode类属于org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager包,在下文中一共展示了AccessMode类的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: getBlockLocalPathInfo

import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode; //导入依赖的package包/类
/**
 * Returns local-filesystem path information (block file and meta file)
 * for the given block, after verifying that the caller is allowed to
 * use the local-read path and holds a READ-capable block token.
 *
 * @param block the block whose local paths are requested
 * @param token block access token presented by the caller
 * @return the local path info from storage; may be null if unknown
 * @throws IOException if access checks fail or storage lookup fails
 */
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
    Token<BlockTokenIdentifier> token) throws IOException {
  checkBlockLocalPathAccess();
  checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ);
  Preconditions.checkNotNull(data, "Storage not yet initialized");
  BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
  // The original nested isTraceEnabled() inside an isDebugEnabled() guard;
  // TRACE implies DEBUG in the standard logging level hierarchy, so a single
  // TRACE guard is equivalent and clearer.
  if (LOG.isTraceEnabled()) {
    if (info != null) {
      LOG.trace("getBlockLocalPathInfo successful block=" + block
          + " blockfile " + info.getBlockPath() + " metafile "
          + info.getMetaPath());
    } else {
      LOG.trace("getBlockLocalPathInfo for block=" + block
          + " returning null");
    }
  }
  metrics.incrBlocksGetLocalPathInfo();
  return info;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:25,代码来源:DataNode.java

示例2: getHdfsBlocksMetadata

import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode; //导入依赖的package包/类
/**
 * Fetches metadata for the given blocks in one block pool, after validating
 * the per-block access tokens. Disabled unless the datanode is configured
 * with HDFS block-location support.
 *
 * @param bpId block pool id the blocks belong to
 * @param blockIds ids of the blocks to look up
 * @param tokens one access token per block id, in the same order
 * @return metadata for the requested blocks
 * @throws IOException if token counts mismatch or a token check fails
 * @throws UnsupportedOperationException if the feature is disabled
 */
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(
    String bpId, long[] blockIds,
    List<Token<BlockTokenIdentifier>> tokens) throws IOException, 
    UnsupportedOperationException {
  if (!getHdfsBlockLocationsEnabled) {
    throw new UnsupportedOperationException(
        "Datanode#getHdfsBlocksMetadata  is not enabled in datanode config");
  }
  final int blockCount = blockIds.length;
  if (blockCount != tokens.size()) {
    throw new IOException("Differing number of blocks and tokens");
  }
  // Every block must pass its READ-token check before any data is touched.
  for (int idx = 0; idx < blockCount; idx++) {
    ExtendedBlock candidate = new ExtendedBlock(bpId, blockIds[idx]);
    checkBlockToken(candidate, tokens.get(idx),
        BlockTokenSecretManager.AccessMode.READ);
  }

  // Test hook: lets fault-injection tests interpose at this point.
  DataNodeFaultInjector.get().getHdfsBlocksMetadata();

  return data.getHdfsBlocksMetadata(bpId, blockIds);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:23,代码来源:DataNode.java

示例3: checkReadAccess

import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode; //导入依赖的package包/类
/**
 * Verifies that the current user's single block token grants READ access
 * to {@code block}. A no-op when block tokens are disabled.
 *
 * @param block the block being read
 * @throws IOException if the user holds zero or multiple token identifiers,
 *         or the token does not authorize READ on the block
 */
private void checkReadAccess(final ExtendedBlock block) throws IOException {
  if (!isBlockTokenEnabled) {
    return;
  }
  final Set<TokenIdentifier> ids = UserGroupInformation.getCurrentUser()
      .getTokenIdentifiers();
  // Exactly one identifier is expected; anything else is ambiguous.
  if (ids.size() != 1) {
    throw new IOException("Can't continue since none or more than one "
        + "BlockTokenIdentifier is found.");
  }
  for (TokenIdentifier rawId : ids) {
    final BlockTokenIdentifier blockTokenId = (BlockTokenIdentifier) rawId;
    if (LOG.isDebugEnabled()) {
      LOG.debug("Got: " + blockTokenId.toString());
    }
    blockPoolTokenSecretManager.checkAccess(blockTokenId, null, block,
        BlockTokenSecretManager.AccessMode.READ);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:19,代码来源:DataNode.java

示例4: updateBlockForPipeline

import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode; //导入依赖的package包/类
/**
 * Get a new generation stamp together with an access token for 
 * a block under construction
 * 
 * This method is called for recovering a failed pipeline or setting up
 * a pipeline to append to a block.
 * 
 * @param block a block
 * @param clientName the name of a client
 * @return a located block with a new generation stamp and an access token
 * @throws IOException if any error occurs
 */
LocatedBlock updateBlockForPipeline(ExtendedBlock block, 
    String clientName) throws IOException {
  LocatedBlock locatedBlock;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);

    // Check validity of parameters: the block must be under construction
    // and owned by this client (verified by checkUCBlock).
    checkUCBlock(block, clientName);

    // Assign a fresh generation stamp (legacy vs. new id range decided by
    // blockIdManager) and attach a WRITE-mode block token for the pipeline.
    block.setGenerationStamp(nextGenerationStamp(blockIdManager.isLegacyBlock(block.getLocalBlock())));
    locatedBlock = new LocatedBlock(block, new DatanodeInfo[0]);
    blockManager.setBlockToken(locatedBlock, AccessMode.WRITE);
  } finally {
    writeUnlock();
  }
  // Ensure we record the new generation stamp; logSync is deliberately done
  // outside the write lock to avoid blocking other namespace operations.
  getEditLog().logSync();
  return locatedBlock;
}
 
开发者ID:yncxcw,项目名称:big-c,代码行数:35,代码来源:FSNamesystem.java

示例5: updateBlockForPipeline

import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode; //导入依赖的package包/类
/**
 * Get a new generation stamp together with an access token for 
 * a block under construction
 * 
 * This method is called for recovering a failed pipeline or setting up
 * a pipeline to append to a block.
 * 
 * @param block a block
 * @param clientName the name of a client
 * @return a located block with a new generation stamp and an access token
 * @throws IOException if any error occurs
 */
LocatedBlock updateBlockForPipeline(ExtendedBlock block, 
    String clientName) throws IOException {
  LocatedBlock locatedBlock;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);

    // Check validity of parameters: the block must be under construction
    // and owned by this client (verified by checkUCBlock).
    checkUCBlock(block, clientName);

    // Assign a fresh generation stamp and attach a WRITE-mode block token
    // so the client can re-establish the pipeline.
    block.setGenerationStamp(
        nextGenerationStamp(isLegacyBlock(block.getLocalBlock())));
    locatedBlock = new LocatedBlock(block, new DatanodeInfo[0]);
    blockManager.setBlockToken(locatedBlock, AccessMode.WRITE);
  } finally {
    writeUnlock();
  }
  // Ensure we record the new generation stamp; logSync is deliberately done
  // outside the write lock to avoid blocking other namespace operations.
  getEditLog().logSync();
  return locatedBlock;
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:36,代码来源:FSNamesystem.java

示例6: getBlockLocalPathInfo

import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode; //导入依赖的package包/类
/**
 * Returns local-filesystem path information (block file and meta file)
 * for the given block, after verifying that the caller is allowed to
 * use the local-read path and holds a READ-capable block token.
 *
 * @param block the block whose local paths are requested
 * @param token block access token presented by the caller
 * @return the local path info from storage; may be null if unknown
 * @throws IOException if access checks fail or storage lookup fails
 */
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
    Token<BlockTokenIdentifier> token) throws IOException {
  checkBlockLocalPathAccess();
  checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ);
  BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
  // The original nested isTraceEnabled() inside an isDebugEnabled() guard;
  // TRACE implies DEBUG in the standard logging level hierarchy, so a single
  // TRACE guard is equivalent and clearer.
  if (LOG.isTraceEnabled()) {
    if (info != null) {
      LOG.trace("getBlockLocalPathInfo successful block=" + block
          + " blockfile " + info.getBlockPath() + " metafile "
          + info.getMetaPath());
    } else {
      LOG.trace("getBlockLocalPathInfo for block=" + block
          + " returning null");
    }
  }
  metrics.incrBlocksGetLocalPathInfo();
  return info;
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:24,代码来源:DataNode.java

示例7: getHdfsBlocksMetadata

import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode; //导入依赖的package包/类
/**
 * Fetches metadata for the given blocks, after validating the per-block
 * access tokens. Disabled unless the datanode is configured with HDFS
 * block-location support.
 *
 * @param blocks blocks to look up
 * @param tokens one access token per block, in the same order
 * @return metadata for the requested blocks
 * @throws IOException if token counts mismatch or a token check fails
 * @throws UnsupportedOperationException if the feature is disabled
 */
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(List<ExtendedBlock> blocks,
    List<Token<BlockTokenIdentifier>> tokens) throws IOException, 
    UnsupportedOperationException {
  if (!getHdfsBlockLocationsEnabled) {
    throw new UnsupportedOperationException(
        "Datanode#getHdfsBlocksMetadata  is not enabled in datanode config");
  }
  final int blockCount = blocks.size();
  if (blockCount != tokens.size()) {
    throw new IOException("Differing number of blocks and tokens");
  }
  // Every block must pass its READ-token check before any data is touched.
  for (int idx = 0; idx < blockCount; idx++) {
    checkBlockToken(blocks.get(idx), tokens.get(idx),
        BlockTokenSecretManager.AccessMode.READ);
  }
  return data.getHdfsBlocksMetadata(blocks);
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:19,代码来源:DataNode.java

示例8: checkWriteAccess

import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode; //导入依赖的package包/类
/**
 * Verifies that the current user's single block token authorizes access to
 * {@code block}. A no-op when block tokens are disabled.
 *
 * NOTE(review): despite the method name, this checks {@code AccessMode.READ},
 * not WRITE — presumably because the callers' tokens are only issued with
 * READ mode for this path; confirm against the call sites before changing.
 *
 * @param block the block being accessed
 * @throws IOException if the user holds zero or multiple token identifiers,
 *         or the token check fails
 */
private void checkWriteAccess(final ExtendedBlock block) throws IOException {
  if (isBlockTokenEnabled) {
    Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
        .getTokenIdentifiers();
    // Exactly one identifier is expected; anything else is ambiguous.
    if (tokenIds.size() != 1) {
      throw new IOException("Can't continue since none or more than one "
          + "BlockTokenIdentifier is found.");
    }
    for (TokenIdentifier tokenId : tokenIds) {
      BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
      if (LOG.isDebugEnabled()) {
        LOG.debug("Got: " + id.toString());
      }
      blockPoolTokenSecretManager.checkAccess(id, null, block,
          BlockTokenSecretManager.AccessMode.READ);
    }
  }
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:19,代码来源:DataNode.java

示例9: getBlockLocalPathInfo

import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode; //导入依赖的package包/类
/**
 * Returns local-filesystem path information (block file and meta file)
 * for the given block, after verifying that the caller is allowed to
 * use the local-read path and holds a READ-capable block token.
 *
 * @param block the block whose local paths are requested
 * @param token block access token presented by the caller
 * @return the local path info from storage; may be null if unknown
 * @throws IOException if access checks fail or storage lookup fails
 */
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
    Token<BlockTokenIdentifier> token) throws IOException {
  checkBlockLocalPathAccess();
  checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ);
  BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
  // The original nested isTraceEnabled() inside an isDebugEnabled() guard;
  // TRACE implies DEBUG in the standard logging level hierarchy, so a single
  // TRACE guard is equivalent and clearer.
  if (LOG.isTraceEnabled()) {
    if (info != null) {
      LOG.trace("getBlockLocalPathInfo successful block=" + block +
          " blockfile " + info.getBlockPath() + " metafile " +
          info.getMetaPath());
    } else {
      LOG.trace(
          "getBlockLocalPathInfo for block=" + block + " returning null");
    }
  }
  metrics.incrBlocksGetLocalPathInfo();
  return info;
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:24,代码来源:DataNode.java

示例10: getHdfsBlocksMetadata

import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode; //导入依赖的package包/类
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(List<ExtendedBlock> blocks,
    List<Token<BlockTokenIdentifier>> tokens)
    throws IOException, UnsupportedOperationException {
  if (!getHdfsBlockLocationsEnabled) {
    throw new UnsupportedOperationException(
        "Datanode#getHdfsBlocksMetadata " +
            " is not enabled in datanode config");
  }
  if (blocks.size() != tokens.size()) {
    throw new IOException("Differing number of blocks and tokens");
  }
  // Check access for each block
  for (int i = 0; i < blocks.size(); i++) {
    checkBlockToken(blocks.get(i), tokens.get(i),
        BlockTokenSecretManager.AccessMode.READ);
  }
  return data.getHdfsBlocksMetadata(blocks);
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:20,代码来源:DataNode.java

示例11: checkWriteAccess

import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode; //导入依赖的package包/类
/**
 * Verifies that the current user's single block token authorizes access to
 * {@code block}. A no-op when block tokens are disabled.
 *
 * NOTE(review): despite the method name, this checks {@code AccessMode.READ},
 * not WRITE — presumably because the callers' tokens are only issued with
 * READ mode for this path; confirm against the call sites before changing.
 *
 * @param block the block being accessed
 * @throws IOException if the user holds zero or multiple token identifiers,
 *         or the token check fails
 */
private void checkWriteAccess(final ExtendedBlock block) throws IOException {
  if (isBlockTokenEnabled) {
    Set<TokenIdentifier> tokenIds =
        UserGroupInformation.getCurrentUser().getTokenIdentifiers();
    // Exactly one identifier is expected; anything else is ambiguous.
    if (tokenIds.size() != 1) {
      throw new IOException("Can't continue since none or more than one " +
          "BlockTokenIdentifier is found.");
    }
    for (TokenIdentifier tokenId : tokenIds) {
      BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
      if (LOG.isDebugEnabled()) {
        LOG.debug("Got: " + id.toString());
      }
      blockPoolTokenSecretManager.checkAccess(id, null, block,
          BlockTokenSecretManager.AccessMode.READ);
    }
  }
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:19,代码来源:DataNode.java

示例12: getBlockLocalPathInfo

import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode; //导入依赖的package包/类
/**
 * Returns local-filesystem path information (block file and meta file)
 * for the given block, after verifying that the caller is allowed to
 * use the local-read path and holds a READ-capable block token.
 *
 * @param block the block whose local paths are requested
 * @param token block access token presented by the caller
 * @return the local path info from storage; may be null if unknown
 * @throws IOException if access checks fail or storage lookup fails
 */
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(Block block,
    Token<BlockTokenIdentifier> token) throws IOException {
  checkBlockLocalPathAccess();
  checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ);
  BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
  // The original nested isTraceEnabled() inside an isDebugEnabled() guard;
  // TRACE implies DEBUG in the standard logging level hierarchy, so a single
  // TRACE guard is equivalent and clearer.
  if (LOG.isTraceEnabled()) {
    if (info != null) {
      LOG.trace("getBlockLocalPathInfo successful block=" + block
          + " blockfile " + info.getBlockPath() + " metafile "
          + info.getMetaPath());
    } else {
      LOG.trace("getBlockLocalPathInfo for block=" + block
          + " returning null");
    }
  }
  myMetrics.incrBlocksGetLocalPathInfo();
  return info;
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:24,代码来源:DataNode.java

示例13: checkBlockToken

import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode; //导入依赖的package包/类
/**
 * Check block access token for the given access mode. A no-op unless block
 * tokens are enabled and security is on.
 *
 * @param block the block being accessed
 * @param accessMode the mode (e.g. READ, WRITE) the token must grant
 * @throws IOException if the user holds a number of token identifiers other
 *         than one, or the token does not grant {@code accessMode}
 */
private void checkBlockToken(Block block,
    BlockTokenSecretManager.AccessMode accessMode) throws IOException {
  if (!isBlockTokenEnabled || !UserGroupInformation.isSecurityEnabled()) {
    return;
  }
  final Set<TokenIdentifier> ids = UserGroupInformation.getCurrentUser()
      .getTokenIdentifiers();
  // Exactly one identifier is expected; anything else is ambiguous.
  if (ids.size() != 1) {
    throw new IOException("Can't continue with authorization since "
        + ids.size() + " BlockTokenIdentifier is found.");
  }
  for (TokenIdentifier rawId : ids) {
    final BlockTokenIdentifier blockTokenId = (BlockTokenIdentifier) rawId;
    if (LOG.isDebugEnabled()) {
      LOG.debug("Got: " + blockTokenId.toString());
    }
    blockTokenSecretManager.checkAccess(blockTokenId, null, block, accessMode);
  }
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:21,代码来源:DataNode.java


注:本文中的org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。