

Java BlockTokenIdentifier Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier. If you are struggling with questions like: what exactly does the BlockTokenIdentifier class do? How is BlockTokenIdentifier used? What does BlockTokenIdentifier look like in practice? Then the curated class code examples below may help.


The BlockTokenIdentifier class belongs to the org.apache.hadoop.hdfs.security.token.block package. 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
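
Before the examples, here is a minimal sketch of what a Token<BlockTokenIdentifier> looks like when built by hand. The constructor is the same one used in Example 15 below; the identifier, password, kind, and service values are placeholders, not real token material:

import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;

// A hand-built dummy block token. Real tokens are issued and signed on the
// NameNode side by a block token secret manager; these values are stand-ins.
Token<BlockTokenIdentifier> blockToken = new Token<BlockTokenIdentifier>(
    "identifier".getBytes(),   // serialized BlockTokenIdentifier bytes
    "password".getBytes(),     // password (an HMAC in real deployments)
    new Text("kind"),          // token kind
    new Text("service"));      // service address the token is valid for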

Example 1: connectToDN

import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; // import the required package/class
/**
 * Connect to the given datanode's data transfer port, and return
 * the resulting IOStreamPair. This includes encryption wrapping, etc.
 */
public static IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
                                       Configuration conf,
                                       SaslDataTransferClient saslClient,
                                       SocketFactory socketFactory,
                                       boolean connectToDnViaHostname,
                                       DataEncryptionKeyFactory dekFactory,
                                       Token<BlockTokenIdentifier> blockToken)
    throws IOException {

  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    String dnAddr = dn.getXferAddr(connectToDnViaHostname);
    LOG.debug("Connecting to datanode {}", dnAddr);
    NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
    sock.setSoTimeout(timeout);

    OutputStream unbufOut = NetUtils.getOutputStream(sock);
    InputStream unbufIn = NetUtils.getInputStream(sock);
    IOStreamPair pair = saslClient.newSocketSend(sock, unbufOut,
        unbufIn, dekFactory, blockToken, dn);

    IOStreamPair result = new IOStreamPair(
        new DataInputStream(pair.in),
        new DataOutputStream(new BufferedOutputStream(pair.out,
            NuCypherExtUtilClient.getSmallBufferSize(conf)))
    );

    success = true;
    return result;
  } finally {
    if (!success) {
      IOUtils.closeSocket(sock);
    }
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 42, Source: NuCypherExtUtilClient.java
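
A rough usage sketch (not from the original project): a caller that already holds a SaslDataTransferClient, a DataEncryptionKeyFactory, and a block token might invoke the helper as below. The timeout value and the default socket factory are illustrative assumptions:

// Hypothetical call site for connectToDN; dn, conf, saslClient, dekFactory
// and blockToken are assumed to be initialized elsewhere.
IOStreamPair streams = NuCypherExtUtilClient.connectToDN(
    dn,                              // target DatanodeInfo
    60000,                           // connect/read timeout in milliseconds
    conf,                            // Hadoop Configuration
    saslClient,                      // performs the SASL handshake
    SocketFactory.getDefault(),      // javax.net.SocketFactory
    false,                           // connect via IP rather than hostname
    dekFactory,                      // supplies keys when encryption is enabled
    blockToken);                     // Token<BlockTokenIdentifier>
// The returned pair wraps buffered Data{Input,Output}Streams, so these
// casts match what the helper actually constructs.
DataInputStream in = (DataInputStream) streams.in;
DataOutputStream out = (DataOutputStream) streams.out;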

Example 2: peerFromSocketAndKey

import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; // import the required package/class
public static Peer peerFromSocketAndKey(
    SaslDataTransferClient saslClient, Socket s,
    DataEncryptionKeyFactory keyFactory,
    Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
    throws IOException {
  Peer peer = null;
  boolean success = false;
  try {
    peer = peerFromSocket(s);
    peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
    success = true;
    return peer;
  } finally {
    if (!success) {
      IOUtilsClient.cleanup(null, peer);
    }
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 19, Source: NuCypherExtUtilClient.java

Example 3: checkTrustAndSend

import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; // import the required package/class
/**
 * Checks if an address is already trusted and then sends client SASL
 * negotiation if required.
 *
 * @param addr connection address
 * @param underlyingOut connection output stream
 * @param underlyingIn connection input stream
 * @param encryptionKeyFactory for creation of an encryption key
 * @param accessToken connection block access token
 * @param datanodeId ID of destination DataNode
 * @return new pair of streams, wrapped after SASL negotiation
 * @throws IOException for any error
 */
private IOStreamPair checkTrustAndSend(InetAddress addr,
    OutputStream underlyingOut, InputStream underlyingIn,
    DataEncryptionKeyFactory encryptionKeyFactory,
    Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
    throws IOException {
  if (!trustedChannelResolver.isTrusted() &&
      !trustedChannelResolver.isTrusted(addr)) {
    // The encryption key factory only returns a key if encryption is enabled.
    DataEncryptionKey encryptionKey =
      encryptionKeyFactory.newDataEncryptionKey();
    return send(addr, underlyingOut, underlyingIn, encryptionKey, accessToken,
      datanodeId);
  } else {
    LOG.debug(
      "SASL client skipping handshake on trusted connection for addr = {}, "
      + "datanodeId = {}", addr, datanodeId);
    return null;
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 33, Source: SaslDataTransferClient.java
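
Worth noting about this helper: it returns null when either blanket trust or per-address trust is established, which the caller interprets as "keep using the raw streams, no SASL wrapping needed." The DataEncryptionKeyFactory is consulted only on the untrusted path, and newDataEncryptionKey() hands back a key only when encryption is enabled, so the same send() call covers both the encrypted and the plain-SASL cases.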

Example 4: readBlock

import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; // import the required package/class
@Override
public void readBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final long blockOffset,
    final long length,
    final boolean sendChecksum,
    final CachingStrategy cachingStrategy) throws IOException {

  OpReadBlockProto proto = OpReadBlockProto.newBuilder()
    .setHeader(DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken))
    .setOffset(blockOffset)
    .setLen(length)
    .setSendChecksums(sendChecksum)
    .setCachingStrategy(getCachingStrategy(cachingStrategy))
    .build();

  send(out, Op.READ_BLOCK, proto);
}
 
Developer ID: naver, Project: hadoop, Lines: 20, Source: Sender.java

Example 5: transferBlock

import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; // import the required package/class
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes) throws IOException {
  
  OpTransferBlockProto proto = OpTransferBlockProto.newBuilder()
    .setHeader(DataTransferProtoUtil.buildClientHeader(
        blk, clientName, blockToken))
    .addAllTargets(PBHelper.convert(targets))
    .addAllTargetStorageTypes(PBHelper.convertStorageTypes(targetStorageTypes))
    .build();

  send(out, Op.TRANSFER_BLOCK, proto);
}
 
Developer ID: naver, Project: hadoop, Lines: 17, Source: Sender.java

Example 6: requestShortCircuitFds

import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; // import the required package/class
@Override
public void requestShortCircuitFds(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    SlotId slotId, int maxVersion, boolean supportsReceiptVerification)
      throws IOException {
  OpRequestShortCircuitAccessProto.Builder builder =
      OpRequestShortCircuitAccessProto.newBuilder()
        .setHeader(DataTransferProtoUtil.buildBaseHeader(
          blk, blockToken)).setMaxVersion(maxVersion);
  if (slotId != null) {
    builder.setSlotId(PBHelper.convert(slotId));
  }
  builder.setSupportsReceiptVerification(supportsReceiptVerification);
  OpRequestShortCircuitAccessProto proto = builder.build();
  send(out, Op.REQUEST_SHORT_CIRCUIT_FDS, proto);
}
 
Developer ID: naver, Project: hadoop, Lines: 17, Source: Sender.java

Example 7: newConnectedPeer

import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; // import the required package/class
@Override // RemotePeerFactory
public Peer newConnectedPeer(InetSocketAddress addr,
    Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
    throws IOException {
  Peer peer = null;
  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    NetUtils.connect(sock, addr,
      getRandomLocalInterfaceAddr(),
      dfsClientConf.socketTimeout);
    peer = TcpPeerServer.peerFromSocketAndKey(saslClient, sock, this,
        blockToken, datanodeId);
    peer.setReadTimeout(dfsClientConf.socketTimeout);
    success = true;
    return peer;
  } finally {
    if (!success) {
      IOUtils.cleanup(LOG, peer);
      IOUtils.closeSocket(sock);
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 25, Source: DFSClient.java
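
This RemotePeerFactory implementation uses the same success-flag cleanup idiom as Examples 1, 2, and 13: if anything throws before success is set to true, the finally block closes both the half-initialized Peer and the underlying Socket, so a failed connect or SASL negotiation cannot leak a descriptor.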

Example 8: transferBlock

import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; // import the required package/class
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes) throws IOException {
  checkAccess(socketOut, true, blk, blockToken,
      Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
  previousOpClientName = clientName;
  updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);

  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  try {
    datanode.transferReplicaForPipelineRecovery(blk, targets,
        targetStorageTypes, clientName);
    writeResponse(Status.SUCCESS, null, out);
  } catch (IOException ioe) {
    LOG.info("transferBlock " + blk + " received exception " + ioe);
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 26, Source: DataXceiver.java

Example 9: getBlockLocalPathInfo

import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; // import the required package/class
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
    Token<BlockTokenIdentifier> token) throws IOException {
  checkBlockLocalPathAccess();
  checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ);
  Preconditions.checkNotNull(data, "Storage not yet initialized");
  BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
  if (LOG.isDebugEnabled()) {
    if (info != null) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo successful block=" + block
            + " blockfile " + info.getBlockPath() + " metafile "
            + info.getMetaPath());
      }
    } else {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo for block=" + block
            + " returning null");
      }
    }
  }
  metrics.incrBlocksGetLocalPathInfo();
  return info;
}
 
Developer ID: naver, Project: hadoop, Lines: 25, Source: DataNode.java

Example 10: getHdfsBlocksMetadata

import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; // import the required package/class
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(
    String bpId, long[] blockIds,
    List<Token<BlockTokenIdentifier>> tokens) throws IOException, 
    UnsupportedOperationException {
  if (!getHdfsBlockLocationsEnabled) {
    throw new UnsupportedOperationException("Datanode#getHdfsBlocksMetadata "
        + " is not enabled in datanode config");
  }
  if (blockIds.length != tokens.size()) {
    throw new IOException("Differing number of blocks and tokens");
  }
  // Check access for each block
  for (int i = 0; i < blockIds.length; i++) {
    checkBlockToken(new ExtendedBlock(bpId, blockIds[i]),
        tokens.get(i), BlockTokenSecretManager.AccessMode.READ);
  }

  DataNodeFaultInjector.get().getHdfsBlocksMetadata();

  return data.getHdfsBlocksMetadata(bpId, blockIds);
}
 
Developer ID: naver, Project: hadoop, Lines: 23, Source: DataNode.java

Example 11: checkReadAccess

import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; // import the required package/class
private void checkReadAccess(final ExtendedBlock block) throws IOException {
  if (isBlockTokenEnabled) {
    Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
        .getTokenIdentifiers();
    if (tokenIds.size() != 1) {
      throw new IOException("Can't continue since none or more than one "
          + "BlockTokenIdentifier is found.");
    }
    for (TokenIdentifier tokenId : tokenIds) {
      BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
      if (LOG.isDebugEnabled()) {
        LOG.debug("Got: " + id.toString());
      }
      blockPoolTokenSecretManager.checkAccess(id, null, block,
          BlockTokenSecretManager.AccessMode.READ);
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 19, Source: DataNode.java
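
The strict size check here is deliberate: the DataNode requires exactly one BlockTokenIdentifier among the current user's token identifiers. With zero it has nothing to authorize the read with, and with several it cannot tell which token the client intends to present, so it fails fast rather than guessing.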

Example 12: getBlockLocalPathInfo

import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; // import the required package/class
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
    Token<BlockTokenIdentifier> token) throws IOException {
  GetBlockLocalPathInfoRequestProto req =
      GetBlockLocalPathInfoRequestProto.newBuilder()
      .setBlock(PBHelper.convert(block))
      .setToken(PBHelper.convert(token)).build();
  GetBlockLocalPathInfoResponseProto resp;
  try {
    resp = rpcProxy.getBlockLocalPathInfo(NULL_CONTROLLER, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  return new BlockLocalPathInfo(PBHelper.convert(resp.getBlock()),
      resp.getLocalPath(), resp.getLocalMetaPath());
}
 
Developer ID: naver, Project: hadoop, Lines: 17, Source: ClientDatanodeProtocolTranslatorPB.java

Example 13: peerFromSocketAndKey

import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; // import the required package/class
public static Peer peerFromSocketAndKey(
      SaslDataTransferClient saslClient, Socket s,
      DataEncryptionKeyFactory keyFactory,
      Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
      throws IOException {
  Peer peer = null;
  boolean success = false;
  try {
    peer = peerFromSocket(s);
    peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
    success = true;
    return peer;
  } finally {
    if (!success) {
      IOUtils.cleanup(null, peer);
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 19, Source: TcpPeerServer.java

Example 14: transferRbw

import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; // import the required package/class
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b, 
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
      new StorageType[]{StorageType.DEFAULT});
  out.flush();

  return BlockOpResponseProto.parseDelimitedFrom(in);
}
 
Developer ID: naver, Project: hadoop, Lines: 21, Source: DFSTestUtil.java

Example 15: createLocatedBlock

import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; // import the required package/class
private LocatedBlock createLocatedBlock() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3", 
          AdminStates.NORMAL),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h4",
          AdminStates.NORMAL),
  };
  String[] storageIDs = {"s1", "s2", "s3", "s4"};
  StorageType[] media = {
      StorageType.DISK,
      StorageType.SSD,
      StorageType.DISK,
      StorageType.RAM_DISK
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53),
      dnInfos, storageIDs, media, 5, false, new DatanodeInfo[]{});
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
 
Developer ID: naver, Project: hadoop, Lines: 27, Source: TestPBHelper.java
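
Since this helper lives in TestPBHelper, the dummy token built from "identifier", "password", a kind, and a service is presumably there so the test can round-trip the LocatedBlock through PBHelper's protobuf conversion and compare every field, including the block token; none of the values are usable token material.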


Note: The org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Consult the corresponding project's License before distributing or using the code; do not reproduce without permission.