

Java DatanodeID Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.DatanodeID. If you are wondering what the DatanodeID class is for, how to use it, or what real code that uses it looks like, the curated examples below should help.


The DatanodeID class belongs to the org.apache.hadoop.hdfs.protocol package. The sections below present 15 code examples that use DatanodeID, sorted by popularity by default.
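Before the collected examples, here is a minimal sketch of constructing a DatanodeID directly and reading its addresses. It assumes the seven-argument constructor that Example 14 below uses (IP, hostname, datanode UUID, xfer port, info port, secure info port, IPC port) and the getXferAddr()/getIpcAddr(boolean) accessors that appear in Examples 2, 7, and 10; the class name DatanodeIDSketch and the port values are illustrative only, and exact signatures can vary between Hadoop versions.

import org.apache.hadoop.hdfs.protocol.DatanodeID; // import the required package/class

// Illustrative sketch only; constructor arguments follow the form used in Example 14 below.
public class DatanodeIDSketch {
  public static void main(String[] args) {
    DatanodeID datanodeId = new DatanodeID(
        "127.0.0.1",        // IP address
        "localhost",        // hostname
        "datanode-uuid-1",  // datanode UUID
        50010,              // data transfer (xfer) port
        50075,              // HTTP info port
        50475,              // HTTPS info port
        50020);             // IPC port

    System.out.println(datanodeId.getXferAddr());    // "127.0.0.1:50010", as used in Example 2
    System.out.println(datanodeId.getIpcAddr(false)); // IPC address, as used in Examples 7 and 10
  }
}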

Example 1: peerFromSocketAndKey

import org.apache.hadoop.hdfs.protocol.DatanodeID; // import the required package/class
public static Peer peerFromSocketAndKey(
    SaslDataTransferClient saslClient, Socket s,
    DataEncryptionKeyFactory keyFactory,
    Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
    throws IOException {
  Peer peer = null;
  boolean success = false;
  try {
    peer = peerFromSocket(s);
    peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
    success = true;
    return peer;
  } finally {
    if (!success) {
      IOUtilsClient.cleanup(null, peer);
    }
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 19, Source: NuCypherExtUtilClient.java

Example 2: RemoteBlockReader2

import org.apache.hadoop.hdfs.protocol.DatanodeID; // import the required package/class
protected RemoteBlockReader2(String file, String bpid, long blockId,
    DataChecksum checksum, boolean verifyChecksum,
    long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
    DatanodeID datanodeID, PeerCache peerCache) {
  this.isLocal = DFSClient.isLocalAddress(NetUtils.
      createSocketAddr(datanodeID.getXferAddr()));
  // Path is used only for printing block and file information in debug
  this.peer = peer;
  this.datanodeID = datanodeID;
  this.in = peer.getInputStreamChannel();
  this.checksum = checksum;
  this.verifyChecksum = verifyChecksum;
  this.startOffset = Math.max( startOffset, 0 );
  this.filename = file;
  this.peerCache = peerCache;
  this.blockId = blockId;

  // The total number of bytes that we need to transfer from the DN is
  // the amount that the user wants (bytesToRead), plus the padding at
  // the beginning in order to chunk-align. Note that the DN may elect
  // to send more than this amount if the read starts/ends mid-chunk.
  this.bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);
  bytesPerChecksum = this.checksum.getBytesPerChecksum();
  checksumSize = this.checksum.getChecksumSize();
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: RemoteBlockReader2.java
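To make the byte accounting in this constructor concrete: with hypothetical values startOffset = 1000, firstChunkOffset = 512, and bytesToRead = 4096, bytesNeededToFinish works out to 4096 + (1000 - 512) = 4584, i.e. the bytes the caller asked for plus the 488 bytes of leading padding required so that the transfer starts on a chunk boundary.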

Example 3: checkTrustAndSend

import org.apache.hadoop.hdfs.protocol.DatanodeID; // import the required package/class
/**
 * Checks if an address is already trusted and then sends client SASL
 * negotiation if required.
 *
 * @param addr connection address
 * @param underlyingOut connection output stream
 * @param underlyingIn connection input stream
 * @param encryptionKeyFactory for creation of an encryption key
 * @param accessToken connection block access token
 * @param datanodeId ID of destination DataNode
 * @return new pair of streams, wrapped after SASL negotiation
 * @throws IOException for any error
 */
private IOStreamPair checkTrustAndSend(InetAddress addr,
    OutputStream underlyingOut, InputStream underlyingIn,
    DataEncryptionKeyFactory encryptionKeyFactory,
    Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
    throws IOException {
  if (!trustedChannelResolver.isTrusted() &&
      !trustedChannelResolver.isTrusted(addr)) {
    // The encryption key factory only returns a key if encryption is enabled.
    DataEncryptionKey encryptionKey =
      encryptionKeyFactory.newDataEncryptionKey();
    return send(addr, underlyingOut, underlyingIn, encryptionKey, accessToken,
      datanodeId);
  } else {
    LOG.debug(
      "SASL client skipping handshake on trusted connection for addr = {}, "
      + "datanodeId = {}", addr, datanodeId);
    return null;
  }
}
 
Developer: naver, Project: hadoop, Lines: 33, Source: SaslDataTransferClient.java

Example 4: newConnectedPeer

import org.apache.hadoop.hdfs.protocol.DatanodeID; // import the required package/class
@Override // RemotePeerFactory
public Peer newConnectedPeer(InetSocketAddress addr,
    Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
    throws IOException {
  Peer peer = null;
  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    NetUtils.connect(sock, addr,
      getRandomLocalInterfaceAddr(),
      dfsClientConf.socketTimeout);
    peer = TcpPeerServer.peerFromSocketAndKey(saslClient, sock, this,
        blockToken, datanodeId);
    peer.setReadTimeout(dfsClientConf.socketTimeout);
    success = true;
    return peer;
  } finally {
    if (!success) {
      IOUtils.cleanup(LOG, peer);
      IOUtils.closeSocket(sock);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: DFSClient.java

Example 5: getInternal

import org.apache.hadoop.hdfs.protocol.DatanodeID; // import the required package/class
private synchronized Peer getInternal(DatanodeID dnId, boolean isDomain) {
  List<Value> sockStreamList = multimap.get(new Key(dnId, isDomain));
  if (sockStreamList == null) {
    return null;
  }

  Iterator<Value> iter = sockStreamList.iterator();
  while (iter.hasNext()) {
    Value candidate = iter.next();
    iter.remove();
    long ageMs = Time.monotonicNow() - candidate.getTime();
    Peer peer = candidate.getPeer();
    if (ageMs >= expiryPeriod) {
      try {
        peer.close();
      } catch (IOException e) {
        LOG.warn("got IOException closing stale peer " + peer +
              ", which is " + ageMs + " ms old");
      }
    } else if (!peer.isClosed()) {
      return peer;
    }
  }
  return null;
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: PeerCache.java

Example 6: doGet

import org.apache.hadoop.hdfs.protocol.DatanodeID; // import the required package/class
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response
    ) throws ServletException, IOException {
  final ServletContext context = getServletContext();
  final Configuration conf = NameNodeHttpServer.getConfFromContext(context);
  final UserGroupInformation ugi = getUGI(request, conf);
  final NameNode namenode = NameNodeHttpServer.getNameNodeFromContext(
      context);
  final DatanodeID datanode = NamenodeJspHelper.getRandomDatanode(namenode);
  try {
    response.sendRedirect(
        createRedirectURL(ugi, datanode, request, namenode).toString());
  } catch (IOException e) {
    response.sendError(400, e.getMessage());
  }
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: FileChecksumServlets.java

Example 7: createInterDataNodeProtocolProxy

import org.apache.hadoop.hdfs.protocol.DatanodeID; // import the required package/class
public static InterDatanodeProtocol createInterDataNodeProtocolProxy(
    DatanodeID datanodeid, final Configuration conf, final int socketTimeout,
    final boolean connectToDnViaHostname) throws IOException {
  final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
  final InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
  }
  final UserGroupInformation loginUgi = UserGroupInformation.getLoginUser();
  try {
    return loginUgi
        .doAs(new PrivilegedExceptionAction<InterDatanodeProtocol>() {
          @Override
          public InterDatanodeProtocol run() throws IOException {
            return new InterDatanodeProtocolTranslatorPB(addr, loginUgi,
                conf, NetUtils.getDefaultSocketFactory(conf), socketTimeout);
          }
        });
  } catch (InterruptedException ie) {
    throw new IOException(ie.getMessage());
  }
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: DataNode.java

Example 8: commitBlockSynchronization

import org.apache.hadoop.hdfs.protocol.DatanodeID; // import the required package/class
@Override
public void commitBlockSynchronization(ExtendedBlock block,
    long newgenerationstamp, long newlength, boolean closeFile,
    boolean deleteblock, DatanodeID[] newtargets, String[] newtargetstorages
    ) throws IOException {
  CommitBlockSynchronizationRequestProto.Builder builder = 
      CommitBlockSynchronizationRequestProto.newBuilder()
      .setBlock(PBHelper.convert(block)).setNewGenStamp(newgenerationstamp)
      .setNewLength(newlength).setCloseFile(closeFile)
      .setDeleteBlock(deleteblock);
  for (int i = 0; i < newtargets.length; i++) {
    builder.addNewTaragets(PBHelper.convert(newtargets[i]));
    builder.addNewTargetStorages(newtargetstorages[i]);
  }
  CommitBlockSynchronizationRequestProto req = builder.build();
  try {
    rpcProxy.commitBlockSynchronization(NULL_CONTROLLER, req);
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: DatanodeProtocolClientSideTranslatorPB.java

Example 9: commitBlockSynchronization

import org.apache.hadoop.hdfs.protocol.DatanodeID; // import the required package/class
@Override
public CommitBlockSynchronizationResponseProto commitBlockSynchronization(
    RpcController controller, CommitBlockSynchronizationRequestProto request)
    throws ServiceException {
  List<DatanodeIDProto> dnprotos = request.getNewTaragetsList();
  DatanodeID[] dns = new DatanodeID[dnprotos.size()];
  for (int i = 0; i < dnprotos.size(); i++) {
    dns[i] = PBHelper.convert(dnprotos.get(i));
  }
  final List<String> sidprotos = request.getNewTargetStoragesList();
  final String[] storageIDs = sidprotos.toArray(new String[sidprotos.size()]);
  try {
    impl.commitBlockSynchronization(PBHelper.convert(request.getBlock()),
        request.getNewGenStamp(), request.getNewLength(),
        request.getCloseFile(), request.getDeleteBlock(), dns, storageIDs);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: DatanodeProtocolServerSideTranslatorPB.java

Example 10: createClientDatanodeProtocolProxy

import org.apache.hadoop.hdfs.protocol.DatanodeID; // import the required package/class
static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy(
    DatanodeID datanodeid, Configuration conf, int socketTimeout,
    boolean connectToDnViaHostname, LocatedBlock locatedBlock) throws IOException {
  final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
  InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
  }
  
  // Since we're creating a new UserGroupInformation here, we know that no
  // future RPC proxies will be able to re-use the same connection. And
  // usages of this proxy tend to be one-off calls.
  //
  // This is a temporary fix: callers should really achieve this by using
  // RPC.stopProxy() on the resulting object, but this is currently not
  // working in trunk. See the discussion on HDFS-1965.
  Configuration confWithNoIpcIdle = new Configuration(conf);
  confWithNoIpcIdle.setInt(CommonConfigurationKeysPublic
      .IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);

  UserGroupInformation ticket = UserGroupInformation
      .createRemoteUser(locatedBlock.getBlock().getLocalBlock().toString());
  ticket.addToken(locatedBlock.getBlockToken());
  return createClientDatanodeProtocolProxy(addr, ticket, confWithNoIpcIdle,
      NetUtils.getDefaultSocketFactory(conf), socketTimeout);
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: ClientDatanodeProtocolTranslatorPB.java

Example 11: updatePipeline

import org.apache.hadoop.hdfs.protocol.DatanodeID; // import the required package/class
@Override
public void updatePipeline(String clientName, ExtendedBlock oldBlock,
    ExtendedBlock newBlock, DatanodeID[] newNodes, String[] storageIDs) throws IOException {
  UpdatePipelineRequestProto req = UpdatePipelineRequestProto.newBuilder()
      .setClientName(clientName)
      .setOldBlock(PBHelper.convert(oldBlock))
      .setNewBlock(PBHelper.convert(newBlock))
      .addAllNewNodes(Arrays.asList(PBHelper.convert(newNodes)))
      .addAllStorageIDs(storageIDs == null ? null : Arrays.asList(storageIDs))
      .build();
  try {
    rpcProxy.updatePipeline(null, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: ClientNamenodeProtocolTranslatorPB.java

Example 12: testCommitBlockSynchronization2

import org.apache.hadoop.hdfs.protocol.DatanodeID; // import the required package/class
@Test
public void testCommitBlockSynchronization2() throws IOException {
  INodeFile file = mockFileUnderConstruction();
  Block block = new Block(blockId, length, genStamp);
  FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
  DatanodeID[] newTargets = new DatanodeID[0];

  ExtendedBlock lastBlock = new ExtendedBlock();
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, false,
      false, newTargets, null);

  // Make sure the call fails if the generation stamp does not match
  // the block recovery ID.
  try {
    namesystemSpy.commitBlockSynchronization(
        lastBlock, genStamp - 1, length, false, false, newTargets, null);
    fail("Failed to get expected IOException on generation stamp/" +
         "recovery ID mismatch");
  } catch (IOException ioe) {
    // Expected exception.
  }
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: TestCommitBlockSynchronization.java

Example 13: testCommitBlockSynchronizationWithDelete

import org.apache.hadoop.hdfs.protocol.DatanodeID; // import the required package/class
@Test
public void testCommitBlockSynchronizationWithDelete() throws IOException {
  INodeFile file = mockFileUnderConstruction();
  Block block = new Block(blockId, length, genStamp);
  FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
  DatanodeID[] newTargets = new DatanodeID[0];

  ExtendedBlock lastBlock = new ExtendedBlock();
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, false,
      true, newTargets, null);

  // Simulate removing the last block from the file.
  doReturn(false).when(file).removeLastBlock(any(Block.class));

  // Repeat the call to make sure it does not throw
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, false, true, newTargets, null);
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: TestCommitBlockSynchronization.java

Example 14: testCommitBlockSynchronizationWithCloseAndNonExistantTarget

import org.apache.hadoop.hdfs.protocol.DatanodeID; // import the required package/class
@Test
public void testCommitBlockSynchronizationWithCloseAndNonExistantTarget()
    throws IOException {
  INodeFile file = mockFileUnderConstruction();
  Block block = new Block(blockId, length, genStamp);
  FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
  DatanodeID[] newTargets = new DatanodeID[]{
      new DatanodeID("0.0.0.0", "nonexistantHost", "1", 0, 0, 0, 0)};

  ExtendedBlock lastBlock = new ExtendedBlock();
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, true,
      false, newTargets, null);

  // Repeat the call to make sure it returns true
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, true, false, newTargets, null);
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: TestCommitBlockSynchronization.java

Example 15: register

import org.apache.hadoop.hdfs.protocol.DatanodeID; // import the required package/class
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: NNThroughputBenchmark.java


Note: The org.apache.hadoop.hdfs.protocol.DatanodeID examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and distribution and use should follow each project's License. Do not republish without permission.