

Java DataTransferProtocol.DATA_TRANSFER_VERSION Field: Code Examples

This page collects typical usage examples of the Java field org.apache.hadoop.hdfs.protocol.DataTransferProtocol.DATA_TRANSFER_VERSION, drawn from open-source projects. If you are wondering what this field is for or how it is used in practice, the curated examples below should help; for broader context, see also the other usage examples of org.apache.hadoop.hdfs.protocol.DataTransferProtocol.


Eleven code examples of DataTransferProtocol.DATA_TRANSFER_VERSION are shown below, ordered by popularity. They all revolve around the same handshake pattern, sketched next.
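
In every example, the sender writes DATA_TRANSFER_VERSION as a short, followed by a one-byte opcode, and the receiver rejects the connection on a mismatch. The following minimal sketch distills that pattern; sendOp and receiveOp are illustrative names, not methods from any of the projects below.

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;

class VersionHandshakeSketch {
  // Sender side: the version short always precedes the opcode byte.
  static void sendOp(DataOutputStream out, byte op) throws IOException {
    out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
    out.writeByte(op);
    out.flush();
  }

  // Receiver side: reject peers built against a different protocol version.
  static byte receiveOp(DataInputStream in) throws IOException {
    short version = in.readShort();
    if (version != DataTransferProtocol.DATA_TRANSFER_VERSION) {
      throw new IOException("Version Mismatch");
    }
    return in.readByte();
  }
}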

Example 1: createLocatedBlocks

LocatedBlocks createLocatedBlocks(List<LocatedBlock> blocks,
    BlockMetaInfoType type, int namespaceid, int methodsFingerprint) {
  switch (type) {
  case VERSION_AND_NAMESPACEID:
    return new LocatedBlocksWithMetaInfo(
        computeContentSummary().getLength(), blocks,
        isUnderConstruction(), DataTransferProtocol.DATA_TRANSFER_VERSION,
        namespaceid, methodsFingerprint);
  case VERSION:
    return new VersionedLocatedBlocks(computeContentSummary().getLength(), blocks,
      isUnderConstruction(), DataTransferProtocol.DATA_TRANSFER_VERSION);
  default:
    return new LocatedBlocks(computeContentSummary().getLength(), blocks,
      isUnderConstruction());
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 16, Source: INode.java

Example 2: register

/** 
 * Register standby with this primary
 */
@Override
public int register() throws IOException {
  enforceActive("Standby can only register with active namenode");
  verifyCheckpointerAddress();
  return DataTransferProtocol.DATA_TRANSFER_VERSION;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 9, Source: AvatarNode.java
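
The returned version lets the standby verify compatibility before it starts checkpointing. A hedged sketch of the standby's side of this call; the primaryNamenode handle and the surrounding code are illustrative, not taken from AvatarNode:

// Standby side (illustrative): compare the primary's advertised version
// with our own and abort registration on a mismatch.
int primaryVersion = primaryNamenode.register();
if (primaryVersion != DataTransferProtocol.DATA_TRANSFER_VERSION) {
  throw new IOException("Incompatible data transfer versions: primary="
      + primaryVersion + ", standby="
      + DataTransferProtocol.DATA_TRANSFER_VERSION);
}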

Example 3: updateDataTransferProtocolVersionIfNeeded

void updateDataTransferProtocolVersionIfNeeded(int remoteDataTransferVersion) {
  int newDataTransferVersion = 0;
  if (remoteDataTransferVersion < DataTransferProtocol.DATA_TRANSFER_VERSION) {
    // client is newer than server
    newDataTransferVersion = remoteDataTransferVersion;
  } else {
    // client is older or the same as server
    newDataTransferVersion = DataTransferProtocol.DATA_TRANSFER_VERSION;
  }
  synchronized (dataTransferVersion) {
    if (dataTransferVersion != newDataTransferVersion) {
      dataTransferVersion = newDataTransferVersion;
    }
  }    
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 15, Source: DFSClient.java
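
The branching above simply selects the smaller of the two versions, so both ends settle on a protocol they both speak. A condensed, behavior-equivalent sketch (the volatile int field is an assumption of this sketch; the original guards the update with a lock):

private volatile int dataTransferVersion =
    DataTransferProtocol.DATA_TRANSFER_VERSION;

// Equivalent negotiation: agree on the lower of the two protocol versions.
void updateDataTransferProtocolVersionIfNeeded(int remoteDataTransferVersion) {
  dataTransferVersion = Math.min(remoteDataTransferVersion,
      DataTransferProtocol.DATA_TRANSFER_VERSION);
}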

Example 4: sendRequest

private void sendRequest(DataOutputStream out) throws IOException {
  /* Write the header */
  ReplaceBlockHeader replaceBlockHeader = new ReplaceBlockHeader(
      DataTransferProtocol.DATA_TRANSFER_VERSION, namespaceId,
      block.getBlock().getBlockId(), block.getBlock().getGenerationStamp(),
      source.getStorageID(), proxySource.getDatanode());
  replaceBlockHeader.writeVersionAndOpCode(out);
  replaceBlockHeader.write(out);
  out.flush();
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 10, Source: Balancer.java
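
writeVersionAndOpCode presumably prefixes the header body with the same two fields every operation starts with: the version short and the opcode byte. A hedged sketch of that prefix; the field order of the header body itself is project-specific and not reproduced here:

// Illustrative sketch of the first bytes a ReplaceBlockHeader puts on the
// wire: the protocol version, then the OP_REPLACE_BLOCK opcode.
void writeVersionAndOpCode(DataOutputStream out) throws IOException {
  out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION); // 2 bytes
  out.writeByte(DataTransferProtocol.OP_REPLACE_BLOCK);       // 1 byte
}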

Example 5: updateDatanodeInfo

/**
 * Updates DatanodeInfo for each LocatedBlock in locatedBlocks.
 */
LocatedBlocksWithMetaInfo updateDatanodeInfo(LocatedBlocks locatedBlocks)
    throws IOException {
  if (locatedBlocks.getLocatedBlocks().size() == 0)
    return new LocatedBlocksWithMetaInfo(locatedBlocks.getFileLength(),
        locatedBlocks.getLocatedBlocks(), false,
        DataTransferProtocol.DATA_TRANSFER_VERSION, getNamespaceId(),
        this.nameNode.getClientProtocolMethodsFingerprint());
  List<LocatedBlock> newBlocks = new ArrayList<LocatedBlock>();

  readLock();
  try {
    for (LocatedBlock locBlock: locatedBlocks.getLocatedBlocks()) {
      Block block = locBlock.getBlock();
      int numNodes = blocksMap.numNodes(block);
      int numCorruptNodes = countNodes(block).corruptReplicas();
      int numCorruptReplicas = corruptReplicas.numCorruptReplicas(block); 

      if (numCorruptNodes != numCorruptReplicas) {
        LOG.warn("Inconsistent number of corrupt replicas for " + 
                 block + ". blockMap has " + numCorruptNodes + 
                 " but corrupt replicas map has " + numCorruptReplicas);
      }

      boolean blockCorrupt = numCorruptNodes == numNodes;
      int numMachineSet = blockCorrupt ? numNodes : (numNodes - numCorruptNodes);
      DatanodeDescriptor[] machineSet = new DatanodeDescriptor[numMachineSet];

      if (numMachineSet > 0) {
        numNodes = 0;
        for(Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block); 
            it.hasNext();) {
          DatanodeDescriptor dn = it.next();
          boolean replicaCorrupt = corruptReplicas.isReplicaCorrupt(block, dn);
          if (blockCorrupt || !replicaCorrupt) // equivalent to A || (!A && !B)
            machineSet[numNodes++] = dn;
        }
      }

      // We need to make a copy of the block object before releasing the lock
      // to prevent the state of block is changed after that and before the
      // object is serialized to clients, to avoid potential inconsistency.
      // Further optimization is possible to avoid some object copy. Since it
      // is so far not a critical path. We leave a safe approach here.
      //
      Block blockCopy  = null;
      if (block != null) {
        blockCopy = new Block(block);
      }
      LocatedBlock newBlock = new LocatedBlock(blockCopy, machineSet, 0,
          blockCorrupt);
      newBlocks.add(newBlock);
    }
  } finally {
    readUnlock();
  }

  return new LocatedBlocksWithMetaInfo(locatedBlocks.getFileLength(),
      newBlocks, false, DataTransferProtocol.DATA_TRANSFER_VERSION,
      getNamespaceId(), this.nameNode.getClientProtocolMethodsFingerprint());
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 63, Source: FSNamesystem.java
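
Embedding DATA_TRANSFER_VERSION in the response is what feeds the client-side negotiation shown in Example 3. A hedged sketch of the consuming side; the accessor name getDataProtocolVersion() and the openAndFetchMetaInfo RPC are assumptions here, not confirmed by the snippet above:

// Client side (illustrative): pull the server's protocol version out of the
// located-blocks response and let the negotiation in Example 3 pick the
// lower of the two.
LocatedBlocksWithMetaInfo located =
    namenode.openAndFetchMetaInfo(src, 0, Long.MAX_VALUE);
updateDataTransferProtocolVersionIfNeeded(located.getDataProtocolVersion());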

Example 6: call

/**
 * Do the deed, write the bytes
 */
public Boolean call() throws Exception {
  xmitsInProgress.getAndIncrement();
  Socket sock = null;
  DataOutputStream out = null;
  BlockSender blockSender = null;
  
  int dataTransferVersion = DataTransferProtocol.DATA_TRANSFER_VERSION;

  try {
    InetSocketAddress curTarget =
      NetUtils.createSocketAddr(targets[0].getName());
    sock = newSocket();
    NetUtils.connect(sock, curTarget, socketTimeout);
    sock.setSoTimeout(targets.length * socketTimeout);

    long writeTimeout = socketWriteTimeout + socketWriteExtentionTimeout
        * (targets.length - 1);
    OutputStream baseStream = NetUtils.getOutputStream(sock, writeTimeout);
    out = new DataOutputStream(new BufferedOutputStream(baseStream, 
                                                        SMALL_BUFFER_SIZE));

    blockSender = new BlockSender(srcNamespaceId, b, 0, b.getNumBytes(), false,
        false, false, false,
        dataTransferVersion >=
            DataTransferProtocol.PACKET_INCLUDE_VERSION_VERSION,
        true, datanode, null);
    DatanodeInfo srcNode = new DatanodeInfo(getDNRegistrationForNS(srcNamespaceId));

    //
    // Header info
    //
    WriteBlockHeader header = new WriteBlockHeader(
        dataTransferVersion, dstNamespaceId,
        destinationBlock.getBlockId(),
        destinationBlock.getGenerationStamp(), 0, false, true, srcNode,
        targets.length - 1, targets, "");
    header.writeVersionAndOpCode(out);
    header.write(out);

    // send data & checksum
    DataTransferThrottler throttler = null;
    if (dataTransferMaxRate > 0) {
      throttler = new DataTransferThrottler(dataTransferMaxRate);
    }
    blockSender.sendBlock(out, baseStream, throttler);

    // no response necessary
    LOG.info(getDatanodeInfo() + ":Transmitted block " + b + " at " + srcNamespaceId + " to " + curTarget);

  } catch (IOException ie) {
    LOG.warn(getDatanodeInfo() + ":Failed to transfer " + b + " at " + srcNamespaceId + " to " + targets[0].getName()
        + " got " + StringUtils.stringifyException(ie));
    // check if there are any disk problem
    try{
      datanode.checkDiskError();
    } catch (IOException e) {
      LOG.warn("Error when checking disks : " + StringUtils.stringifyException(e));
      throw e;
    }
    throw ie;
  } finally {
    xmitsInProgress.getAndDecrement();
    IOUtils.closeStream(blockSender);
    IOUtils.closeStream(out);
    IOUtils.closeSocket(sock);
  }
  return true;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 71, Source: DataNode.java
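
The throttler passed to sendBlock caps the transfer rate so re-replication traffic does not starve foreground I/O. A hedged sketch of the kind of loop sendBlock runs internally; the stream names and packet size are illustrative, while DataTransferThrottler.throttle(long) is the API used above:

// Illustrative send loop: after each packet, hand the byte count to the
// throttler, which sleeps just long enough to keep the average rate under
// dataTransferMaxRate bytes per second.
static void sendThrottled(InputStream blockIn, OutputStream out,
    long dataTransferMaxRate) throws IOException {
  DataTransferThrottler throttler =
      new DataTransferThrottler(dataTransferMaxRate);
  byte[] packet = new byte[64 * 1024];
  int read;
  while ((read = blockIn.read(packet)) > 0) {
    out.write(packet, 0, read);
    throttler.throttle(read);
  }
}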

Example 7: run

/**
 * Read/write data from/to the DataXceiveServer.
 */
public void run() {
  DataInputStream in=null; 
  try {
    in = new DataInputStream(
        new BufferedInputStream(NetUtils.getInputStream(s), 
                                SMALL_BUFFER_SIZE));
    short version = in.readShort();
    if ( version != DataTransferProtocol.DATA_TRANSFER_VERSION ) {
      throw new IOException( "Version Mismatch" );
    }
    boolean local = s.getInetAddress().equals(s.getLocalAddress());
    byte op = in.readByte();
    // Make sure the xceiver count is not exceeded
    int curXceiverCount = datanode.getXceiverCount();
    if (curXceiverCount > dataXceiverServer.maxXceiverCount) {
      throw new IOException("xceiverCount " + curXceiverCount
                            + " exceeds the limit of concurrent xcievers "
                            + dataXceiverServer.maxXceiverCount);
    }
    long startTime = DataNode.now();
    switch ( op ) {
    case DataTransferProtocol.OP_READ_BLOCK:
      readBlock( in );
      datanode.myMetrics.addReadBlockOp(DataNode.now() - startTime);
      if (local)
        datanode.myMetrics.incrReadsFromLocalClient();
      else
        datanode.myMetrics.incrReadsFromRemoteClient();
      break;
    case DataTransferProtocol.OP_WRITE_BLOCK:
      writeBlock( in );
      datanode.myMetrics.addWriteBlockOp(DataNode.now() - startTime);
      if (local)
        datanode.myMetrics.incrWritesFromLocalClient();
      else
        datanode.myMetrics.incrWritesFromRemoteClient();
      break;
    case DataTransferProtocol.OP_REPLACE_BLOCK: // for balancing purpose; send to a destination
      replaceBlock(in);
      datanode.myMetrics.addReplaceBlockOp(DataNode.now() - startTime);
      break;
    case DataTransferProtocol.OP_COPY_BLOCK:
          // for balancing purpose; send to a proxy source
      copyBlock(in);
      datanode.myMetrics.addCopyBlockOp(DataNode.now() - startTime);
      break;
    case DataTransferProtocol.OP_BLOCK_CHECKSUM: //get the checksum of a block
      getBlockChecksum(in);
      datanode.myMetrics.addBlockChecksumOp(DataNode.now() - startTime);
      break;
    default:
      throw new IOException("Unknown opcode " + op + " in data stream");
    }
  } catch (Throwable t) {
    LOG.error(datanode.dnRegistration + ":DataXceiver",t);
  } finally {
    LOG.debug(datanode.dnRegistration + ":Number of active connections is: "
                             + datanode.getXceiverCount());
    IOUtils.closeStream(in);
    IOUtils.closeSocket(s);
    dataXceiverServer.childSockets.remove(s);
  }
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines: 66, Source: DataXceiver.java

Example 8: init

void init() throws IOException {
	erasedStripe = getErasedStripe(srcFs, srcFile,
			parityFs, parityFile, codec, blockIndex);
	ErasureCode ec;
	if (codec.id.equals("crs"))
		ec = new CauchyRSCode();
	else if (codec.id.equals("lrc"))
		ec = new LocallyRepairableCode();
	else // guard: the original would hit a NullPointerException on an unknown codec id
		throw new IOException("unknown codec: " + codec.id);
	ec.init(codec);
	LocatedBlockWithMetaInfo[] blocks = erasedStripe.getBlocks();
	CandidateLocations candidate = ec.getCandidateLocations(erasedStripe, 
			erasedLocationToFix);
	if (candidate == null) {
		throw new TooManyErasedLocations("too many erasures");
	}
	int[] candidateLocations = candidate.locations;
	int minNum = candidate.minNum;
	DatanodeInfo root = srcFs.getClient().getDatanodeInfo();
	String rootHost = root.name;
	DatanodeInfo[] datanodeInfos = BlockReconstructor.CorruptBlockReconstructor.
			getCandidateDatanodeInfos(erasedStripe, candidateLocations, root);
	nodes = BlockReconstructor.CorruptBlockReconstructor.
			getCandidateNodes(erasedStripe, candidateLocations, rootHost);
	if (nodes == null)
		throw new IOException("nodes is null");
	int[][] distances = BlockReconstructor.CorruptBlockReconstructor.
			getRealDistances(datanodeInfos);
	MinimumSpanningTree.Result result;
	if(structure.equals("tree")) {
		result = MinimumSpanningTree.chooseAndBuildTree(nodes, 
				distances, 0, minNum);
	} else if(structure.equals("line")){
		result = MinimumSpanningTree.chooseAndBuildLine(nodes, 
				distances, 0, minNum);
	} else {
		result = MinimumSpanningTree.chooseAndBuildStar(nodes, 
				distances, 0, minNum);
	}
	
	int[] choosed = result.chosed;
	if (choosed == null)
		throw new IOException("choosed is null");
	int[] locationsToUse = ec.getLocationsToUse(erasedStripe, nodes, 
			choosed, erasedLocationToFix);
	int[] recoverVector = ec.getRecoverVector(locationsToUse, erasedLocationToFix);
	
	if (recoverVector == null)
		throw new IOException("recoverVector is null");
	
	for (int j = 0; j < choosed.length; j++) {
		nodes[choosed[j]].getElement().setCoefficient(recoverVector[j]);
	}
	LocatedBlockWithMetaInfo block = blocks[erasedLocationToFix];
	
	header = new MergeBlockHeader(new VersionAndOpcode(
			DataTransferProtocol.DATA_TRANSFER_VERSION, 
			DataTransferProtocol.OP_MERGE_BLOCK));
	header.setLevel(1);
	header.setRecovery(true);
	header.setNamespaceId(block.getNamespaceID());
	header.setBlockId(block.getBlock().getBlockId());
	header.setGenStamp(block.getBlock().getGenerationStamp());
}
 
Developer: iVCE, Project: RDFS, Lines: 65, Source: Decoder.java

Example 9: planRepairTask

RepairTask planRepairTask(PathInfo pathInfo, Stripe stripe, int erasedLocation, 
		ErasureCode ec) throws IOException {
	
	MergeBlockHeader header = new MergeBlockHeader(new VersionAndOpcode(
			DataTransferProtocol.DATA_TRANSFER_VERSION, 
			DataTransferProtocol.OP_MERGE_BLOCK));
	header.setLevel(0);
	header.setRecovery(false);
	LocatedBlockWithMetaInfo[] blocks = stripe.getBlocks();
	CandidateLocations candidate = ec.getCandidateLocations(stripe, erasedLocation);
	if (candidate == null) {
		LOG.warn("NTar : There are not enough live blocks to reconstruct the lost block!");
		return null;
	}
	int[] candidateLocations = candidate.locations;
	int minNum = candidate.minNum;
	DatanodeInfo root = chooseRootDatanodeInfo(stripe, erasedLocation);
	String rootHost = root.name;
	DatanodeInfo[] datanodeInfos = getCandidateDatanodeInfos(stripe, candidateLocations, root);
	RecoverTreeNode[] nodes = getCandidateNodes(stripe, candidateLocations, rootHost);
	int[][] distances = getRealDistances(datanodeInfos);
	MinimumSpanningTree.Result result;
	int[] choosed;
	if(structure.equals("tree")) {
		result = MinimumSpanningTree.chooseAndBuildTree(nodes, distances, 0, minNum);
	} else if(structure.equals("line")){
		result = MinimumSpanningTree.chooseAndBuildLine(nodes, distances, 0, minNum);
	} else {
		result = MinimumSpanningTree.chooseAndBuildStar(nodes, distances, 0, minNum);
	}
	if (result == null)
		return null;
	choosed = result.chosed;
	int[] locationsToUse = ec.getLocationsToUse(stripe, nodes, choosed, erasedLocation);
	int[] recoverVector = ec.getRecoverVector(locationsToUse, erasedLocation);
	for(int j = 0; j < choosed.length; j++) {
		nodes[choosed[j]].getElement().setCoefficient(recoverVector[j]);
	}
	LocatedBlockWithMetaInfo block = blocks[erasedLocation];
	header.setNamespaceId(block.getNamespaceID());
	header.setBlockId(block.getBlock().getBlockId());
	header.setGenStamp(block.getBlock().getGenerationStamp());
	header.setOffsetInBlock(0);
	header.setLength(block.getBlockSize());
	return new RepairTask(pathInfo, result, nodes[0], nodes, header);
}
 
Developer: iVCE, Project: RDFS, Lines: 46, Source: BlockReconstructor.java

Example 10: call

/**
 * Do the deed, write the bytes
 */
public Boolean call() throws Exception {
  xmitsInProgress.getAndIncrement();
  Socket sock = null;
  DataOutputStream out = null;
  BlockSender blockSender = null;

  try {
    InetSocketAddress curTarget =
      NetUtils.createSocketAddr(targets[0].getName());
    sock = newSocket();
    NetUtils.connect(sock, curTarget, socketTimeout);
    sock.setSoTimeout(targets.length * socketTimeout);

    long writeTimeout = socketWriteTimeout + socketWriteExtentionTimeout
        * (targets.length - 1);
    OutputStream baseStream = NetUtils.getOutputStream(sock, writeTimeout);
    out = new DataOutputStream(new BufferedOutputStream(baseStream, 
                                                        SMALL_BUFFER_SIZE));

    blockSender = new BlockSender(srcNamespaceId, b, 0, b.getNumBytes(),
        false, false, false, datanode);
    DatanodeInfo srcNode = new DatanodeInfo(getDNRegistrationForNS(srcNamespaceId));

    //
    // Header info
    //
    WriteBlockHeader header = new WriteBlockHeader(
        DataTransferProtocol.DATA_TRANSFER_VERSION, dstNamespaceId,
        destinationBlock.getBlockId(),
        destinationBlock.getGenerationStamp(), 0, false, true, srcNode,
        targets.length - 1, targets, "");
    header.writeVersionAndOpCode(out);
    header.write(out);

    // send data & checksum
    blockSender.sendBlock(out, baseStream, null);

    // no response necessary
    LOG.info(getDatanodeInfo() + ":Transmitted block " + b + " at " + srcNamespaceId + " to " + curTarget);

  } catch (IOException ie) {
    LOG.warn(getDatanodeInfo() + ":Failed to transfer " + b + " at " + srcNamespaceId + " to " + targets[0].getName()
        + " got " + StringUtils.stringifyException(ie));
    // check if there are any disk problem
    try{
      datanode.checkDiskError();
    } catch (IOException e) {
      LOG.warn("Error when checking disks : " + StringUtils.stringifyException(e));
      throw e;
    }
    throw ie;
  } finally {
    xmitsInProgress.getAndDecrement();
    IOUtils.closeStream(blockSender);
    IOUtils.closeStream(out);
    IOUtils.closeSocket(sock);
  }
  return true;
}
 
Developer: iVCE, Project: RDFS, Lines: 62, Source: DataNode.java

Example 11: run

/**
 * Read/write data from/to the DataXceiveServer.
 */
public void run() {
  DataInputStream in=null; 
  try {
    in = new DataInputStream(
        new BufferedInputStream(NetUtils.getInputStream(s), 
                                SMALL_BUFFER_SIZE));
    short version = in.readShort();
    if ( version != DataTransferProtocol.DATA_TRANSFER_VERSION ) {
      throw new IOException( "Version Mismatch" );
    }
    boolean local = s.getInetAddress().equals(s.getLocalAddress());
    byte op = in.readByte();
    // Make sure the xceiver count is not exceeded
    int curXceiverCount = datanode.getXceiverCount();
    if (curXceiverCount > dataXceiverServer.maxXceiverCount) {
      throw new IOException("xceiverCount " + curXceiverCount
                            + " exceeds the limit of concurrent xcievers "
                            + dataXceiverServer.maxXceiverCount);
    }
    long startTime = DataNode.now();
    switch ( op ) {
    case DataTransferProtocol.OP_READ_BLOCK:
      readBlock( in );
      datanode.myMetrics.readBlockOp.inc(DataNode.now() - startTime);
      if (local)
        datanode.myMetrics.readsFromLocalClient.inc();
      else
        datanode.myMetrics.readsFromRemoteClient.inc();
      break;
    case DataTransferProtocol.OP_WRITE_BLOCK:
      writeBlock( in );
      datanode.myMetrics.writeBlockOp.inc(DataNode.now() - startTime);
      if (local)
        datanode.myMetrics.writesFromLocalClient.inc();
      else
        datanode.myMetrics.writesFromRemoteClient.inc();
      break;
    case DataTransferProtocol.OP_READ_METADATA:
      readMetadata( in );
      datanode.myMetrics.readMetadataOp.inc(DataNode.now() - startTime);
      break;
    case DataTransferProtocol.OP_REPLACE_BLOCK: // for balancing purpose; send to a destination
      replaceBlock(in);
      datanode.myMetrics.replaceBlockOp.inc(DataNode.now() - startTime);
      break;
    case DataTransferProtocol.OP_COPY_BLOCK:
          // for balancing purpose; send to a proxy source
      copyBlock(in);
      datanode.myMetrics.copyBlockOp.inc(DataNode.now() - startTime);
      break;
    case DataTransferProtocol.OP_BLOCK_CHECKSUM: //get the checksum of a block
      getBlockChecksum(in);
      datanode.myMetrics.blockChecksumOp.inc(DataNode.now() - startTime);
      break;
    default:
      throw new IOException("Unknown opcode " + op + " in data stream");
    }
  } catch (Throwable t) {
    LOG.error(datanode.dnRegistration + ":DataXceiver",t);
  } finally {
    LOG.debug(datanode.dnRegistration + ":Number of active connections is: "
                             + datanode.getXceiverCount());
    IOUtils.closeStream(in);
    IOUtils.closeSocket(s);
    dataXceiverServer.childSockets.remove(s);
  }
}
 
Developer: thisisvoa, Project: hadoop-0.20, Lines: 70, Source: DataXceiver.java


Note: the org.apache.hadoop.hdfs.protocol.DataTransferProtocol.DATA_TRANSFER_VERSION examples on this page were collected from open-source code hosted on platforms such as GitHub and MSDocs. Copyright in each snippet remains with its original authors; consult the corresponding project's license before reusing or redistributing the code.