当前位置: 首页>>代码示例>>Java>>正文


Java BlockReader.newBlockReader方法代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.BlockReader.newBlockReader方法的典型用法代码示例。如果您正苦于以下问题:Java BlockReader.newBlockReader方法的具体用法?Java BlockReader.newBlockReader怎么用?Java BlockReader.newBlockReader使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hdfs.BlockReader的用法示例。


在下文中一共展示了BlockReader.newBlockReader方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: accessBlock

import org.apache.hadoop.hdfs.BlockReader; //导入方法依赖的package包/类
/**
 * Try to set up a read of a block on a data node. If it fails, the
 * exception propagates to the caller.
 *
 * @param datanode the datanode hosting a replica of the block
 * @param lblock   the located block (supplies the block id and access token)
 * @throws IOException if connecting to the datanode or creating the
 *         block reader fails
 */
private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock)
  throws IOException {
  InetSocketAddress targetAddr = null;
  Socket s = null;
  BlockReader blockReader = null; 
  Block block = lblock.getBlock(); 
 
  targetAddr = NetUtils.createSocketAddr(datanode.getName());
    
  s = new Socket();
  try {
    s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
    s.setSoTimeout(HdfsConstants.READ_TIMEOUT);

    String file = BlockReader.getFileName(targetAddr, block.getBlockId());
    // offset 0, length -1 presumably requests the whole block -- TODO
    // confirm against the BlockReader.newBlockReader contract.
    blockReader = 
      BlockReader.newBlockReader(s, file, block, lblock
        .getBlockToken(), 0, -1, 4096);

    // nothing else to do - if the setup fails it throws an exception
  } finally {
    // The original version leaked the socket; always release it.
    s.close();
  }
}
 
开发者ID:cumulusyebl,项目名称:cumulus,代码行数:27,代码来源:TestDataNodeVolumeFailure.java

示例2: streamBlockInAscii

import org.apache.hadoop.hdfs.BlockReader; //导入方法依赖的package包/类
/**
 * Reads up to {@code chunkSizeToView} bytes of the given block, starting at
 * {@code offsetIntoBlock}, from the datanode at {@code addr} and writes the
 * bytes (HTML-quoted) to {@code out}.
 *
 * @param addr            address of the datanode holding the replica
 * @param blockId         id of the block to read
 * @param blockToken      access token authorizing the read
 * @param genStamp        generation stamp of the block
 * @param blockSize       total size of the block in bytes
 * @param offsetIntoBlock starting offset of the chunk within the block
 * @param chunkSizeToView maximum number of bytes to display; 0 is a no-op
 * @param out             JSP writer receiving the quoted text
 * @param conf            configuration (supplies io.file.buffer.size)
 * @throws IOException if the connection or the read fails
 */
public static void streamBlockInAscii(InetSocketAddress addr, 
    long blockId, Token<BlockTokenIdentifier> blockToken, long genStamp,
    long blockSize, long offsetIntoBlock, long chunkSizeToView,
    JspWriter out, Configuration conf) throws IOException {
  if (chunkSizeToView == 0) return;
  Socket s = new Socket();
  try {
    s.connect(addr, HdfsConstants.READ_TIMEOUT);
    s.setSoTimeout(HdfsConstants.READ_TIMEOUT);

    // Never read past the end of the block.
    long amtToRead = Math.min(chunkSizeToView, blockSize - offsetIntoBlock);

    // Use the block name for the file name.
    String file = BlockReader.getFileName(addr, blockId);
    BlockReader blockReader = BlockReader.newBlockReader(s, file,
        new Block(blockId, 0, genStamp), blockToken,
        offsetIntoBlock, amtToRead, conf.getInt("io.file.buffer.size", 4096));

    byte[] buf = new byte[(int) amtToRead];
    int readOffset = 0;
    int retries = 2;
    while (amtToRead > 0) {
      int numRead;
      try {
        numRead = blockReader.readAll(buf, readOffset, (int) amtToRead);
      } catch (IOException e) {
        retries--;
        if (retries == 0)
          // Preserve the cause so the failure is diagnosable upstream.
          throw new IOException("Could not read data from datanode", e);
        continue;
      }
      if (numRead <= 0) {
        // EOF before the requested range was read. The original code would
        // loop forever here: amtToRead -= -1 *grows* the remaining count.
        throw new IOException("Premature EOF reading block " + blockId);
      }
      amtToRead -= numRead;
      readOffset += numRead;
    }
    // NOTE(review): new String(buf) uses the platform default charset;
    // presumably the block content is ASCII/UTF-8 -- confirm before changing.
    out.print(HtmlQuoting.quoteHtmlChars(new String(buf)));
  } finally {
    // The original only closed the socket on the success path; close always.
    s.close();
  }
}
 
开发者ID:cumulusyebl,项目名称:cumulus,代码行数:39,代码来源:JspHelper.java

示例3: streamBlockInAscii

import org.apache.hadoop.hdfs.BlockReader; //导入方法依赖的package包/类
/**
 * Reads up to {@code chunkSizeToView} bytes of the given block, starting at
 * {@code offsetIntoBlock}, from the datanode at {@code addr} and writes the
 * raw bytes as text to {@code out}.
 *
 * @param addr            address of the datanode holding the replica
 * @param namespaceId     namespace the block belongs to
 * @param blockId         id of the block to read
 * @param genStamp        generation stamp of the block
 * @param blockSize       total size of the block in bytes
 * @param offsetIntoBlock starting offset of the chunk within the block
 * @param chunkSizeToView maximum number of bytes to display; 0 is a no-op
 * @param out             JSP writer receiving the text
 * @throws IOException if the connection or the read fails
 */
public void streamBlockInAscii(InetSocketAddress addr, int namespaceId,
                               long blockId, long genStamp, long blockSize,
                               long offsetIntoBlock, long chunkSizeToView, JspWriter out) 
  throws IOException {
  if (chunkSizeToView == 0) return;
  Socket s = new Socket();
  try {
    s.connect(addr, HdfsConstants.READ_TIMEOUT);
    s.setSoTimeout(HdfsConstants.READ_TIMEOUT);

    // Never read past the end of the block.
    long amtToRead = Math.min(chunkSizeToView, blockSize - offsetIntoBlock);

    // Use the block name for the file name.
    BlockReader blockReader = 
      BlockReader.newBlockReader(DataTransferProtocol.DATA_TRANSFER_VERSION,
                                  namespaceId,
                                  s, addr.toString() + ":" + blockId,
                                  blockId, genStamp ,offsetIntoBlock, 
                                  amtToRead, 
                                  conf.getInt("io.file.buffer.size", 4096));

    byte[] buf = new byte[(int) amtToRead];
    int readOffset = 0;
    int retries = 2;
    while (amtToRead > 0) {
      int numRead;
      try {
        numRead = blockReader.readAll(buf, readOffset, (int) amtToRead);
      } catch (IOException e) {
        retries--;
        if (retries == 0)
          // Preserve the cause so the failure is diagnosable upstream.
          throw new IOException("Could not read data from datanode", e);
        continue;
      }
      if (numRead <= 0) {
        // EOF before the requested range was read. The original code would
        // loop forever here: amtToRead -= -1 *grows* the remaining count.
        throw new IOException("Premature EOF reading block " + blockId);
      }
      amtToRead -= numRead;
      readOffset += numRead;
    }
    // NOTE(review): new String(buf) uses the platform default charset;
    // presumably the block content is ASCII/UTF-8 -- confirm before changing.
    out.print(new String(buf));
  } finally {
    // The original only closed the socket on the success path; close always.
    s.close();
  }
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:42,代码来源:JspHelper.java

示例4: tryRead

import org.apache.hadoop.hdfs.BlockReader; //导入方法依赖的package包/类
/**
 * Attempts to set up an OP_READ_BLOCK against the first replica of
 * {@code lblock} and asserts the outcome matches {@code shouldSucceed}:
 * an {@link InvalidBlockTokenException} is expected iff
 * {@code shouldSucceed} is false; any other {@code IOException} fails
 * the test.
 *
 * @param conf          configuration (supplies io.file.buffer.size)
 * @param lblock        the block to read, with its access token
 * @param shouldSucceed whether the token is expected to be accepted
 */
private static void tryRead(Configuration conf, LocatedBlock lblock,
    boolean shouldSucceed) {
  InetSocketAddress targetAddr = null;
  Socket s = null;
  BlockReader blockReader = null;
  Block block = lblock.getBlock();
  try {
    DatanodeInfo[] nodes = lblock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getName());
    s = new Socket();
    s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
    s.setSoTimeout(HdfsConstants.READ_TIMEOUT);

    String file = BlockReader.getFileName(targetAddr, block.getBlockId());
    blockReader = BlockReader.newBlockReader(s, file, block, 
        lblock.getBlockToken(), 0, -1, 
        conf.getInt("io.file.buffer.size", 4096));

  } catch (InvalidBlockTokenException ex) {
    // Token was rejected: that is only correct when we expected rejection.
    assertFalse("OP_READ_BLOCK: access token is invalid, "
        + "when it is expected to be valid", shouldSucceed);
    return;
  } catch (IOException ex) {
    // Include the actual exception so a genuine failure is diagnosable
    // (the original message discarded it).
    fail("OP_READ_BLOCK failed due to reasons other than access token: "
        + ex);
  } finally {
    if (s != null) {
      try {
        s.close();
      } catch (IOException ignored) {
        // Best-effort close of a test socket; nothing useful to do here.
      } finally {
        s = null;
      }
    }
  }
  // NOTE(review): blockReader is never closed; this BlockReader API view
  // does not show a close() -- confirm whether one exists before adding it.
  if (blockReader == null) {
    fail("OP_READ_BLOCK failed due to reasons other than access token");
  }
  assertTrue("OP_READ_BLOCK: access token is valid, "
      + "when it is expected to be invalid", shouldSucceed);
}
 
开发者ID:cumulusyebl,项目名称:cumulus,代码行数:42,代码来源:TestBlockTokenWithDFS.java


注:本文中的org.apache.hadoop.hdfs.BlockReader.newBlockReader方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。