

Java HdfsFileStatus.getLen Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.protocol.HdfsFileStatus.getLen. If you are wondering what HdfsFileStatus.getLen does, how to call it, or where to find it used in practice, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.protocol.HdfsFileStatus.


The following presents 11 code examples of the HdfsFileStatus.getLen method, sorted by popularity by default.
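Before the collected examples, here is a minimal, self-contained sketch (not taken from any of the examples below) of the typical call pattern: obtain an HdfsFileStatus from a DFSClient via getFileInfo and read the file length with getLen(). The NameNode URI, file path, and class name are placeholders chosen for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

import java.io.IOException;
import java.net.URI;

public class GetLenSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Placeholder NameNode address; replace with your cluster's fs.defaultFS.
    DFSClient client = new DFSClient(URI.create("hdfs://namenode:8020"), conf);
    try {
      // getFileInfo returns null if the path does not exist.
      HdfsFileStatus status = client.getFileInfo("/user/example/data.txt");
      if (status != null && !status.isDir()) {
        long length = status.getLen(); // file length in bytes
        System.out.println("length = " + length);
      }
    } finally {
      client.close();
    }
  }
}

Most applications would go through FileSystem.getFileStatus instead; DFSClient is shown here only because many of the examples below operate on it directly.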

Example 1: getNfs3FileAttrFromFileStatus

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the package/class this method depends on
public static Nfs3FileAttributes getNfs3FileAttrFromFileStatus(
    HdfsFileStatus fs, IdMappingServiceProvider iug) {
  /**
   * Some 32-bit Linux clients have problems with 64-bit fileIds: the 32-bit
   * client appears to take only the lower 32 bits of the fileId and treat it
   * as a signed int. When the 32nd bit is 1, the client considers it invalid.
   */
  NfsFileType fileType = fs.isDir() ? NfsFileType.NFSDIR : NfsFileType.NFSREG;
  fileType = fs.isSymlink() ? NfsFileType.NFSLNK : fileType;
  int nlink = (fileType == NfsFileType.NFSDIR) ? fs.getChildrenNum() + 2 : 1;
  long size = (fileType == NfsFileType.NFSDIR) ? getDirSize(fs
      .getChildrenNum()) : fs.getLen();
  return new Nfs3FileAttributes(fileType, nlink,
      fs.getPermission().toShort(), iug.getUidAllowingUnknown(fs.getOwner()),
      iug.getGidAllowingUnknown(fs.getGroup()), size, 0 /* fsid */,
      fs.getFileId(), fs.getModificationTime(), fs.getAccessTime(),
      new Nfs3FileAttributes.Specdata3());
}
 
Developer ID: naver | Project: hadoop | Lines: 19 | Source: Nfs3Utils.java

Example 2: DFSOutputStream

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the package/class this method depends on
/** Construct a new output stream for append. */
private DFSOutputStream(DFSClient dfsClient, String src,
    EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock,
    HdfsFileStatus stat, DataChecksum checksum) throws IOException {
  this(dfsClient, src, progress, stat, checksum);
  initialFileSize = stat.getLen(); // length of file when opened
  this.shouldSyncBlock = flags.contains(CreateFlag.SYNC_BLOCK);

  boolean toNewBlock = flags.contains(CreateFlag.NEW_BLOCK);

  // The last partial block of the file has to be filled.
  if (!toNewBlock && lastBlock != null) {
    // indicate that we are appending to an existing block
    bytesCurBlock = lastBlock.getBlockSize();
    streamer = new DataStreamer(lastBlock, stat, bytesPerChecksum);
  } else {
    computePacketChunkSize(dfsClient.getConf().writePacketSize,
        bytesPerChecksum);
    streamer = new DataStreamer(stat,
        lastBlock != null ? lastBlock.getBlock() : null);
  }
  this.fileEncryptionInfo = stat.getFileEncryptionInfo();
}
 
Developer ID: naver | Project: hadoop | Lines: 24 | Source: DFSOutputStream.java

Example 3: getWccAttr

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the package/class this method depends on
public static WccAttr getWccAttr(DFSClient client, String fileIdPath)
    throws IOException {
  HdfsFileStatus fstat = getFileStatus(client, fileIdPath);
  if (fstat == null) {
    return null;
  }

  long size = fstat.isDir() ? getDirSize(fstat.getChildrenNum()) : fstat
      .getLen();
  return new WccAttr(size, new NfsTime(fstat.getModificationTime()),
      new NfsTime(fstat.getModificationTime()));
}
 
Developer ID: naver | Project: hadoop | Lines: 13 | Source: Nfs3Utils.java

Example 4: makeQualified

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the package/class this method depends on
private FileStatus makeQualified(HdfsFileStatus f, Path parent) {
  return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
      f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
      f.getPermission(), f.getOwner(), f.getGroup(),
      f.isSymlink() ? new Path(f.getSymlink()) : null,
      f.getFullPath(parent).makeQualified(getUri(), getWorkingDirectory()));
}
 
Developer ID: naver | Project: hadoop | Lines: 8 | Source: WebHdfsFileSystem.java

Example 5: pickSrcDatanode

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the package/class this method depends on
/** Select a datanode to service this request.
 * Currently, this looks at no more than the first five blocks of a file,
 * selecting a datanode randomly from the most represented.
 * @param conf configuration used when selecting the best datanode
 */
private DatanodeID pickSrcDatanode(LocatedBlocks blks, HdfsFileStatus i,
    Configuration conf) throws IOException {
  if (i.getLen() == 0 || blks.getLocatedBlocks().size() <= 0) {
    // pick a random datanode
    NameNode nn = NameNodeHttpServer.getNameNodeFromContext(
        getServletContext());
    return NamenodeJspHelper.getRandomDatanode(nn);
  }
  return JspHelper.bestNode(blks, conf);
}
 
Developer ID: naver | Project: hadoop | Lines: 16 | Source: FileDataServlet.java

Example 6: toFileStatus

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the package/class this method depends on
static FileStatus toFileStatus(HdfsFileStatus f, String parent) {
  return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
      f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
      f.getPermission(), f.getOwner(), f.getGroup(),
      f.isSymlink() ? new Path(f.getSymlink()) : null,
      new Path(f.getFullName(parent)));
}
 
Developer ID: naver | Project: hadoop | Lines: 8 | Source: TestJsonUtil.java

Example 7: cacheInitialContents

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the package/class this method depends on
private byte[] cacheInitialContents() throws IOException {
  HdfsFileStatus status = dfsClient.getFileInfo(name);
  byte[] content = new byte[(int)status.getLen()];
  DFSInputStream in = null;
  try {
    in = dfsClient.open(name);
    IOUtils.readFully(in, content, 0, content.length);
  } finally {
    in.close();
  }
  return content;
}
 
Developer ID: naver | Project: hadoop | Lines: 13 | Source: TestFsck.java

Example 8: checkSalvagedRemains

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the package/class this method depends on
public void checkSalvagedRemains() throws IOException {
  int chainIdx = 0;
  HdfsFileStatus status = dfsClient.getFileInfo(name);
  long length = status.getLen();
  int numBlocks = (int)((length + blockSize - 1) / blockSize);
  DFSInputStream in = null;
  byte[] blockBuffer = new byte[blockSize];

  try {
    for (int blockIdx = 0; blockIdx < numBlocks; blockIdx++) {
      if (blocksToCorrupt.contains(blockIdx)) {
        if (in != null) {
          in.close();
          in = null;
        }
        continue;
      }
      if (in == null) {
        in = dfsClient.open("/lost+found" + name + "/" + chainIdx);
        chainIdx++;
      }
      int len = blockBuffer.length;
      if (blockIdx == (numBlocks - 1)) {
        // The last block might not be full-length
        len = (int)(in.getFileLength() % blockSize);
        if (len == 0) len = blockBuffer.length;
      }
      IOUtils.readFully(in, blockBuffer, 0, len);
      int startIdx = blockIdx * blockSize;
      for (int i = 0; i < len; i++) {
        if (initialContents[startIdx + i] != blockBuffer[i]) {
          throw new IOException("salvaged file " + name + " differed " +
          "from what we expected on block " + blockIdx);
        }
      }
    }
  } finally {
    IOUtils.cleanup(null, in);
  }
}
 
Developer ID: naver | Project: hadoop | Lines: 41 | Source: TestFsck.java

Example 9: DataStreamer

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the package/class this method depends on
/**
 * Construct a data streamer for appending to the last partial block
 * @param lastBlock last block of the file to be appended
 * @param stat status of the file to be appended
 * @param bytesPerChecksum number of bytes per checksum
 * @throws IOException if error occurs
 */
private DataStreamer(LocatedBlock lastBlock, HdfsFileStatus stat,
    int bytesPerChecksum) throws IOException {
  isAppend = true;
  stage = BlockConstructionStage.PIPELINE_SETUP_APPEND;
  block = lastBlock.getBlock();
  bytesSent = block.getNumBytes();
  accessToken = lastBlock.getBlockToken();
  isLazyPersistFile = isLazyPersist(stat);
  long usedInLastBlock = stat.getLen() % blockSize;
  int freeInLastBlock = (int)(blockSize - usedInLastBlock);

  // calculate the amount of free space in the pre-existing 
  // last crc chunk
  int usedInCksum = (int)(stat.getLen() % bytesPerChecksum);
  int freeInCksum = bytesPerChecksum - usedInCksum;

  // if there is space in the last block, then we have to 
  // append to that block
  if (freeInLastBlock == blockSize) {
    throw new IOException("The last block for file " + 
        src + " is full.");
  }

  if (usedInCksum > 0 && freeInCksum > 0) {
    // if there is space in the last partial chunk, then 
    // setup in such a way that the next packet will have only 
    // one chunk that fills up the partial chunk.
    //
    computePacketChunkSize(0, freeInCksum);
    setChecksumBufSize(freeInCksum);
    appendChunk = true;
  } else {
    // if the remaining space in the block is smaller than
    // the expected size of a packet, then create a
    // smaller packet.
    //
    computePacketChunkSize(Math.min(dfsClient.getConf().writePacketSize, freeInLastBlock), 
        bytesPerChecksum);
  }

  // setup pipeline to append to the last block XXX retries??
  setPipeline(lastBlock);
  errorIndex = -1;   // no errors yet.
  if (nodes.length < 1) {
    throw new IOException("Unable to retrieve blocks locations " +
        " for last block " + block +
        "of file " + src);

  }
}
 
Developer ID: naver | Project: hadoop | Lines: 58 | Source: DFSOutputStream.java

Example 10: chooseDatanode

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the package/class this method depends on
@VisibleForTesting
static DatanodeInfo chooseDatanode(final NameNode namenode,
    final String path, final HttpOpParam.Op op, final long openOffset,
    final long blocksize, final String excludeDatanodes) throws IOException {
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  
  HashSet<Node> excludes = new HashSet<Node>();
  if (excludeDatanodes != null) {
    for (String host : StringUtils
        .getTrimmedStringCollection(excludeDatanodes)) {
      int idx = host.indexOf(":");
      if (idx != -1) {          
        excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr(
            host.substring(0, idx), Integer.parseInt(host.substring(idx + 1))));
      } else {
        excludes.add(bm.getDatanodeManager().getDatanodeByHost(host));
      }
    }
  }

  if (op == PutOpParam.Op.CREATE) {
    //choose a datanode near to client 
    final DatanodeDescriptor clientNode = bm.getDatanodeManager(
        ).getDatanodeByHost(getRemoteAddress());
    if (clientNode != null) {
      final DatanodeStorageInfo[] storages = bm.chooseTarget4WebHDFS(
          path, clientNode, excludes, blocksize);
      if (storages.length > 0) {
        return storages[0].getDatanodeDescriptor();
      }
    }
  } else if (op == GetOpParam.Op.OPEN
      || op == GetOpParam.Op.GETFILECHECKSUM
      || op == PostOpParam.Op.APPEND) {
    //choose a datanode containing a replica 
    final NamenodeProtocols np = getRPCServer(namenode);
    final HdfsFileStatus status = np.getFileInfo(path);
    if (status == null) {
      throw new FileNotFoundException("File " + path + " not found.");
    }
    final long len = status.getLen();
    if (op == GetOpParam.Op.OPEN) {
      if (openOffset < 0L || (openOffset >= len && len > 0)) {
        throw new IOException("Offset=" + openOffset
            + " out of the range [0, " + len + "); " + op + ", path=" + path);
      }
    }

    if (len > 0) {
      final long offset = op == GetOpParam.Op.OPEN? openOffset: len - 1;
      final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
      final int count = locations.locatedBlockCount();
      if (count > 0) {
        return bestNode(locations.get(0).getLocations(), excludes);
      }
    }
  } 

  return (DatanodeDescriptor)bm.getDatanodeManager().getNetworkTopology(
      ).chooseRandom(NodeBase.ROOT);
}
 
Developer ID: naver | Project: hadoop | Lines: 62 | Source: NamenodeWebHdfsMethods.java

Example 11: testConcatNotCompleteBlock

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the package/class this method depends on
@Test
public void testConcatNotCompleteBlock() throws IOException {
  long trgFileLen = blockSize*3;
  long srcFileLen = blockSize*3+20; // block at the end - not full

  
  // create first file
  String name1="/trg", name2="/src";
  Path filePath1 = new Path(name1);
  DFSTestUtil.createFile(dfs, filePath1, trgFileLen, REPL_FACTOR, 1);
  
  HdfsFileStatus fStatus = nn.getFileInfo(name1);
  long fileLen = fStatus.getLen();
  assertEquals(fileLen, trgFileLen);
  
  //read the file
  FSDataInputStream stm = dfs.open(filePath1);
  byte[] byteFile1 = new byte[(int)trgFileLen];
  stm.readFully(0, byteFile1);
  stm.close();
  
  LocatedBlocks lb1 = nn.getBlockLocations(name1, 0, trgFileLen);
  
  Path filePath2 = new Path(name2);
  DFSTestUtil.createFile(dfs, filePath2, srcFileLen, REPL_FACTOR, 1);
  fStatus = nn.getFileInfo(name2);
  fileLen = fStatus.getLen();
  assertEquals(srcFileLen, fileLen);
  
  // read the file
  stm = dfs.open(filePath2);
  byte[] byteFile2 = new byte[(int)srcFileLen];
  stm.readFully(0, byteFile2);
  stm.close();
  
  LocatedBlocks lb2 = nn.getBlockLocations(name2, 0, srcFileLen);
  
  
  System.out.println("trg len="+trgFileLen+"; src len="+srcFileLen);
  
  // move the blocks
  dfs.concat(filePath1, new Path [] {filePath2});
  
  long totalLen = trgFileLen + srcFileLen;
  fStatus = nn.getFileInfo(name1);
  fileLen = fStatus.getLen();
  
  // read the resulting file
  stm = dfs.open(filePath1);
  byte[] byteFileConcat = new byte[(int)fileLen];
  stm.readFully(0, byteFileConcat);
  stm.close();
  
  LocatedBlocks lbConcat = nn.getBlockLocations(name1, 0, fileLen);
  
  //verifications
  // 1. number of blocks
  assertEquals(lbConcat.locatedBlockCount(), 
      lb1.locatedBlockCount() + lb2.locatedBlockCount());
  
  // 2. file lengths
  System.out.println("file1 len="+fileLen+"; total len="+totalLen);
  assertEquals(fileLen, totalLen);
  
  // 3. removal of the src file
  fStatus = nn.getFileInfo(name2);
  assertNull("File " + name2 + " still exists", fStatus); // file shouldn't exist

  // 4. content
  checkFileContent(byteFileConcat, new byte [] [] {byteFile1, byteFile2});
}
 
Developer ID: naver | Project: hadoop | Lines: 72 | Source: TestHDFSConcat.java


Note: The org.apache.hadoop.hdfs.protocol.HdfsFileStatus.getLen method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the License of the corresponding project. Do not reproduce this article without permission.