

Java LocatedBlocks.locatedBlockCount Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.protocol.LocatedBlocks.locatedBlockCount, gathered from open-source projects. If you are wondering what LocatedBlocks.locatedBlockCount does, how to call it, or what real-world uses look like, the curated examples below should help: the method returns the number of LocatedBlock entries held by a LocatedBlocks object, which is typically the response of ClientProtocol.getBlockLocations. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.protocol.LocatedBlocks.


Seven code examples of LocatedBlocks.locatedBlockCount are shown below, ordered by popularity by default.
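Before diving into the examples, here is a minimal, self-contained sketch of where a LocatedBlocks instance usually comes from and how locatedBlockCount is read. The cluster URI and file path are placeholder assumptions; DistributedFileSystem.getClient(), DFSClient.getNamenode() and ClientProtocol.getBlockLocations are the same calls the examples below rely on.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class LocatedBlockCountSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder cluster URI and path; adjust for a real deployment.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(new URI("hdfs://localhost:8020"), conf);
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    String src = "/tmp/example.dat";
    // Ask the NameNode for the block locations covering the whole file.
    LocatedBlocks blocks = dfs.getClient().getNamenode()
        .getBlockLocations(src, 0, Long.MAX_VALUE);

    // locatedBlockCount() is simply the number of LocatedBlock entries returned.
    System.out.println(src + " spans " + blocks.locatedBlockCount() + " block(s)");
    fs.close();
  }
}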

Example 1: collectFileSummary

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the class the method depends on
private void collectFileSummary(String path, HdfsFileStatus file, Result res,
    LocatedBlocks blocks) throws IOException {
  long fileLen = file.getLen();
  boolean isOpen = blocks.isUnderConstruction();
  if (isOpen && !showOpenFiles) {
    // We collect these stats about open files to report with default options
    res.totalOpenFilesSize += fileLen;
    res.totalOpenFilesBlocks += blocks.locatedBlockCount();
    res.totalOpenFiles++;
    return;
  }
  res.totalFiles++;
  res.totalSize += fileLen;
  res.totalBlocks += blocks.locatedBlockCount();
  if (showOpenFiles && isOpen) {
    out.print(path + " " + fileLen + " bytes, " +
      blocks.locatedBlockCount() + " block(s), OPENFORWRITE: ");
  } else if (showFiles) {
    out.print(path + " " + fileLen + " bytes, " +
      blocks.locatedBlockCount() + " block(s): ");
  } else if (showprogress) {
    out.print('.');
  }
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 25 | Source: NamenodeFsck.java

Example 2: testAbandonBlock

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the class the method depends on
/** Abandon a block while creating a file. */
@Test
public void testAbandonBlock() throws IOException {
  String src = FILE_NAME_PREFIX + "foo";

  // Start writing a file but do not close it
  FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short)1, 512L);
  for (int i = 0; i < 1024; i++) {
    fout.write(123);
  }
  fout.hflush();
  long fileId = ((DFSOutputStream)fout.getWrappedStream()).getFileId();

  // Now abandon the last block
  DFSClient dfsclient = DFSClientAdapter.getDFSClient(fs);
  LocatedBlocks blocks =
    dfsclient.getNamenode().getBlockLocations(src, 0, Integer.MAX_VALUE);
  int originalNumBlocks = blocks.locatedBlockCount();
  LocatedBlock b = blocks.getLastLocatedBlock();
  dfsclient.getNamenode().abandonBlock(b.getBlock(), fileId, src,
      dfsclient.clientName);
  
  // call abandonBlock again to make sure the operation is idempotent
  dfsclient.getNamenode().abandonBlock(b.getBlock(), fileId, src,
      dfsclient.clientName);

  // And close the file
  fout.close();

  // Close cluster and check the block has been abandoned after restart
  cluster.restartNameNode();
  blocks = dfsclient.getNamenode().getBlockLocations(src, 0,
      Integer.MAX_VALUE);
  Assert.assertEquals("Block " + b + " has not been abandoned.",
      originalNumBlocks, blocks.locatedBlockCount() + 1);
}
 
Developer: naver | Project: hadoop | Lines: 37 | Source: TestAbandonBlock.java

Example 3: getListing

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the class the method depends on
/**
 * Get a partial listing of the indicated directory
 *
 * We will stop when any of the following conditions is met:
 * 1) this.lsLimit files have been added
 * 2) needLocation is true AND enough files have been added such
 * that at least this.lsLimit block locations are in the response
 *
 * @param fsd FSDirectory
 * @param iip the INodesInPath instance containing all the INodes along the
 *            path
 * @param src the directory name
 * @param startAfter the name to start listing after
 * @param needLocation if block locations are returned
 * @return a partial listing starting after startAfter
 */
private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip,
    String src, byte[] startAfter, boolean needLocation, boolean isSuperUser)
    throws IOException {
  String srcs = FSDirectory.normalizePath(src);
  final boolean isRawPath = FSDirectory.isReservedRawName(src);

  fsd.readLock();
  try {
    if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
      return getSnapshotsListing(fsd, srcs, startAfter);
    }
    final int snapshot = iip.getPathSnapshotId();
    final INode targetNode = iip.getLastINode();
    if (targetNode == null)
      return null;
    byte parentStoragePolicy = isSuperUser ?
        targetNode.getStoragePolicyID() : BlockStoragePolicySuite
        .ID_UNSPECIFIED;

    if (!targetNode.isDirectory()) {
      return new DirectoryListing(
          new HdfsFileStatus[]{createFileStatus(fsd, src,
              HdfsFileStatus.EMPTY_NAME, targetNode, needLocation,
              parentStoragePolicy, snapshot, isRawPath, iip)}, 0);
    }

    final INodeDirectory dirInode = targetNode.asDirectory();
    final ReadOnlyList<INode> contents = dirInode.getChildrenList(snapshot);
    int startChild = INodeDirectory.nextChild(contents, startAfter);
    int totalNumChildren = contents.size();
    int numOfListing = Math.min(totalNumChildren - startChild,
        fsd.getLsLimit());
    int locationBudget = fsd.getLsLimit();
    int listingCnt = 0;
    HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
    for (int i = 0; i < numOfListing && locationBudget > 0; i++) {
      INode cur = contents.get(startChild + i);
      byte curPolicy = isSuperUser && !cur.isSymlink() ?
          cur.getLocalStoragePolicyID() :
          BlockStoragePolicySuite.ID_UNSPECIFIED;
      listing[i] = createFileStatus(fsd, src, cur.getLocalNameBytes(), cur,
          needLocation, getStoragePolicyID(curPolicy,
              parentStoragePolicy), snapshot, isRawPath, iip);
      listingCnt++;
      if (needLocation) {
        // Once we hit lsLimit locations, stop.
        // This helps to prevent excessively large response payloads.
        // Approximate #locations with locatedBlockCount() * repl_factor
        LocatedBlocks blks =
            ((HdfsLocatedFileStatus) listing[i]).getBlockLocations();
        locationBudget -= (blks == null) ? 0 :
            blks.locatedBlockCount() * listing[i].getReplication();
      }
    }
    // truncate return array if necessary
    if (listingCnt < numOfListing) {
      listing = Arrays.copyOf(listing, listingCnt);
    }
    return new DirectoryListing(
        listing, totalNumChildren - startChild - listingCnt);
  } finally {
    fsd.readUnlock();
  }
}
 
Developer: naver | Project: hadoop | Lines: 81 | Source: FSDirStatAndListingOp.java
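
The location-budget logic in getListing above caps how many entries a single listing response may carry: when block locations are requested, each listed file consumes roughly locatedBlockCount() * replication from the budget. Below is a standalone sketch of that arithmetic, simplified to the budget loop alone; the budget, block counts, and replication factors are made-up numbers, not values from the Hadoop source.

public class ListingBudgetSketch {
  public static void main(String[] args) {
    // Hypothetical numbers illustrating the budget logic in getListing.
    int locationBudget = 1000;            // stands in for fsd.getLsLimit()
    int[] blockCounts = {2, 8, 120, 300}; // assumed locatedBlockCount() per file
    short[] replication = {3, 3, 3, 3};   // assumed replication factor per file
    int listed = 0;
    for (int i = 0; i < blockCounts.length && locationBudget > 0; i++) {
      listed++;                           // an entry is added before the budget check
      locationBudget -= blockCounts[i] * replication[i];
    }
    // 2*3 + 8*3 + 120*3 = 390 locations leave a budget of 610, so the fourth
    // file is still listed (budget drops to -290) and the loop stops: listed == 4.
    System.out.println("entries in this page of the listing: " + listed);
  }
}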

Example 4: chooseDatanode

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the class the method depends on
@VisibleForTesting
static DatanodeInfo chooseDatanode(final NameNode namenode,
    final String path, final HttpOpParam.Op op, final long openOffset,
    final long blocksize, final String excludeDatanodes) throws IOException {
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  
  HashSet<Node> excludes = new HashSet<Node>();
  if (excludeDatanodes != null) {
    for (String host : StringUtils
        .getTrimmedStringCollection(excludeDatanodes)) {
      int idx = host.indexOf(":");
      if (idx != -1) {          
        excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr(
            host.substring(0, idx), Integer.parseInt(host.substring(idx + 1))));
      } else {
        excludes.add(bm.getDatanodeManager().getDatanodeByHost(host));
      }
    }
  }

  if (op == PutOpParam.Op.CREATE) {
    // choose a datanode near the client
    final DatanodeDescriptor clientNode =
        bm.getDatanodeManager().getDatanodeByHost(getRemoteAddress());
    if (clientNode != null) {
      final DatanodeStorageInfo[] storages = bm.chooseTarget4WebHDFS(
          path, clientNode, excludes, blocksize);
      if (storages.length > 0) {
        return storages[0].getDatanodeDescriptor();
      }
    }
  } else if (op == GetOpParam.Op.OPEN
      || op == GetOpParam.Op.GETFILECHECKSUM
      || op == PostOpParam.Op.APPEND) {
    // choose a datanode containing a replica
    final NamenodeProtocols np = getRPCServer(namenode);
    final HdfsFileStatus status = np.getFileInfo(path);
    if (status == null) {
      throw new FileNotFoundException("File " + path + " not found.");
    }
    final long len = status.getLen();
    if (op == GetOpParam.Op.OPEN) {
      if (openOffset < 0L || (openOffset >= len && len > 0)) {
        throw new IOException("Offset=" + openOffset
            + " out of the range [0, " + len + "); " + op + ", path=" + path);
      }
    }

    if (len > 0) {
      final long offset = op == GetOpParam.Op.OPEN ? openOffset : len - 1;
      final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
      final int count = locations.locatedBlockCount();
      if (count > 0) {
        return bestNode(locations.get(0).getLocations(), excludes);
      }
    }
  } 

  return (DatanodeDescriptor) bm.getDatanodeManager().getNetworkTopology()
      .chooseRandom(NodeBase.ROOT);
}
 
Developer: naver | Project: hadoop | Lines: 62 | Source: NamenodeWebHdfsMethods.java

Example 5: testFileCreationError2

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the class the method depends on
/**
 * Test that the filesystem removes the last block from a file if its
 * lease expires.
 */
@Test
public void testFileCreationError2() throws IOException {
  long leasePeriod = 1000;
  System.out.println("testFileCreationError2 start");
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  // create cluster
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  DistributedFileSystem dfs = null;
  try {
    cluster.waitActive();
    dfs = cluster.getFileSystem();
    DFSClient client = dfs.dfs;

    // create a new file.
    //
    Path file1 = new Path("/filestatus.dat");
    createFile(dfs, file1, 1);
    System.out.println("testFileCreationError2: "
                       + "Created file filestatus.dat with one replicas.");

    LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                file1.toString(), 0, Long.MAX_VALUE);
    System.out.println("testFileCreationError2: "
        + "The file has " + locations.locatedBlockCount() + " blocks.");

    // add one block to the file
    LocatedBlock location = client.getNamenode().addBlock(file1.toString(),
        client.clientName, null, null, INodeId.GRANDFATHER_INODE_ID, null);
    System.out.println("testFileCreationError2: "
        + "Added block " + location.getBlock());

    locations = client.getNamenode().getBlockLocations(file1.toString(), 
                                                  0, Long.MAX_VALUE);
    int count = locations.locatedBlockCount();
    System.out.println("testFileCreationError2: "
        + "The file now has " + count + " blocks.");
    
    // set the soft and hard limit to be 1 second so that the
    // namenode triggers lease recovery
    cluster.setLeasePeriod(leasePeriod, leasePeriod);

    // wait for the lease to expire
    try {
      Thread.sleep(5 * leasePeriod);
    } catch (InterruptedException e) {
      // ignored: the sleep only needs to be approximate
    }

    // verify that the last block was synchronized.
    locations = client.getNamenode().getBlockLocations(file1.toString(), 
                                                  0, Long.MAX_VALUE);
    System.out.println("testFileCreationError2: "
        + "locations = " + locations.locatedBlockCount());
    assertEquals(0, locations.locatedBlockCount());
    System.out.println("testFileCreationError2 successful");
  } finally {
    IOUtils.closeStream(dfs);
    cluster.shutdown();
  }
}
 
Developer: naver | Project: hadoop | Lines: 69 | Source: TestFileCreation.java

Example 6: chooseDatanode

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the class the method depends on
@VisibleForTesting
static DatanodeInfo chooseDatanode(final NameNode namenode,
    final String path, final HttpOpParam.Op op, final long openOffset,
    final long blocksize, final String excludeDatanodes) throws IOException {
  FSNamesystem fsn = namenode.getNamesystem();
  if (fsn == null) {
    throw new IOException("Namesystem has not been intialized yet.");
  }
  final BlockManager bm = fsn.getBlockManager();
  
  HashSet<Node> excludes = new HashSet<Node>();
  if (excludeDatanodes != null) {
    for (String host : StringUtils
        .getTrimmedStringCollection(excludeDatanodes)) {
      int idx = host.indexOf(":");
      if (idx != -1) {          
        excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr(
            host.substring(0, idx), Integer.parseInt(host.substring(idx + 1))));
      } else {
        excludes.add(bm.getDatanodeManager().getDatanodeByHost(host));
      }
    }
  }

  if (op == PutOpParam.Op.CREATE) {
    // choose a datanode near the client
    final DatanodeDescriptor clientNode =
        bm.getDatanodeManager().getDatanodeByHost(getRemoteAddress());
    if (clientNode != null) {
      final DatanodeStorageInfo[] storages = bm.chooseTarget4WebHDFS(
          path, clientNode, excludes, blocksize);
      if (storages.length > 0) {
        return storages[0].getDatanodeDescriptor();
      }
    }
  } else if (op == GetOpParam.Op.OPEN
      || op == GetOpParam.Op.GETFILECHECKSUM
      || op == PostOpParam.Op.APPEND) {
    // choose a datanode containing a replica
    final NamenodeProtocols np = getRPCServer(namenode);
    final HdfsFileStatus status = np.getFileInfo(path);
    if (status == null) {
      throw new FileNotFoundException("File " + path + " not found.");
    }
    final long len = status.getLen();
    if (op == GetOpParam.Op.OPEN) {
      if (openOffset < 0L || (openOffset >= len && len > 0)) {
        throw new IOException("Offset=" + openOffset
            + " out of the range [0, " + len + "); " + op + ", path=" + path);
      }
    }

    if (len > 0) {
      final long offset = op == GetOpParam.Op.OPEN ? openOffset : len - 1;
      final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
      final int count = locations.locatedBlockCount();
      if (count > 0) {
        return bestNode(locations.get(0).getLocations(), excludes);
      }
    }
  } 

  return (DatanodeDescriptor) bm.getDatanodeManager().getNetworkTopology()
      .chooseRandom(NodeBase.ROOT);
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 66 | Source: NamenodeWebHdfsMethods.java

Example 7: testFileCreationError2

import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the class the method depends on
/**
 * Test that the filesystem removes the last block from a file if its
 * lease expires.
 */
@Test
public void testFileCreationError2() throws IOException {
  long leasePeriod = 1000;
  System.out.println("testFileCreationError2 start");
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  // create cluster
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  DistributedFileSystem dfs = null;
  try {
    cluster.waitActive();
    dfs = cluster.getFileSystem();
    DFSClient client = dfs.dfs;

    // create a new file.
    //
    Path file1 = new Path("/filestatus.dat");
    createFile(dfs, file1, 1);
    System.out.println("testFileCreationError2: "
                       + "Created file filestatus.dat with one replicas.");

    LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                file1.toString(), 0, Long.MAX_VALUE);
    System.out.println("testFileCreationError2: "
        + "The file has " + locations.locatedBlockCount() + " blocks.");

    // add one block to the file
    LocatedBlock location = client.getNamenode().addBlock(file1.toString(),
        client.clientName, null, null, HdfsConstants.GRANDFATHER_INODE_ID, null);
    System.out.println("testFileCreationError2: "
        + "Added block " + location.getBlock());

    locations = client.getNamenode().getBlockLocations(file1.toString(), 
                                                  0, Long.MAX_VALUE);
    int count = locations.locatedBlockCount();
    System.out.println("testFileCreationError2: "
        + "The file now has " + count + " blocks.");
    
    // set the soft and hard limit to be 1 second so that the
    // namenode triggers lease recovery
    cluster.setLeasePeriod(leasePeriod, leasePeriod);

    // wait for the lease to expire
    try {
      Thread.sleep(5 * leasePeriod);
    } catch (InterruptedException e) {
      // ignored: the sleep only needs to be approximate
    }

    // verify that the last block was synchronized.
    locations = client.getNamenode().getBlockLocations(file1.toString(), 
                                                  0, Long.MAX_VALUE);
    System.out.println("testFileCreationError2: "
        + "locations = " + locations.locatedBlockCount());
    assertEquals(0, locations.locatedBlockCount());
    System.out.println("testFileCreationError2 successful");
  } finally {
    IOUtils.closeStream(dfs);
    cluster.shutdown();
  }
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 69 | Source: TestFileCreation.java


Note: the org.apache.hadoop.hdfs.protocol.LocatedBlocks.locatedBlockCount examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright in the code remains with the original authors. Follow the corresponding project's License when distributing or using them; do not reproduce without permission.