当前位置: 首页>>代码示例>>Java>>正文


Java ClientProtocol.getBlockLocations方法代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations方法的典型用法代码示例。如果您正苦于以下问题:Java ClientProtocol.getBlockLocations方法的具体用法?Java ClientProtocol.getBlockLocations怎么用?Java ClientProtocol.getBlockLocations使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hdfs.protocol.ClientProtocol的用法示例。


在下文中一共展示了ClientProtocol.getBlockLocations方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: callGetBlockLocations

import org.apache.hadoop.hdfs.protocol.ClientProtocol; //导入方法依赖的package包/类
/**
 * @see ClientProtocol#getBlockLocations(String, long, long)
 */
/**
 * Asks the NameNode for the block locations covering a byte range of a file,
 * converting wrapped server-side failures into their local exception types.
 *
 * @see ClientProtocol#getBlockLocations(String, long, long)
 */
static LocatedBlocks callGetBlockLocations(ClientProtocol namenode,
    String src, long start, long length) 
    throws IOException {
  try {
    return namenode.getBlockLocations(src, start, length);
  } catch (RemoteException re) {
    // Unwrap so callers can catch the concrete exception type directly.
    IOException unwrapped = re.unwrapRemoteException(
        AccessControlException.class,
        FileNotFoundException.class,
        UnresolvedPathException.class);
    throw unwrapped;
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:15,代码来源:DFSClient.java

示例2: createRedirectURL

import org.apache.hadoop.hdfs.protocol.ClientProtocol; //导入方法依赖的package包/类
/** Create a redirection URL */
/** Create a redirection URL */
private URL createRedirectURL(String path, String encodedPath, HdfsFileStatus status, 
    UserGroupInformation ugi, ClientProtocol nnproxy, HttpServletRequest request, String dt)
    throws IOException {
  final String scheme = request.getScheme();
  final String resolvedPath =
      status.getFullPath(new Path(path)).toUri().getPath();
  // Only the first byte is needed to learn which datanodes hold the file.
  final LocatedBlocks blks = nnproxy.getBlockLocations(resolvedPath, 0, 1);
  final Configuration conf =
      NameNodeHttpServer.getConfFromContext(getServletContext());

  // Pick a serving datanode; prefer its hostname when one is available.
  final DatanodeID host = pickSrcDatanode(blks, status, conf);
  final String hostname =
      (host instanceof DatanodeInfo) ? host.getHostName() : host.getIpAddr();
  final int port = "https".equals(scheme)
      ? host.getInfoSecurePort()
      : host.getInfoPort();

  String dtParam = "";
  if (dt != null) {
    dtParam = JspHelper.getDelegationTokenUrlParam(dt);
  }

  // Add namenode address to the url params
  final NameNode nn =
      NameNodeHttpServer.getNameNodeFromContext(getServletContext());
  final String addr = nn.getNameNodeAddressHostPortString();
  final String addrParam =
      JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);

  final String file = "/streamFile" + encodedPath + '?' +
      "ugi=" + ServletUtil.encodeQueryValue(ugi.getShortUserName()) +
      dtParam + addrParam;
  return new URL(scheme, hostname, port, file);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:37,代码来源:FileDataServlet.java

示例3: waitForBlockReplication

import org.apache.hadoop.hdfs.protocol.ClientProtocol; //导入方法依赖的package包/类
private void waitForBlockReplication(String filename, 
                                     ClientProtocol namenode,
                                     int expected, long maxWaitSec) 
                                     throws IOException {
  final long start = Time.monotonicNow();

  //wait for all the blocks to be replicated;
  LOG.info("Checking for block replication for " + filename);

  LocatedBlocks locs = namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
  assertEquals(numBlocks, locs.locatedBlockCount());

  for (int idx = 0; idx < numBlocks; ++idx) {
    LOG.info("Checking for block:" + (idx + 1));
    // Poll this block until it reaches the expected replica count
    // (typically once block 0 is done the rest are done too).
    while (true) {
      locs = namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
      assertEquals(numBlocks, locs.locatedBlockCount());
      final LocatedBlock blk = locs.get(idx);
      final int replicas = blk.getLocations().length;
      if (replicas == expected) {
        LOG.info("Got enough replicas for " + (idx + 1) + "th block " + blk.getBlock() +
            ", got " + replicas + ".");
        break;
      }
      LOG.info("Not enough replicas for " + (idx + 1) + "th block " + blk.getBlock() +
          " yet. Expecting " + expected + ", got " + replicas + ".");

      if (maxWaitSec > 0
          && (Time.monotonicNow() - start) > (maxWaitSec * 1000)) {
        throw new IOException("Timedout while waiting for all blocks to " +
                              " be replicated for " + filename);
      }

      try {
        Thread.sleep(500);
      } catch (InterruptedException ignored) {
        // best-effort poll; keep waiting
      }
    }
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:41,代码来源:TestInjectionForSimulatedStorage.java

示例4: getLastLocatedBlock

import org.apache.hadoop.hdfs.protocol.ClientProtocol; //导入方法依赖的package包/类
public static LocatedBlock getLastLocatedBlock(
    ClientProtocol namenode, String src) throws IOException {
  //get block info for the last block
  final List<LocatedBlock> blks =
      namenode.getBlockLocations(src, 0, Long.MAX_VALUE).getLocatedBlocks();
  DataNode.LOG.info("blocks.size()=" + blks.size());
  assertTrue(blks.size() > 0);

  return blks.get(blks.size() - 1);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:11,代码来源:TestInterDatanodeProtocol.java

示例5: waitForBlockReplication

import org.apache.hadoop.hdfs.protocol.ClientProtocol; //导入方法依赖的package包/类
private void waitForBlockReplication(String filename, 
                                     ClientProtocol namenode,
                                     int expected, long maxWaitSec) 
                                     throws IOException {
  final long pollStart = Time.monotonicNow();

  //wait for all the blocks to be replicated;
  LOG.info("Checking for block replication for " + filename);
  while (true) {
    boolean allReplicated = true;
    final LocatedBlocks locs =
        namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);

    for (LocatedBlock blk : locs.getLocatedBlocks()) {
      final int replicas = blk.getLocations().length;
      if (replicas < expected) {
        LOG.info("Not enough replicas for " + blk.getBlock()
            + " yet. Expecting " + expected + ", got " + replicas + ".");
        allReplicated = false;
        break;
      }
    }

    if (allReplicated) {
      return;
    }

    if (maxWaitSec > 0
        && (Time.monotonicNow() - pollStart) > (maxWaitSec * 1000)) {
      throw new IOException("Timedout while waiting for all blocks to " +
                            " be replicated for " + filename);
    }

    try {
      Thread.sleep(500);
    } catch (InterruptedException ignored) {
      // keep polling until replicated or timed out
    }
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:41,代码来源:TestReplication.java

示例6: callGetBlockLocations

import org.apache.hadoop.hdfs.protocol.ClientProtocol; //导入方法依赖的package包/类
/**
 * @see ClientProtocol#getBlockLocations(String, long, long)
 */
/**
 * Retrieves the located blocks for a byte range of {@code src} from the
 * NameNode; remote failures are rethrown as their original local types.
 *
 * @see ClientProtocol#getBlockLocations(String, long, long)
 */
static LocatedBlocks callGetBlockLocations(ClientProtocol namenode,
    String src, long start, long length)
    throws IOException {
  try {
    return namenode.getBlockLocations(src, start, length);
  } catch (RemoteException re) {
    // Translate the wire-level exception back into the caller-visible type.
    throw re.unwrapRemoteException(
        AccessControlException.class,
        FileNotFoundException.class,
        UnresolvedPathException.class);
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:15,代码来源:DFSClient.java

示例7: waitForBlockReplication

import org.apache.hadoop.hdfs.protocol.ClientProtocol; //导入方法依赖的package包/类
private void waitForBlockReplication(String filename,
    ClientProtocol namenode,
    int expected, long maxWaitSec,
    boolean isUnderConstruction, boolean noOverReplication)
    throws IOException {
  final long start = Time.monotonicNow();

  //wait for all the blocks to be replicated;
  LOG.info("Checking for block replication for " + filename);
  while (true) {
    boolean replOk = true;
    final LocatedBlocks located =
        namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);

    // A file under construction is still writing its last block,
    // so that block is excluded from the replication check.
    final int total = located.locatedBlockCount();
    final int checkCount = isUnderConstruction ? total - 1 : total;

    for (int idx = 0; idx < checkCount; idx++) {
      final LocatedBlock blk = located.get(idx);
      final int replicas = blk.getLocations().length;
      if (noOverReplication) {
        assertTrue(replicas <= expected);
      }
      if (replicas < expected) {
        LOG.info("Not enough replicas for " + blk.getBlock()
            + " yet. Expecting " + expected + ", got " + replicas + ".");
        replOk = false;
        break;
      }
    }

    if (replOk) {
      return;
    }

    if (maxWaitSec > 0
        && (Time.monotonicNow() - start) > (maxWaitSec * 1000)) {
      throw new IOException("Timedout while waiting for all blocks to " +
                            " be replicated for " + filename);
    }

    try {
      Thread.sleep(500);
    } catch (InterruptedException ignored) {
      // keep polling until replicated or timed out
    }
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:48,代码来源:TestReplication.java


注:本文中的org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。