

Java ClientDatanodeProtocol.getBlockLocalPathInfo Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol.getBlockLocalPathInfo. If you are unsure what this method does, how to call it, or where to find it used in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol.


The nine code examples of ClientDatanodeProtocol.getBlockLocalPathInfo shown below are sorted by popularity by default.
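Before the examples, here is a minimal, self-contained sketch of how a client might call this method directly, given a LocatedBlock for a file. The class name GetBlockLocalPathInfoSketch and the helper resolveLocalPaths are hypothetical; the 60-second timeout and the DFSUtilClient.createClientDatanodeProtocolProxy factory mirror the test examples below, and the call is assumed to run as a user permitted by dfs.block.local-path-access.user on the same host as the replica. Treat it as an orientation sketch, not production code.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.token.Token;

public class GetBlockLocalPathInfoSketch {

  // Resolve the on-disk block/meta file paths of a replica on the given
  // (assumed local) DataNode. This mirrors what the examples below do internally.
  static BlockLocalPathInfo resolveLocalPaths(LocatedBlock lb, Configuration conf)
      throws IOException {
    ExtendedBlock blk = lb.getBlock();
    Token<BlockTokenIdentifier> token = lb.getBlockToken();
    DatanodeInfo dn = lb.getLocations()[0]; // assume the first replica is the local one

    // RPC proxy to the DataNode: 60s timeout, connect by IP rather than hostname.
    ClientDatanodeProtocol proxy =
        DFSUtilClient.createClientDatanodeProtocolProxy(dn, conf, 60000, false);
    try {
      // The DataNode verifies the block token and the configured allowed users,
      // then returns the local paths of the block file and its meta file.
      return proxy.getBlockLocalPathInfo(blk, token);
    } finally {
      RPC.stopProxy(proxy);
    }
  }
}

The returned BlockLocalPathInfo exposes the block file and metadata file paths (getBlockPath() / getMetaPath()), which is what the legacy short-circuit local read path uses to open the replica directly instead of streaming it over the DataNode's data transfer protocol.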

Example 1: getBlockPathInfo

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the package/class this method depends on
private static BlockLocalPathInfo getBlockPathInfo(UserGroupInformation ugi,
    ExtendedBlock blk, DatanodeInfo node, Configuration conf, int timeout,
    Token<BlockTokenIdentifier> token, boolean connectToDnViaHostname)
    throws IOException {
  LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node.getIpcPort());
  BlockLocalPathInfo pathinfo = null;
  ClientDatanodeProtocol proxy = localDatanodeInfo.getDatanodeProxy(ugi, node,
      conf, timeout, connectToDnViaHostname);
  try {
    // make RPC to local datanode to find local pathnames of blocks
    pathinfo = proxy.getBlockLocalPathInfo(blk, token);
    if (pathinfo != null) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Cached location of block " + blk + " as " + pathinfo);
      }
      localDatanodeInfo.setBlockLocalPathInfo(blk, pathinfo);
    }
  } catch (IOException e) {
    localDatanodeInfo.resetDatanodeProxy(); // Reset proxy on error
    throw e;
  }
  return pathinfo;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 24, Source: BlockReaderLocalLegacy.java

Example 2: getBlockPathInfo

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the package/class this method depends on
private static BlockLocalPathInfo getBlockPathInfo(ExtendedBlock blk,
    DatanodeInfo node, Configuration conf, int timeout,
    Token<BlockTokenIdentifier> token, boolean connectToDnViaHostname)
    throws IOException {
  LocalDatanodeInfo localDatanodeInfo =
      getLocalDatanodeInfo(node.getIpcPort());
  BlockLocalPathInfo pathinfo = null;
  ClientDatanodeProtocol proxy = localDatanodeInfo
      .getDatanodeProxy(node, conf, timeout, connectToDnViaHostname);
  try {
    // make RPC to local datanode to find local pathnames of blocks
    pathinfo = proxy.getBlockLocalPathInfo(blk, token);
    if (pathinfo != null) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Cached location of block " + blk + " as " + pathinfo);
      }
      localDatanodeInfo.setBlockLocalPathInfo(blk, pathinfo);
    }
  } catch (IOException e) {
    localDatanodeInfo.resetDatanodeProxy(); // Reset proxy on error
    throw e;
  }
  return pathinfo;
}
 
Developer: hopshadoop, Project: hops, Lines: 25, Source: BlockReaderLocal.java

Example 3: getBlockPathInfo

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the package/class this method depends on
private static BlockLocalPathInfo getBlockPathInfo(Block blk,
    DatanodeInfo node, Configuration conf, int timeout,
    Token<BlockTokenIdentifier> token) throws IOException {
  LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node.ipcPort);
  BlockLocalPathInfo pathinfo = null;
  ClientDatanodeProtocol proxy = localDatanodeInfo.getDatanodeProxy(node,
      conf, timeout);
  try {
    // make RPC to local datanode to find local pathnames of blocks
    pathinfo = proxy.getBlockLocalPathInfo(blk, token);
    if (pathinfo != null) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Cached location of block " + blk + " as " + pathinfo);
      }
      localDatanodeInfo.setBlockLocalPathInfo(blk, pathinfo);
    }
  } catch (IOException e) {
    localDatanodeInfo.resetDatanodeProxy(); // Reset proxy on error
    throw e;
  }
  return pathinfo;
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines: 23, Source: BlockReaderLocal.java

Example 4: getBlockPathInfo

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the package/class this method depends on
private static BlockLocalPathInfo getBlockPathInfo(UserGroupInformation ugi,
    ExtendedBlock blk, DatanodeInfo node, Configuration conf, int timeout,
    Token<BlockTokenIdentifier> token, boolean connectToDnViaHostname,
    StorageType storageType) throws IOException {
  LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node.getIpcPort());
  BlockLocalPathInfo pathinfo = null;
  ClientDatanodeProtocol proxy = localDatanodeInfo.getDatanodeProxy(ugi, node,
      conf, timeout, connectToDnViaHostname);
  try {
    // make RPC to local datanode to find local pathnames of blocks
    pathinfo = proxy.getBlockLocalPathInfo(blk, token);
    // We cannot cache the path information for a replica on transient storage.
    // If the replica gets evicted, then it moves to a different path.  Then,
    // our next attempt to read from the cached path would fail to find the
    // file.  Additionally, the failure would cause us to disable legacy
    // short-circuit read for all subsequent use in the ClientContext.  Unlike
    // the newer short-circuit read implementation, we have no communication
    // channel for the DataNode to notify the client that the path has been
    // invalidated.  Therefore, our only option is to skip caching.
    if (pathinfo != null && !storageType.isTransient()) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Cached location of block " + blk + " as " + pathinfo);
      }
      localDatanodeInfo.setBlockLocalPathInfo(blk, pathinfo);
    }
  } catch (IOException e) {
    localDatanodeInfo.resetDatanodeProxy(); // Reset proxy on error
    throw e;
  }
  return pathinfo;
}
 
Developer: naver, Project: hadoop, Lines: 32, Source: BlockReaderLocalLegacy.java

Example 5: testDeprecatedGetBlockLocalPathInfoRpc

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the package/class this method depends on
@Test(timeout=10000)
public void testDeprecatedGetBlockLocalPathInfoRpc() throws IOException {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    DFSTestUtil.createFile(fs, new Path("/tmp/x"), 16, (short) 1, 23);
    LocatedBlocks lb = cluster.getNameNode().getRpcServer()
        .getBlockLocations("/tmp/x", 0, 16);
    // Create a new block object, because the block inside LocatedBlock at
    // namenode is of type BlockInfo.
    ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock());
    Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
    final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
    ClientDatanodeProtocol proxy = 
        DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, false);
    try {
      proxy.getBlockLocalPathInfo(blk, token);
      Assert.fail("The call should have failed as this user "
          + " is not allowed to call getBlockLocalPathInfo");
    } catch (IOException ex) {
      Assert.assertTrue(ex.getMessage().contains(
          "not allowed to call getBlockLocalPathInfo"));
    }
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 32, Source: TestShortCircuitLocalRead.java

Example 6: getBlockPathInfo

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the package/class this method depends on
private static BlockLocalPathInfo getBlockPathInfo(UserGroupInformation ugi,
    ExtendedBlock blk, DatanodeInfo node, Configuration conf, int timeout,
    Token<BlockTokenIdentifier> token, boolean connectToDnViaHostname,
    StorageType storageType) throws IOException {
  LocalDatanodeInfo localDatanodeInfo =
      getLocalDatanodeInfo(node.getIpcPort());
  BlockLocalPathInfo pathinfo;
  ClientDatanodeProtocol proxy = localDatanodeInfo.getDatanodeProxy(ugi, node,
      conf, timeout, connectToDnViaHostname);
  try {
    // make RPC to local datanode to find local pathnames of blocks
    pathinfo = proxy.getBlockLocalPathInfo(blk, token);
    // We can't cache the path information for a replica on transient storage.
    // If the replica gets evicted, then it moves to a different path.  Then,
    // our next attempt to read from the cached path would fail to find the
    // file.  Additionally, the failure would cause us to disable legacy
    // short-circuit read for all subsequent use in the ClientContext.  Unlike
    // the newer short-circuit read implementation, we have no communication
    // channel for the DataNode to notify the client that the path has been
    // invalidated.  Therefore, our only option is to skip caching.
    if (pathinfo != null && !storageType.isTransient()) {
      LOG.debug("Cached location of block {} as {}", blk, pathinfo);
      localDatanodeInfo.setBlockLocalPathInfo(blk, pathinfo);
    }
  } catch (IOException e) {
    localDatanodeInfo.resetDatanodeProxy(); // Reset proxy on error
    throw e;
  }
  return pathinfo;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 31, Source: BlockReaderLocalLegacy.java

Example 7: testDeprecatedGetBlockLocalPathInfoRpc

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the package/class this method depends on
@Test(timeout=10000)
public void testDeprecatedGetBlockLocalPathInfoRpc() throws IOException {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    DFSTestUtil.createFile(fs, new Path("/tmp/x"), 16, (short) 1, 23);
    LocatedBlocks lb = cluster.getNameNode().getRpcServer()
        .getBlockLocations("/tmp/x", 0, 16);
    // Create a new block object, because the block inside LocatedBlock at
    // namenode is of type BlockInfo.
    ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock());
    Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
    final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
    ClientDatanodeProtocol proxy = 
        DFSUtilClient.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, false);
    try {
      proxy.getBlockLocalPathInfo(blk, token);
      Assert.fail("The call should have failed as this user "
          + " is not allowed to call getBlockLocalPathInfo");
    } catch (IOException ex) {
      Assert.assertTrue(ex.getMessage().contains(
          "not allowed to call getBlockLocalPathInfo"));
    }
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 32, Source: TestShortCircuitLocalRead.java

Example 8: testDeprecatedGetBlockLocalPathInfoRpc

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the package/class this method depends on
@Test(timeout=10000)
public void testDeprecatedGetBlockLocalPathInfoRpc()
    throws IOException, InterruptedException {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    DFSTestUtil.createFile(fs, new Path("/tmp/x"), 16, (short) 1, 23);
    LocatedBlocks lb = cluster.getNameNode().getRpcServer()
        .getBlockLocations("/tmp/x", 0, 16);
    // Create a new block object, because the block inside LocatedBlock at
    // namenode is of type BlockInfo.
    ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock());
    Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
    final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
    ClientDatanodeProtocol proxy = 
        DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, false);
    try {
      proxy.getBlockLocalPathInfo(blk, token);
      Assert.fail("The call should have failed as this user "
          + " is not allowed to call getBlockLocalPathInfo");
    } catch (IOException ex) {
      Assert.assertTrue(ex.getMessage().contains(
          "not allowed to call getBlockLocalPathInfo"));
    }
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 33, Source: TestShortCircuitLocalRead.java

Example 9: testGetBlockLocalPathInfo

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; // import the package/class this method depends on
@Test
public void testGetBlockLocalPathInfo() throws IOException, InterruptedException {
  final Configuration conf = new Configuration();
  conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY, "alloweduser");
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  cluster.waitActive();
  final DataNode dn = cluster.getDataNodes().get(0);
  FileSystem fs = cluster.getFileSystem();
  try {
    DFSTestUtil.createFile(fs, new Path("/tmp/x"), 16, (short) 1, 23);
    UserGroupInformation aUgi = UserGroupInformation
        .createRemoteUser("alloweduser");
    LocatedBlocks lb = cluster.getNameNode().getBlockLocations("/tmp/x", 0,
        16);
    // Create a new block object, because the block inside LocatedBlock at
    // namenode is of type BlockInfo.
    Block blk = new Block(lb.get(0).getBlock());
    Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
    final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
    ClientDatanodeProtocol proxy = aUgi
        .doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
          @Override
          public ClientDatanodeProtocol run() throws Exception {
            return DFSClient.createClientDatanodeProtocolProxy(
                dnInfo, conf, 60000);
          }
        });
    
    //This should succeed
    BlockLocalPathInfo blpi = proxy.getBlockLocalPathInfo(blk, token);
    Assert.assertEquals(dn.data.getBlockLocalPathInfo(blk).getBlockPath(),
        blpi.getBlockPath());
    RPC.stopProxy(proxy);

    // Now try with a not allowed user.
    UserGroupInformation bUgi = UserGroupInformation
        .createRemoteUser("notalloweduser");
    proxy = bUgi
        .doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
          @Override
          public ClientDatanodeProtocol run() throws Exception {
            return DFSClient.createClientDatanodeProtocolProxy(
                dnInfo, conf, 60000);
          }
        });
    try {
      proxy.getBlockLocalPathInfo(blk, token);
      Assert.fail("The call should have failed as " + bUgi.getShortUserName()
          + " is not allowed to call getBlockLocalPathInfo");
    } catch (IOException ex) {
      Assert.assertTrue(ex.getMessage().contains(
          "not allowed to call getBlockLocalPathInfo"));
    } finally {
      RPC.stopProxy(proxy);
    }
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines: 61, Source: TestShortCircuitLocalRead.java


Note: The org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol.getBlockLocalPathInfo examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are taken from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult the corresponding project's license before distributing or using the code, and do not republish without permission.