

Java NamenodeProtocols Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols. If you are unsure what NamenodeProtocols is for or how to use it, the curated examples below should help.


NamenodeProtocols belongs to the org.apache.hadoop.hdfs.server.protocol package. Fifteen code examples of the class are shown below, ordered by popularity.
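
Before the individual examples, note the setup most of them share: they run against a MiniDFSCluster and obtain a NamenodeProtocols handle through cluster.getNameNodeRpc(). The sketch below distills that common pattern; it is a minimal illustration assembled from the snippets on this page, not code taken from any one project.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;

public class NamenodeProtocolsQuickStart {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      cluster.waitActive();
      // NamenodeProtocols aggregates the NameNode's RPC interfaces
      // (client, datanode, and namenode-to-namenode protocols).
      NamenodeProtocols nn = cluster.getNameNodeRpc();
      long[] stats = nn.getStats(); // aggregate FS statistics, as in Examples 8-9
      System.out.println("getStats() returned " + stats.length + " counters");
    } finally {
      cluster.shutdown();
    }
  }
}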

Example 1: IPFailoverProxyProvider

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the required package/class
public IPFailoverProxyProvider(Configuration conf, URI uri,
    Class<T> xface) {
  Preconditions.checkArgument(
      xface.isAssignableFrom(NamenodeProtocols.class),
      "Interface class %s is not a valid NameNode protocol!", xface);
  this.xface = xface;
  this.nameNodeUri = uri;

  this.conf = new Configuration(conf);
  int maxRetries = this.conf.getInt(
      DFSConfigKeys.DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_KEY,
      DFSConfigKeys.DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_DEFAULT);
  this.conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
      maxRetries);
  
  int maxRetriesOnSocketTimeouts = this.conf.getInt(
      DFSConfigKeys.DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      DFSConfigKeys.DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT);
  this.conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      maxRetriesOnSocketTimeouts);
}
 
Author: naver, Project: hadoop, Lines: 24, Source: IPFailoverProxyProvider.java
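
In practice this provider is rarely constructed by hand: the HDFS client instantiates it through the dfs.client.failover.proxy.provider.<nameservice> configuration key. A hedged sketch of that wiring follows, where "mycluster" is a placeholder nameservice ID and the fully qualified class name is assumed to match the source tree cited above.

import org.apache.hadoop.conf.Configuration;

public class FailoverProviderWiring {
  public static Configuration configure() {
    Configuration conf = new Configuration();
    // "mycluster" is a placeholder; substitute your logical nameservice ID.
    conf.set("fs.defaultFS", "hdfs://mycluster");
    conf.set("dfs.client.failover.proxy.provider.mycluster",
        "org.apache.hadoop.hdfs.server.namenode.ha.IPFailoverProxyProvider");
    return conf;
  }
}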

Example 2: testOpenFilesWithRename

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the required package/class
@Test
public void testOpenFilesWithRename() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);

  // check for zero sized blocks
  Path fileWithEmptyBlock = new Path("/test/test/test4");
  fs.create(fileWithEmptyBlock);
  NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
  String clientName = fs.getClient().getClientName();
  // create one empty block
  nameNodeRpc.addBlock(fileWithEmptyBlock.toString(), clientName, null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  fs.createSnapshot(path, "s2");

  fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));
  fs.delete(new Path("/test/test-renamed"), true);
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
}
 
Author: naver, Project: hadoop, Lines: 24, Source: TestOpenFilesWithSnapshot.java

Example 3: testAddBlockRetryShouldReturnBlockWithLocations

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the required package/class
@Test
public void testAddBlockRetryShouldReturnBlockWithLocations()
    throws Exception {
  final String src = "/testAddBlockRetryShouldReturnBlockWithLocations";
  NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
  // create file
  nameNodeRpc.create(src, FsPermission.getFileDefault(), "clientName",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
      (short) 3, 1024, null);
  // start first addBlock()
  LOG.info("Starting first addBlock for " + src);
  LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  assertTrue("Block locations should be present",
      lb1.getLocations().length > 0);

  cluster.restartNameNode();
  nameNodeRpc = cluster.getNameNodeRpc();
  LocatedBlock lb2 = nameNodeRpc.addBlock(src, "clientName", null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
  assertTrue("Wrong locations with retry", lb2.getLocations().length > 0);
}
 
Author: naver, Project: hadoop, Lines: 24, Source: TestAddBlockRetry.java

Example 4: triggerFailure

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the required package/class
/**
 * Read each block of the file from its second DataNode replica until one
 * of the reads fails.
 * @param path the file whose blocks are probed
 * @param size the file length in bytes
 * @throws IOException if the block locations cannot be fetched
 */
private void triggerFailure(String path, long size) throws IOException {
  NamenodeProtocols nn = cluster.getNameNodeRpc();
  List<LocatedBlock> locatedBlocks =
    nn.getBlockLocations(path, 0, size).getLocatedBlocks();
  
  for (LocatedBlock lb : locatedBlocks) {
    DatanodeInfo dinfo = lb.getLocations()[1];
    ExtendedBlock b = lb.getBlock();
    try {
      accessBlock(dinfo, lb);
    } catch (IOException e) {
      System.out.println("Failure triggered, on block: " + b.getBlockId() +  
          "; corresponding volume should be removed by now");
      break;
    }
  }
}
 
Author: naver, Project: hadoop, Lines: 24, Source: TestDataNodeVolumeFailure.java

Example 5: countNNBlocks

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the required package/class
/**
 * Count the DataNodes that hold replicas of each block of a file and
 * record the per-block counts in the given map.
 * @param map block ID to BlockLocs mapping, updated in place
 * @param path the file to inspect
 * @param size the file length in bytes
 * @return the total number of replica locations across all blocks
 * @throws IOException if the block locations cannot be fetched
 */
private int countNNBlocks(Map<String, BlockLocs> map, String path, long size)
    throws IOException {
  int total = 0;

  NamenodeProtocols nn = cluster.getNameNodeRpc();
  List<LocatedBlock> locatedBlocks =
      nn.getBlockLocations(path, 0, size).getLocatedBlocks();

  for (LocatedBlock lb : locatedBlocks) {
    String blockId = "" + lb.getBlock().getBlockId();
    DatanodeInfo[] dn_locs = lb.getLocations();
    BlockLocs bl = map.get(blockId);
    if (bl == null) {
      bl = new BlockLocs();
    }
    total += dn_locs.length;
    bl.num_locs += dn_locs.length;
    map.put(blockId, bl);
  }
  return total;
}
 
Author: naver, Project: hadoop, Lines: 35, Source: TestDataNodeVolumeFailure.java
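
The snippet depends on a BlockLocs helper that this page does not show. From its usage above it only needs a mutable replica counter, so a minimal stand-in can be sketched; the field name num_locs comes from the code, everything else is assumed.

// Minimal stand-in for the test's BlockLocs helper: the method above only
// reads and increments num_locs, so nothing more is required.
static class BlockLocs {
  int num_locs = 0;
}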

Example 6: testMkdirRpcNonCanonicalPath

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the required package/class
/**
 * Regression test for HDFS-3626. Issues mkdirs calls using non-canonical
 * paths (i.e. with extra slashes between components) and makes sure that
 * the NN rejects them.
 */
@Test
public void testMkdirRpcNonCanonicalPath() throws IOException {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
  try {
    NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
    
    for (String pathStr : NON_CANONICAL_PATHS) {
      try {
        nnrpc.mkdirs(pathStr, new FsPermission((short)0755), true);
        fail("Did not fail when called with a non-canonicalized path: "
           + pathStr);
      } catch (InvalidPathException ipe) {
        // expected
      }
    }
  } finally {
    cluster.shutdown();
  }
}
 
Author: naver, Project: hadoop, Lines: 25, Source: TestDFSMkdirs.java
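
NON_CANONICAL_PATHS is defined elsewhere in the test class. Per the Javadoc it holds paths with redundant separators or relative components; a plausible definition is sketched below with hypothetical values, since the original constant is not reproduced on this page.

// Hypothetical values; the real constant lives in TestDFSMkdirs and may differ.
private static final String[] NON_CANONICAL_PATHS = {
    "//test1",          // doubled leading slash
    "/test2//bar",      // doubled separator between components
    "/test2/../test4",  // parent-directory component
    "/test5/."          // current-directory component
};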

Example 7: IPFailoverProxyProvider

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the required package/class
public IPFailoverProxyProvider(Configuration conf, URI uri,
    Class<T> xface) {
  Preconditions.checkArgument(
      xface.isAssignableFrom(NamenodeProtocols.class),
      "Interface class %s is not a valid NameNode protocol!", xface);
  this.xface = xface;
  this.nameNodeUri = uri;

  this.conf = new Configuration(conf);
  int maxRetries = this.conf.getInt(
      HdfsClientConfigKeys.Failover.CONNECTION_RETRIES_KEY,
      HdfsClientConfigKeys.Failover.CONNECTION_RETRIES_DEFAULT);
  this.conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
      maxRetries);
  
  int maxRetriesOnSocketTimeouts = this.conf.getInt(
      HdfsClientConfigKeys.Failover.CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      HdfsClientConfigKeys.Failover.CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT);
  this.conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      maxRetriesOnSocketTimeouts);
}
 
Author: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 24, Source: IPFailoverProxyProvider.java

Example 8: testHedgingWhenOneIsSlow

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the required package/class
@Test
public void testHedgingWhenOneIsSlow() throws Exception {
  final NamenodeProtocols goodMock = Mockito.mock(NamenodeProtocols.class);
  Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
    @Override
    public long[] answer(InvocationOnMock invocation) throws Throwable {
      Thread.sleep(1000);
      return new long[]{1};
    }
  });
  final NamenodeProtocols badMock = Mockito.mock(NamenodeProtocols.class);
  Mockito.when(badMock.getStats()).thenThrow(new IOException("Bad mock !!"));

  RequestHedgingProxyProvider<NamenodeProtocols> provider =
      new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
          createFactory(goodMock, badMock));
  long[] stats = provider.getProxy().proxy.getStats();
  Assert.assertTrue(stats.length == 1);
  Assert.assertEquals(1, stats[0]);
  Mockito.verify(badMock).getStats();
  Mockito.verify(goodMock).getStats();
}
 
Author: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 23, Source: TestRequestHedgingProxyProvider.java
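
Both hedging tests (this one and Example 9) rely on a createFactory(...) helper that is not reproduced here, and the factory interface it implements differs across Hadoop releases. Conceptually it just hands the pre-built mocks back to the provider in order; the sketch below captures that idea, with the ProxyFactory interface being a hypothetical stand-in rather than Hadoop's actual type.

import java.util.Arrays;
import java.util.Iterator;

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;

// Hypothetical stand-in: Hadoop's real factory interface takes more
// arguments (Configuration, address, UGI, ...) than this sketch does.
interface ProxyFactory<T> {
  T createProxy();
}

class MockProxyFactory {
  // Hands each pre-built mock back in order, so the provider ends up
  // trying goodMock/badMock exactly as the test arranged them.
  static ProxyFactory<NamenodeProtocols> createFactory(
      final NamenodeProtocols... mocks) {
    final Iterator<NamenodeProtocols> it = Arrays.asList(mocks).iterator();
    return new ProxyFactory<NamenodeProtocols>() {
      @Override
      public NamenodeProtocols createProxy() {
        return it.next();
      }
    };
  }
}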

Example 9: testHedgingWhenBothFail

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the required package/class
@Test
public void testHedgingWhenBothFail() throws Exception {
  NamenodeProtocols badMock = Mockito.mock(NamenodeProtocols.class);
  Mockito.when(badMock.getStats()).thenThrow(new IOException("Bad mock !!"));
  NamenodeProtocols worseMock = Mockito.mock(NamenodeProtocols.class);
  Mockito.when(worseMock.getStats()).thenThrow(
          new IOException("Worse mock !!"));

  RequestHedgingProxyProvider<NamenodeProtocols> provider =
      new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
          createFactory(badMock, worseMock));
  try {
    provider.getProxy().proxy.getStats();
    Assert.fail("Should fail since both namenodes throw IOException !!");
  } catch (Exception e) {
    Assert.assertTrue(e instanceof MultiException);
  }
  Mockito.verify(badMock).getStats();
  Mockito.verify(worseMock).getStats();
}
 
Author: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 21, Source: TestRequestHedgingProxyProvider.java

Example 10: testOpenFilesWithRename

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the required package/class
@Test
public void testOpenFilesWithRename() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);

  // check for zero sized blocks
  Path fileWithEmptyBlock = new Path("/test/test/test4");
  fs.create(fileWithEmptyBlock);
  NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
  String clientName = fs.getClient().getClientName();
  // create one empty block
  nameNodeRpc.addBlock(fileWithEmptyBlock.toString(), clientName, null, null,
      HdfsConstants.GRANDFATHER_INODE_ID, null);
  fs.createSnapshot(path, "s2");

  fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));
  fs.delete(new Path("/test/test-renamed"), true);
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
}
 
Author: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 24, Source: TestOpenFilesWithSnapshot.java

Example 11: testAddBlockRetryShouldReturnBlockWithLocations

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the required package/class
@Test
public void testAddBlockRetryShouldReturnBlockWithLocations()
    throws Exception {
  final String src = "/testAddBlockRetryShouldReturnBlockWithLocations";
  NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
  // create file
  nameNodeRpc.create(src, FsPermission.getFileDefault(), "clientName",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
      (short) 3, 1024, null);
  // start first addBlock()
  LOG.info("Starting first addBlock for " + src);
  LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null,
      HdfsConstants.GRANDFATHER_INODE_ID, null);
  assertTrue("Block locations should be present",
      lb1.getLocations().length > 0);

  cluster.restartNameNode();
  nameNodeRpc = cluster.getNameNodeRpc();
  LocatedBlock lb2 = nameNodeRpc.addBlock(src, "clientName", null, null,
      HdfsConstants.GRANDFATHER_INODE_ID, null);
  assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
  assertTrue("Wrong locations with retry", lb2.getLocations().length > 0);
}
 
Author: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 24, Source: TestAddBlockRetry.java

Example 12: testDFSClientConfigurationLocateFollowingBlockInitialDelay

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the required package/class
@Test
public void testDFSClientConfigurationLocateFollowingBlockInitialDelay()
    throws Exception {
  // If HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_KEY
  // is not configured, DFSClient should use the default value of 400 ms.
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    DFSClient client = new DFSClient(null, nn, conf, null);
    assertEquals(client.getConf().
        getBlockWriteLocateFollowingInitialDelayMs(), 400);

    // After setting HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_KEY,
    // DFSClient should use the configured value of 1000 ms.
    conf.setInt(
        HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_KEY,
        1000);
    client = new DFSClient(null, nn, conf, null);
    assertEquals(client.getConf().
        getBlockWriteLocateFollowingInitialDelayMs(), 1000);
  } finally {
    cluster.shutdown();
  }
}
 
Author: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 26, Source: TestDFSClientRetries.java

Example 13: testMkdirRpcNonCanonicalPath

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the required package/class
/**
 * Regression test for HDFS-3626. Issues mkdirs calls using non-canonical
 * paths (i.e. with extra slashes between components) and makes sure that
 * the NN rejects them.
 */
@Test
public void testMkdirRpcNonCanonicalPath() throws IOException {
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
  try {
    NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
    
    for (String pathStr : NON_CANONICAL_PATHS) {
      try {
        nnrpc.mkdirs(pathStr, new FsPermission((short) 0755), true);
        fail("Did not fail when called with a non-canonicalized path: " +
            pathStr);
      } catch (InvalidPathException ipe) {
        // expected
      }
    }
  } finally {
    cluster.shutdown();
  }
}
 
Author: hopshadoop, Project: hops, Lines: 26, Source: TestDFSMkdirs.java

Example 14: createFailoverProxyProvider

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the required package/class
/** Creates the failover proxy provider instance. */
@VisibleForTesting
public static <T> FailoverProxyProvider<T> createFailoverProxyProvider(
    Configuration conf, Class<FailoverProxyProvider<T>> failoverProxyProviderClass,
    Class<T> xface, URI nameNodeUri) throws IOException {
  Preconditions.checkArgument(
      xface.isAssignableFrom(NamenodeProtocols.class),
      "Interface %s is not a NameNode protocol", xface);
  try {
    Constructor<FailoverProxyProvider<T>> ctor = failoverProxyProviderClass
        .getConstructor(Configuration.class, URI.class, Class.class);
    FailoverProxyProvider<T> provider = ctor.newInstance(conf, nameNodeUri,
        xface);
    return provider;
  } catch (Exception e) {
    String message = "Couldn't create proxy provider " + failoverProxyProviderClass;
    if (LOG.isDebugEnabled()) {
      LOG.debug(message, e);
    }
    if (e.getCause() instanceof IOException) {
      throw (IOException) e.getCause();
    } else {
      throw new IOException(message, e);
    }
  }
}
 
Author: ict-carch, Project: hadoop-plus, Lines: 27, Source: NameNodeProxies.java

Example 15: getRPCServer

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the required package/class
private static NamenodeProtocols getRPCServer(NameNode namenode)
    throws IOException {
  final NamenodeProtocols np = namenode.getRpcServer();
  if (np == null) {
    throw new RetriableException("Namenode is in startup mode");
  }
  return np;
}
 
Author: naver, Project: hadoop, Lines: 9, Source: NamenodeWebHdfsMethods.java


Note: The org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's License. Do not reproduce without permission.