

Java RpcProgramNfs3 Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.nfs.nfs3.RpcProgramNfs3. If you are wondering what RpcProgramNfs3 is for, how to use it, or simply want to see working examples, the curated snippets below should help.


The RpcProgramNfs3 class belongs to the org.apache.hadoop.hdfs.nfs.nfs3 package. Eight code examples of the class are shown below, sorted by popularity by default.
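
The snippets below are fragments of larger JUnit test classes: the page shows only the annotated test or setup method plus the import needed for RpcProgramNfs3. For orientation, here is a minimal self-contained sketch assembled from Example 1 (the naver/hadoop variant); the class name, package placement, import list, and try/finally cleanup are assumptions added for illustration and are not part of the original code.

// A hedged, self-contained sketch based on Example 1 below. It is placed in the
// org.apache.hadoop.hdfs.nfs.nfs3 package so that Nfs3.startServiceInternal(...)
// stays reachable regardless of its visibility in a given Hadoop version.
package org.apache.hadoop.hdfs.nfs.nfs3;

import java.io.IOException;
import java.net.InetAddress;

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
import org.apache.hadoop.hdfs.nfs.mount.RpcProgramMountd;
import org.apache.hadoop.oncrpc.XDR;
import org.junit.Test;

public class RpcProgramNfs3StartSketch { // hypothetical class name

  @Test
  public void startAndPing() throws IOException {
    // Bring up a single-DataNode mini HDFS cluster.
    NfsConfiguration config = new NfsConfiguration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    try {
      cluster.waitActive();

      // Ephemeral ports so parallel test runs do not collide.
      config.setInt("nfs3.mountd.port", 0);
      config.setInt("nfs3.server.port", 0);

      // Start the NFS3 gateway (mountd + nfsd) without portmap registration.
      Nfs3 nfs3 = new Nfs3(config);
      nfs3.startServiceInternal(false);

      // The NULL procedures are the cheapest way to confirm both RPC programs answer.
      RpcProgramMountd mountd =
          (RpcProgramMountd) nfs3.getMountd().getRpcProgram();
      mountd.nullOp(new XDR(), 1234, InetAddress.getByName("localhost"));

      RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
      nfsd.nullProcedure();
    } finally {
      cluster.shutdown();
    }
  }
}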

Example 1: testStart

import org.apache.hadoop.hdfs.nfs.nfs3.RpcProgramNfs3; // import the package/class this example depends on
@Test
public void testStart() throws IOException {
  // Start minicluster
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).numDataNodes(1)
      .build();
  cluster.waitActive();
  
  // Use ephemeral ports in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);
  
  // Start nfs
  Nfs3 nfs3 = new Nfs3(config);
  nfs3.startServiceInternal(false);

  RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountd()
      .getRpcProgram();
  mountd.nullOp(new XDR(), 1234, InetAddress.getByName("localhost"));
  
  RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
  nfsd.nullProcedure();
  
  cluster.shutdown();
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: TestMountd.java

Example 2: testStart

import org.apache.hadoop.hdfs.nfs.nfs3.RpcProgramNfs3; // import the package/class this example depends on
@Test
public void testStart() throws IOException {
  // Start minicluster
  Configuration config = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).numDataNodes(1)
      .manageNameDfsDirs(false).build();
  cluster.waitActive();
  
  // Start nfs
  List<String> exports = new ArrayList<String>();
  exports.add("/");
  Nfs3 nfs3 = new Nfs3(exports, config);
  nfs3.start(false);

  RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountBase()
      .getRpcProgram();
  mountd.nullOp(new XDR(), 1234, InetAddress.getByName("localhost"));
  
  RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
  nfsd.nullProcedure();
  
  cluster.shutdown();
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 24, Source: TestMountd.java

Example 3: setup

import org.apache.hadoop.hdfs.nfs.nfs3.RpcProgramNfs3; // import the package/class this example depends on
@BeforeClass
public static void setup() throws Exception {
  String currentUser = System.getProperty("user.name");
  config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(currentUser), "*");
  config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(currentUser), "*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  nn = cluster.getNameNode();

  // Use ephemeral ports in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);
  
  // Start nfs
  Nfs3 nfs3 = new Nfs3(config);
  nfs3.startServiceInternal(false);

  nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();

  securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser())
      .thenReturn(System.getProperty("user.name"));
}
 
Developer: hopshadoop, Project: hops, Lines: 26, Source: TestReaddir.java
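
The setup() snippet above (and the similar ones in Examples 7 and 8) references static fields that are declared elsewhere in TestReaddir. The following fragment is an assumed sketch of those declarations, with imports omitted and types inferred from the calls the snippet makes; the older fork in Example 8 may use a plain Configuration instead of NfsConfiguration.

public class TestReaddir {
  // Assumed field declarations; types inferred from how setup() uses each field.
  static NfsConfiguration config = new NfsConfiguration(); // config.set(...) / config.setInt(...)
  static MiniDFSCluster cluster;                           // built by MiniDFSCluster.Builder
  static DistributedFileSystem hdfs;                       // cluster.getFileSystem()
  static NameNode nn;                                      // cluster.getNameNode()
  static RpcProgramNfs3 nfsd;                              // (RpcProgramNfs3) nfs3.getRpcProgram()
  static SecurityHandler securityHandler;                  // Mockito.mock(SecurityHandler.class)

  // ... the @BeforeClass setup() shown above, followed by the READDIR test cases ...
}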

Example 4: testStart

import org.apache.hadoop.hdfs.nfs.nfs3.RpcProgramNfs3; // import the package/class this example depends on
@Test
public void testStart() throws IOException {
  // Start minicluster
  Configuration config = new Configuration();
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(config).numDataNodes(1).build();
  cluster.waitActive();
  
  // Use ephemeral ports in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);
  
  // Start nfs
  Nfs3 nfs3 = new Nfs3(config);
  nfs3.startServiceInternal(false);

  RpcProgramMountd mountd =
      (RpcProgramMountd) nfs3.getMountd().getRpcProgram();
  mountd.nullOp(new XDR(), 1234, InetAddress.getByName("localhost"));
  
  RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
  nfsd.nullProcedure();
  
  cluster.shutdown();
}
 
Developer: hopshadoop, Project: hops, Lines: 26, Source: TestMountd.java

Example 5: testStart

import org.apache.hadoop.hdfs.nfs.nfs3.RpcProgramNfs3; // import the package/class this example depends on
@Test
public void testStart() throws IOException {
  // Start minicluster
  Configuration config = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).numDataNodes(1)
      .build();
  cluster.waitActive();
  
  // Start nfs
  List<String> exports = new ArrayList<String>();
  exports.add("/");
  Nfs3 nfs3 = new Nfs3(exports, config);
  nfs3.start(false);

  RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountBase()
      .getRpcProgram();
  mountd.nullOp(new XDR(), 1234, InetAddress.getByName("localhost"));
  
  RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
  nfsd.nullProcedure();
  
  cluster.shutdown();
}
 
Developer: chendave, Project: hadoop-TCP, Lines: 24, Source: TestMountd.java

Example 6: testStart

import org.apache.hadoop.hdfs.nfs.nfs3.RpcProgramNfs3; // import the package/class this example depends on
@Test
public void testStart() throws IOException {
  // Start minicluster
  Configuration config = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).numDataNodes(1)
      .build();
  cluster.waitActive();
  
  // Use ephemeral ports in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);
  
  // Start nfs
  Nfs3 nfs3 = new Nfs3(config);
  nfs3.startServiceInternal(false);

  RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountd()
      .getRpcProgram();
  mountd.nullOp(new XDR(), 1234, InetAddress.getByName("localhost"));
  
  RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
  nfsd.nullProcedure();
  
  cluster.shutdown();
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 26, Source: TestMountd.java

Example 7: setup

import org.apache.hadoop.hdfs.nfs.nfs3.RpcProgramNfs3; // import the package/class this example depends on
@BeforeClass
public static void setup() throws Exception {
  String currentUser = System.getProperty("user.name");
  config.set(
          DefaultImpersonationProvider.getTestProvider().
              getProxySuperuserGroupConfKey(currentUser), "*");
  config.set(
          DefaultImpersonationProvider.getTestProvider().
              getProxySuperuserIpConfKey(currentUser), "*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  nn = cluster.getNameNode();

  // Use ephemeral ports in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);
  
  // Start nfs
  Nfs3 nfs3 = new Nfs3(config);
  nfs3.startServiceInternal(false);

  nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();

  securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(
      System.getProperty("user.name"));
}
 
Developer: naver, Project: hadoop, Lines: 30, Source: TestReaddir.java

Example 8: setup

import org.apache.hadoop.hdfs.nfs.nfs3.RpcProgramNfs3; // import the package/class this example depends on
@BeforeClass
public static void setup() throws Exception {
  String currentUser = System.getProperty("user.name");
  config.set(
          ProxyUsers.getProxySuperuserGroupConfKey(currentUser),
          "*");
  config.set(
          ProxyUsers.getProxySuperuserIpConfKey(currentUser),
          "*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  nn = cluster.getNameNode();

  // Use ephemeral ports in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);
  
  // Start nfs
  Nfs3 nfs3 = new Nfs3(config);
  nfs3.startServiceInternal(false);

  nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();

  securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(
      System.getProperty("user.name"));
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 30, Source: TestReaddir.java


Note: The org.apache.hadoop.hdfs.nfs.nfs3.RpcProgramNfs3 class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright in the source code remains with the original authors, and distribution or use should follow the corresponding project's license. Do not reproduce without permission.