

Java FileSystemTestHelper.getTestRootDir Method Code Examples

This article collects typical usage examples of the org.apache.hadoop.fs.FileSystemTestHelper.getTestRootDir method in Java. If you are wondering how FileSystemTestHelper.getTestRootDir is used in practice, the hand-picked code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FileSystemTestHelper.


The sections below show 7 code examples of the FileSystemTestHelper.getTestRootDir method, sorted by popularity by default.
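Before the full test setups, here is a minimal sketch of the common pattern shared by the examples below: create a FileSystemTestHelper, call getTestRootDir() to obtain a per-test scratch directory, and use it to build a file-backed JavaKeyStoreProvider URI. The class name GetTestRootDirSketch and the keystore file name test.jks are illustrative placeholders, not part of the Hadoop API.

import java.io.File;

import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class GetTestRootDirSketch {
  public static void main(String[] args) {
    // Obtain a per-test scratch directory from the helper
    FileSystemTestHelper fsHelper = new FileSystemTestHelper();
    String testRoot = fsHelper.getTestRootDir();
    File testRootDir = new File(testRoot).getAbsoluteFile();

    // Typical use in the examples below: point the encryption key provider
    // at a Java keystore located under the test root directory
    Path jksPath = new Path(testRootDir.toString(), "test.jks");
    HdfsConfiguration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
        JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());

    System.out.println("Test root dir: " + testRootDir);
  }
}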

Example 1: setup

import org.apache.hadoop.fs.FileSystemTestHelper; // import the package/class this method depends on
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  File testRootDir = new File(testRoot).getAbsoluteFile();
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()
  );
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(cluster.getFileSystem());
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  // Need to set the client's KeyProvider to the NN's for JKS,
  // else the updates do not get flushed properly
  fs.getClient().setKeyProvider(cluster.getNameNode().getNamesystem()
      .getProvider());
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: TestReservedRawPaths.java

Example 2: setup

import org.apache.hadoop.fs.FileSystemTestHelper; // import the package/class this method depends on
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, getKeyProviderURI());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  // Lower the batch size for testing
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
      2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(fs);
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  setProvider();
  // Create a test key
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: TestEncryptionZones.java

Example 3: setup

import org.apache.hadoop.fs.FileSystemTestHelper; // import the package/class this method depends on
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  File testRootDir = new File(testRoot).getAbsoluteFile();
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()
  );
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(cluster.getFileSystem());
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  // Need to set the client's KeyProvider to the NN's for JKS,
  // else the updates do not get flushed properly
  fs.getClient().provider = cluster.getNameNode().getNamesystem()
      .getProvider();
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
 
Developer: yncxcw, Project: FlexMap, Lines: 25, Source: TestReservedRawPaths.java

Example 4: setupCluster

import org.apache.hadoop.fs.FileSystemTestHelper; // import the package/class this method depends on
@Before
public void setupCluster() throws Exception {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);
  fsHelper = new FileSystemTestHelper();
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" +
      new Path(testRootDir.toString(), "test.jks").toUri()
  );

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(1)
    .build();
  cluster.waitActive();
  cluster.transitionToActive(0);

  fs = (DistributedFileSystem)HATestUtil.configureFailoverFs(cluster, conf);
  DFSTestUtil.createKey(TEST_KEY, cluster, 0, conf);
  DFSTestUtil.createKey(TEST_KEY, cluster, 1, conf);
  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  dfsAdmin0 = new HdfsAdmin(cluster.getURI(0), conf);
  dfsAdmin1 = new HdfsAdmin(cluster.getURI(1), conf);
  KeyProviderCryptoExtension nn0Provider =
      cluster.getNameNode(0).getNamesystem().getProvider();
  fs.getClient().setKeyProvider(nn0Provider);
}
 
Developer: naver, Project: hadoop, Lines: 32, Source: TestEncryptionZonesWithHA.java

Example 5: setupCluster

import org.apache.hadoop.fs.FileSystemTestHelper; // import the package/class this method depends on
@Before
public void setupCluster() throws Exception {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);
  fsHelper = new FileSystemTestHelper();
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" +
      new Path(testRootDir.toString(), "test.jks").toUri()
  );

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(1)
    .build();
  cluster.waitActive();
  cluster.transitionToActive(0);

  fs = (DistributedFileSystem)HATestUtil.configureFailoverFs(cluster, conf);
  DFSTestUtil.createKey(TEST_KEY, cluster, 0, conf);
  DFSTestUtil.createKey(TEST_KEY, cluster, 1, conf);
  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  dfsAdmin0 = new HdfsAdmin(cluster.getURI(0), conf);
  dfsAdmin1 = new HdfsAdmin(cluster.getURI(1), conf);
  KeyProviderCryptoExtension nn0Provider =
      cluster.getNameNode(0).getNamesystem().getProvider();
  fs.getClient().provider = nn0Provider;
}
 
Developer: yncxcw, Project: FlexMap, Lines: 32, Source: TestEncryptionZonesWithHA.java

Example 6: setup

import org.apache.hadoop.fs.FileSystemTestHelper; // import the package/class this method depends on
@Before
public void setup() {
  fsHelper = new FileSystemTestHelper();
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 7, Source: TestKeyProviderFactory.java

Example 7: setup

import org.apache.hadoop.fs.FileSystemTestHelper; // import the package/class this method depends on
@BeforeClass
public static void setup() throws Exception {
  String currentUser = System.getProperty("user.name");

  config.set("fs.permissions.umask-mode", "u=rwx,g=,o=");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserGroupConfKey(currentUser), "*");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserIpConfKey(currentUser), "*");
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  config.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);

  cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  nn = cluster.getNameNode();
  dfsAdmin = new HdfsAdmin(cluster.getURI(), config);

  // Use ephemeral ports in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  // Start NFS with allowed.hosts set to "* rw"
  config.set("dfs.nfs.exports.allowed.hosts", "* rw");
  nfs = new Nfs3(config);
  nfs.startServiceInternal(false);
  nfsd = (RpcProgramNfs3) nfs.getRpcProgram();

  hdfs.getClient().setKeyProvider(nn.getNamesystem().getProvider());
  DFSTestUtil.createKey(TEST_KEY, cluster, config);

  // Mock SecurityHandler which returns system user.name
  securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(currentUser);

  // Mock SecurityHandler which returns a dummy username "harry"
  securityHandlerUnpriviledged = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandlerUnpriviledged.getUser()).thenReturn("harry");
}
 
Developer: naver, Project: hadoop, Lines: 46, Source: TestRpcProgramNfs3.java


Note: The org.apache.hadoop.fs.FileSystemTestHelper.getTestRootDir examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Refer to each project's License before distributing or using the code; do not reproduce without permission.