

Java FileSystemTestHelper Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.fs.FileSystemTestHelper. If you are wondering what FileSystemTestHelper is for, how to use it, or where to find working examples, the hand-picked code samples below should help.


The FileSystemTestHelper class belongs to the org.apache.hadoop.fs package. Nine code examples of the class are shown below, sorted by popularity by default.
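Before the individual examples, here is a minimal, self-contained sketch of the calls that recur throughout them, assuming only the hadoop-common test artifact (which ships FileSystemTestHelper) on the classpath and the local file system as the target; the class name FileSystemTestHelperSketch and the file names are illustrative, not part of the Hadoop API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.Path;

public class FileSystemTestHelperSketch {
  public static void main(String[] args) throws Exception {
    // Use the local file system so the sketch needs no running cluster.
    FileSystem fs = FileSystem.getLocal(new Configuration());

    // Each helper instance manages a per-test root directory.
    FileSystemTestHelper helper = new FileSystemTestHelper();
    String testRoot = helper.getTestRootDir();               // root as a plain String
    Path testRootPath = helper.getAbsoluteTestRootPath(fs);  // root as a qualified Path

    // Clean up leftovers from a previous (possibly killed) run, then recreate the root.
    fs.delete(testRootPath, true);
    fs.mkdirs(testRootPath);

    // Static helpers create files with generated content:
    // a small default file, and a 10-block file with 1024-byte blocks.
    FileSystemTestHelper.createFile(fs, new Path(testRootPath, "aFile"));
    FileSystemTestHelper.createFile(fs, new Path(testRootPath, "largeFile"), 10, 1024);

    System.out.println("Test root: " + testRoot);
    fs.delete(testRootPath, true);  // tidy up
  }
}

The same pattern - build a helper, derive a test root, pre-clean it, then create files through the static createFile overloads - appears in the setUp/setup methods of the examples below.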

Example 1: setUp

import org.apache.hadoop.fs.FileSystemTestHelper; // import the required package/class
@Before
public void setUp() throws Exception {
  initializeTargetTestRoot();
  
  // Make user and data dirs - we create links to them in the mount table
  fsTarget.mkdirs(new Path(targetTestRoot,"user"));
  fsTarget.mkdirs(new Path(targetTestRoot,"data"));
  fsTarget.mkdirs(new Path(targetTestRoot,"dir2"));
  fsTarget.mkdirs(new Path(targetTestRoot,"dir3"));
  FileSystemTestHelper.createFile(fsTarget, new Path(targetTestRoot,"aFile"));
  
  
  // Now we use the mount fs to set links to user and dir
  // in the test root
  
  // Set up the defaultMT in the config with our mount point links
  conf = ViewFileSystemTestSetup.createConfig();
  setupMountPoints();
  fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 21, Source: ViewFileSystemBaseTest.java

Example 2: testGetBlockLocations

import org.apache.hadoop.fs.FileSystemTestHelper; // import the required package/class
@Test
public void testGetBlockLocations() throws IOException {
  Path targetFilePath = new Path(targetTestRoot,"data/largeFile");
  FileSystemTestHelper.createFile(fsTarget, 
      targetFilePath, 10, 1024);
  Path viewFilePath = new Path("/data/largeFile");
  Assert.assertTrue("Created File should be type File",
      fsView.isFile(viewFilePath));
  BlockLocation[] viewBL = fsView.getFileBlockLocations(fsView.getFileStatus(viewFilePath), 0, 10240+100);
  Assert.assertEquals(SupportsBlocks ? 10 : 1, viewBL.length);
  BlockLocation[] targetBL = fsTarget.getFileBlockLocations(fsTarget.getFileStatus(targetFilePath), 0, 10240+100);
  compareBLs(viewBL, targetBL);
  
  
  // Same test, but this time obtain the block locations via the FileStatus parameter
  viewBL = fsView.getFileBlockLocations(
      fsView.getFileStatus(viewFilePath), 0, 10240+100);
  targetBL = fsTarget.getFileBlockLocations(
      fsTarget.getFileStatus(targetFilePath), 0, 10240+100);
  compareBLs(viewBL, targetBL);
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 22, Source: ViewFileSystemBaseTest.java

Example 3: setUp

import org.apache.hadoop.fs.FileSystemTestHelper; // import the required package/class
@Before
public void setUp() throws Exception {
  // create the test root on local_fs
  Configuration conf = new Configuration();
  fSysTarget = FileSystem.getLocal(conf);
  fileSystemTestHelper = new FileSystemTestHelper();
  chrootedTo = fileSystemTestHelper.getAbsoluteTestRootPath(fSysTarget);
  // In case previous test was killed before cleanup
  fSysTarget.delete(chrootedTo, true);
  
  fSysTarget.mkdirs(chrootedTo);


  // ChRoot to the root of the testDirectory
  fSys = new ChRootedFileSystem(chrootedTo.toUri(), conf);
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 17, Source: TestChRootedFileSystem.java

Example 4: testTruncate

import org.apache.hadoop.fs.FileSystemTestHelper; // import the required package/class
private void testTruncate() throws Exception {
  if (!isLocalFS()) {
    final short repl = 3;
    final int blockSize = 1024;
    final int numOfBlocks = 2;
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    fs.mkdirs(getProxiedFSTestDir());
    Path file = new Path(getProxiedFSTestDir(), "foo.txt");
    final byte[] data = FileSystemTestHelper.getFileData(
        numOfBlocks, blockSize);
    FileSystemTestHelper.createFile(fs, file, data, blockSize, repl);

    final int newLength = blockSize;

    boolean isReady = fs.truncate(file, newLength);
    Assert.assertTrue("Recovery is not expected.", isReady);

    FileStatus fileStatus = fs.getFileStatus(file);
    Assert.assertEquals(fileStatus.getLen(), newLength);
    AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());

    fs.close();
  }
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: BaseTestHttpFSWith.java

Example 5: setup

import org.apache.hadoop.fs.FileSystemTestHelper; // import the required package/class
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  File testRootDir = new File(testRoot).getAbsoluteFile();
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()
  );
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(cluster.getFileSystem());
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  // Need to set the client's KeyProvider to the NN's for JKS,
  // else the updates do not get flushed properly
  fs.getClient().setKeyProvider(cluster.getNameNode().getNamesystem()
      .getProvider());
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: TestReservedRawPaths.java

Example 6: setup

import org.apache.hadoop.fs.FileSystemTestHelper; // import the required package/class
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, getKeyProviderURI());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  // Lower the batch size for testing
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
      2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(fs);
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  setProvider();
  // Create a test key
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: TestEncryptionZones.java

Example 7: setUp

import org.apache.hadoop.fs.FileSystemTestHelper; // import the required package/class
@Before
public void setUp() throws Exception {
  initializeTargetTestRoot();
  
  // Make user and data dirs - we create links to them in the mount table
  fsTarget.mkdirs(new Path(targetTestRoot,"user"));
  fsTarget.mkdirs(new Path(targetTestRoot,"data"));
  fsTarget.mkdirs(new Path(targetTestRoot,"dir2"));
  fsTarget.mkdirs(new Path(targetTestRoot,"dir3"));
  FileSystemTestHelper.createFile(fsTarget, new Path(targetTestRoot,"aFile"));
  
  
  // Now we use the mount fs to set links to user and dir
  // in the test root
  
  // Set up the defaultMT in the config with our mount point links
  conf = ViewFileSystemTestSetup.createConfig();
  setupMountPoints();
  fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 22, Source: ViewFileSystemBaseTest.java

Example 8: setup

import org.apache.hadoop.fs.FileSystemTestHelper; // import the required package/class
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  File testRootDir = new File(testRoot).getAbsoluteFile();
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()
  );
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(cluster.getFileSystem());
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  // Need to set the client's KeyProvider to the NN's for JKS,
  // else the updates do not get flushed properly
  fs.getClient().provider = cluster.getNameNode().getNamesystem()
      .getProvider();
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
 
Developer: yncxcw, Project: FlexMap, Lines: 25, Source: TestReservedRawPaths.java

Example 9: initialize

import org.apache.hadoop.fs.FileSystemTestHelper; // import the required package/class
@Before
public void initialize() throws Exception {
  conf = new Configuration(false);
  conf.set("fs.file.impl", LocalFileSystem.class.getName());
  fs = FileSystem.getLocal(conf);
  testDir = new FileSystemTestHelper().getTestRootPath(fs);
  // don't want scheme on the path, just an absolute path
  testDir = new Path(fs.makeQualified(testDir).toUri().getPath());

  FileSystem.setDefaultUri(conf, fs.getUri());
  fs.setWorkingDirectory(testDir);
  fs.mkdirs(DIR_FROM);
  fs.mkdirs(DIR_TO1);
  fs.createNewFile(FROM);

  FSDataOutputStream output = fs.create(FROM, true);
  for(int i = 0; i < 100; ++i) {
      output.writeInt(i);
      output.writeChar('\n');
  }
  output.close();
  fs.setTimes(FROM, MODIFICATION_TIME, ACCESS_TIME);
  fs.setPermission(FROM, PERMISSIONS);
  fs.setTimes(DIR_FROM, MODIFICATION_TIME, ACCESS_TIME);
  fs.setPermission(DIR_FROM, PERMISSIONS);
}
 
Developer: hopshadoop, Project: hops, Lines: 27, Source: TestCopyPreserveFlag.java


Note: The org.apache.hadoop.fs.FileSystemTestHelper examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution or use should follow the license of the corresponding project. Do not reproduce without permission.