This page collects typical usage examples of the Java method org.apache.hadoop.hdfs.DFSTestUtil.getFileSystemAs. If you are wondering what DFSTestUtil.getFileSystemAs does, how to call it, or simply want to see it used in real code, the curated snippets below should help. You can also explore the enclosing class, org.apache.hadoop.hdfs.DFSTestUtil, for related usage.
Nine code examples of DFSTestUtil.getFileSystemAs are shown below, ordered by popularity by default.
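For context: getFileSystemAs is a test helper that obtains a FileSystem client while impersonating a given user, so permission checks on the NameNode are evaluated against that user rather than the JVM's login user. The sketch below shows the underlying idea only, not the exact Hadoop implementation (the helper's overloads and signatures vary across Hadoop versions):

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UserGroupInformation;

// Sketch: obtain a FileSystem bound to a specific user's identity.
// Every call made through the returned client is authorized as `ugi`.
static FileSystem fileSystemAs(UserGroupInformation ugi, final Configuration conf)
    throws Exception {
  return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(conf);
    }
  });
}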
Example 1: testAuditDenied
import org.apache.hadoop.hdfs.DFSTestUtil; // import for DFSTestUtil.getFileSystemAs

/** Test that a denied operation puts a proper entry in the audit log. */
@Test
public void testAuditDenied() throws Exception {
  final Path file = new Path(fnames[0]);
  FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);
  fs.setPermission(file, new FsPermission((short)0600));
  fs.setOwner(file, "root", null);
  setupAuditLogs();
  try {
    userfs.open(file);
    fail("open must not succeed");
  } catch (AccessControlException e) {
    System.out.println("got access denied, as expected.");
  }
  verifyAuditLogs(false);
}
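This snippet (like Examples 3 and 4 below) relies on fixture fields initialized elsewhere in the test class: the shared conf, a superuser fs, the test file names in fnames, and the unprivileged userGroupInfo. A plausible setup for the user fixture, following the createUserForTesting pattern that Example 7 uses; the user and group names here are illustrative placeholders, not taken from this page:

import org.apache.hadoop.security.UserGroupInformation;

// Illustrative fixture: an ordinary (non-super) user for permission tests.
// "bob" and "group1" are placeholder names.
private static final UserGroupInformation userGroupInfo =
    UserGroupInformation.createUserForTesting("bob", new String[] { "group1" });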
Example 2: initCluster
import org.apache.hadoop.hdfs.DFSTestUtil; // import for DFSTestUtil.getFileSystemAs

private static void initCluster(boolean format) throws Exception {
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(format)
      .build();
  hdfs = cluster.getFileSystem();
  assertTrue(hdfs instanceof DistributedFileSystem);
  hdfsAsUser1 = DFSTestUtil.getFileSystemAs(user1, conf);
  assertTrue(hdfsAsUser1 instanceof DistributedFileSystem);
  hdfsAsUser2 = DFSTestUtil.getFileSystemAs(user2, conf);
  assertTrue(hdfsAsUser2 instanceof DistributedFileSystem);
}
Example 3: testAuditAllowed
import org.apache.hadoop.hdfs.DFSTestUtil; // import for DFSTestUtil.getFileSystemAs

/** Test that an allowed operation puts a proper entry in the audit log. */
@Test
public void testAuditAllowed() throws Exception {
  final Path file = new Path(fnames[0]);
  FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);
  setupAuditLogs();
  InputStream istream = userfs.open(file);
  int val = istream.read();
  istream.close();
  verifyAuditLogs(true);
  assertTrue("failed to read from file", val >= 0);
}
Example 4: testAuditAllowedStat
import org.apache.hadoop.hdfs.DFSTestUtil; // import for DFSTestUtil.getFileSystemAs

/** Test that an allowed stat puts a proper entry in the audit log. */
@Test
public void testAuditAllowedStat() throws Exception {
  final Path file = new Path(fnames[0]);
  FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);
  setupAuditLogs();
  FileStatus st = userfs.getFileStatus(file);
  verifyAuditLogs(true);
  assertTrue("failed to stat file", st != null && st.isFile());
}
Example 5: testListCachePoolPermissions
import org.apache.hadoop.hdfs.DFSTestUtil; // import for DFSTestUtil.getFileSystemAs

@Test(timeout=60000)
public void testListCachePoolPermissions() throws Exception {
  final UserGroupInformation myUser = UserGroupInformation
      .createRemoteUser("myuser");
  final DistributedFileSystem myDfs =
      (DistributedFileSystem)DFSTestUtil.getFileSystemAs(myUser, conf);
  final String poolName = "poolparty";
  dfs.addCachePool(new CachePoolInfo(poolName)
      .setMode(new FsPermission((short)0700)));
  // Should only see partial info
  RemoteIterator<CachePoolEntry> it = myDfs.listCachePools();
  CachePoolInfo info = it.next().getInfo();
  assertFalse(it.hasNext());
  assertEquals("Expected pool name", poolName, info.getPoolName());
  assertNull("Unexpected owner name", info.getOwnerName());
  assertNull("Unexpected group name", info.getGroupName());
  assertNull("Unexpected mode", info.getMode());
  assertNull("Unexpected limit", info.getLimit());
  // Modify the pool so myuser is now the owner
  final long limit = 99;
  dfs.modifyCachePool(new CachePoolInfo(poolName)
      .setOwnerName(myUser.getShortUserName())
      .setLimit(limit));
  // Should see full info
  it = myDfs.listCachePools();
  info = it.next().getInfo();
  assertFalse(it.hasNext());
  assertEquals("Expected pool name", poolName, info.getPoolName());
  assertEquals("Mismatched owner name", myUser.getShortUserName(),
      info.getOwnerName());
  assertNotNull("Expected group name", info.getGroupName());
  assertEquals("Mismatched mode", (short) 0700,
      info.getMode().toShort());
  assertEquals("Mismatched limit", limit, (long)info.getLimit());
}
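Note the design choice the assertions capture: listCachePools does not fail for a caller who lacks read access to a pool. The listing still includes the pool, but only its name; owner, group, mode, and limit come back null until the caller gains read access (here, by being made the pool's owner).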
Example 6: initCluster
import org.apache.hadoop.hdfs.DFSTestUtil; // import for DFSTestUtil.getFileSystemAs

/**
 * Initialize the cluster, wait for it to become active, and get FileSystem
 * instances for our test users.
 *
 * @param format if true, format the NameNode and DataNodes before starting up
 * @throws Exception if any step fails
 */
private static void initCluster(boolean format) throws Exception {
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format)
      .build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  fsAsBruce = DFSTestUtil.getFileSystemAs(BRUCE, conf);
  fsAsDiana = DFSTestUtil.getFileSystemAs(DIANA, conf);
}
Example 7: testListWithDifferentUser
import org.apache.hadoop.hdfs.DFSTestUtil; // import for DFSTestUtil.getFileSystemAs

/**
 * Test the listing with different user names to make sure only directories
 * that are owned by the user are listed.
 */
@Test (timeout=60000)
public void testListWithDifferentUser() throws Exception {
  cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
  // first make dir1 and dir2 snapshottable
  hdfs.allowSnapshot(dir1);
  hdfs.allowSnapshot(dir2);
  hdfs.setPermission(root, FsPermission.valueOf("-rwxrwxrwx"));
  // create two dirs and make them snapshottable under the name of user1
  UserGroupInformation ugi1 = UserGroupInformation.createUserForTesting(
      "user1", new String[] { "group1" });
  DistributedFileSystem fs1 = (DistributedFileSystem) DFSTestUtil
      .getFileSystemAs(ugi1, conf);
  Path dir1_user1 = new Path("/dir1_user1");
  Path dir2_user1 = new Path("/dir2_user1");
  fs1.mkdirs(dir1_user1);
  fs1.mkdirs(dir2_user1);
  hdfs.allowSnapshot(dir1_user1);
  hdfs.allowSnapshot(dir2_user1);
  // user2
  UserGroupInformation ugi2 = UserGroupInformation.createUserForTesting(
      "user2", new String[] { "group2" });
  DistributedFileSystem fs2 = (DistributedFileSystem) DFSTestUtil
      .getFileSystemAs(ugi2, conf);
  Path dir_user2 = new Path("/dir_user2");
  Path subdir_user2 = new Path(dir_user2, "subdir");
  fs2.mkdirs(dir_user2);
  fs2.mkdirs(subdir_user2);
  hdfs.allowSnapshot(dir_user2);
  hdfs.allowSnapshot(subdir_user2);
  // super user
  String supergroup = conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY,
      DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
  UserGroupInformation superUgi = UserGroupInformation.createUserForTesting(
      "superuser", new String[] { supergroup });
  DistributedFileSystem fs3 = (DistributedFileSystem) DFSTestUtil
      .getFileSystemAs(superUgi, conf);
  // list the snapshottable dirs for superuser
  SnapshottableDirectoryStatus[] dirs = fs3.getSnapshottableDirListing();
  // 6 snapshottable dirs: dir1, dir2, dir1_user1, dir2_user1, dir_user2, and
  // subdir_user2
  assertEquals(6, dirs.length);
  // list the snapshottable dirs for user1
  dirs = fs1.getSnapshottableDirListing();
  // 2 dirs owned by user1: dir1_user1 and dir2_user1
  assertEquals(2, dirs.length);
  assertEquals(dir1_user1, dirs[0].getFullPath());
  assertEquals(dir2_user1, dirs[1].getFullPath());
  // list the snapshottable dirs for user2
  dirs = fs2.getSnapshottableDirListing();
  // 2 dirs owned by user2: dir_user2 and subdir_user2
  assertEquals(2, dirs.length);
  assertEquals(dir_user2, dirs[0].getFullPath());
  assertEquals(subdir_user2, dirs[1].getFullPath());
}
Example 8: createFileSystem
import org.apache.hadoop.hdfs.DFSTestUtil; // import for DFSTestUtil.getFileSystemAs

/**
 * Creates a FileSystem for a specific user.
 *
 * @param user UserGroupInformation specific user
 * @return FileSystem for specific user
 * @throws Exception if creation fails
 */
protected FileSystem createFileSystem(UserGroupInformation user)
    throws Exception {
  return DFSTestUtil.getFileSystemAs(user, conf);
}
Example 9: createFileSystem
import org.apache.hadoop.hdfs.DFSTestUtil; // import for DFSTestUtil.getFileSystemAs

/**
 * Creates a FileSystem for a specific user.
 *
 * @param user UserGroupInformation specific user
 * @return FileSystem for specific user
 * @throws Exception if creation fails
 */
protected FileSystem createFileSystem(UserGroupInformation user)
    throws Exception {
  return DFSTestUtil.getFileSystemAs(user, cluster.getConfiguration(0));
}