This article collects typical usage examples of the Java class org.apache.hadoop.fs.permission.FsAction. If you are wondering what the FsAction class does, how to use it, or are looking for real-world usage examples, the curated code examples below may help.
The FsAction class belongs to the org.apache.hadoop.fs.permission package. 15 code examples of the FsAction class are shown below, sorted by popularity by default.
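Before the examples, a quick refresher on the pieces they all rely on: FsAction is an enum of rwx permission combinations, implies() tests whether one action covers another, SYMBOL is its rwx-style string, and an FsPermission is built from three FsAction values (user, group, other). The following standalone sketch illustrates this; it is not taken from any of the projects quoted below.

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class FsActionBasics {
  public static void main(String[] args) {
    // implies(): READ_WRITE covers READ, but not EXECUTE
    System.out.println(FsAction.READ_WRITE.implies(FsAction.READ));    // true
    System.out.println(FsAction.READ_WRITE.implies(FsAction.EXECUTE)); // false

    // SYMBOL is the rwx-style string of an action
    System.out.println(FsAction.READ_WRITE.SYMBOL); // rw-

    // An FsPermission is three FsActions: user, group, other (here mode 640)
    FsPermission perm = new FsPermission(FsAction.READ_WRITE, FsAction.READ, FsAction.NONE);
    System.out.println(perm); // rw-r-----
  }
}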
Example 1: testSetPermission
import org.apache.hadoop.fs.permission.FsAction; // import the required package/class
@Override
protected void testSetPermission() throws Exception {
  if (Path.WINDOWS) {
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    Path path = new Path(getProxiedFSTestDir(), "foodir");
    fs.mkdirs(path);
    fs = getHttpFSFileSystem();
    FsPermission permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
    fs.setPermission(path, permission1);
    fs.close();
    fs = FileSystem.get(getProxiedFSConf());
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();
    FsPermission permission2 = status1.getPermission();
    Assert.assertEquals(permission2, permission1);
    // sticky bit not supported on Windows with local file system, so the
    // subclass skips that part of the test
  } else {
    super.testSetPermission();
  }
}
Example 2: checkAccessPermissions
import org.apache.hadoop.fs.permission.FsAction; // import the required package/class
/**
 * This method provides the default implementation of
 * {@link #access(Path, FsAction)}.
 *
 * @param stat FileStatus to check
 * @param mode type of access to check
 * @throws IOException for any error
 */
@InterfaceAudience.Private
static void checkAccessPermissions(FileStatus stat, FsAction mode)
    throws IOException {
  FsPermission perm = stat.getPermission();
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  String user = ugi.getShortUserName();
  List<String> groups = Arrays.asList(ugi.getGroupNames());
  if (user.equals(stat.getOwner())) {
    if (perm.getUserAction().implies(mode)) {
      return;
    }
  } else if (groups.contains(stat.getGroup())) {
    if (perm.getGroupAction().implies(mode)) {
      return;
    }
  } else {
    if (perm.getOtherAction().implies(mode)) {
      return;
    }
  }
  throw new AccessControlException(String.format(
      "Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", user, stat.getPath(),
      stat.getOwner(), stat.getGroup(), stat.isDirectory() ? "d" : "-", perm));
}
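For context, application code would normally trigger this check through the public FileSystem#access(Path, FsAction) method (also exercised in Examples 11 and 12 below) rather than the package-private helper above. A minimal caller-side sketch; the path is a placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;

public class AccessCheckDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path path = new Path("/tmp/some-file"); // placeholder path
    try {
      // Throws AccessControlException if the current user lacks READ access
      fs.access(path, FsAction.READ);
      System.out.println("read access granted");
    } catch (AccessControlException e) {
      System.out.println("read access denied: " + e.getMessage());
    } finally {
      fs.close();
    }
  }
}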
Example 3: printExtendedAclEntry
import org.apache.hadoop.fs.permission.FsAction; // import the required package/class
/**
 * Prints a single extended ACL entry. If the mask restricts the
 * permissions of the entry, then also prints the restricted version as the
 * effective permissions. The mask applies to all named entries and also
 * the unnamed group entry.
 * @param aclStatus AclStatus for the path
 * @param fsPerm FsPermission for the path
 * @param entry AclEntry extended ACL entry to print
 */
private void printExtendedAclEntry(AclStatus aclStatus,
    FsPermission fsPerm, AclEntry entry) {
  if (entry.getName() != null || entry.getType() == AclEntryType.GROUP) {
    FsAction entryPerm = entry.getPermission();
    FsAction effectivePerm = aclStatus
        .getEffectivePermission(entry, fsPerm);
    if (entryPerm != effectivePerm) {
      out.println(String.format("%s\t#effective:%s", entry,
          effectivePerm.SYMBOL));
    } else {
      out.println(entry);
    }
  } else {
    out.println(entry);
  }
}
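The effective permission printed above is the entry's own permission restricted by the ACL mask; conceptually it is the intersection of the two action sets. A standalone illustration of that intersection using FsAction.and(), not taken from the quoted project:

import org.apache.hadoop.fs.permission.FsAction;

public class EffectivePermDemo {
  public static void main(String[] args) {
    FsAction entryPerm = FsAction.ALL;   // e.g. the entry group:group1:rwx
    FsAction mask = FsAction.READ_WRITE; // the mask entry mask::rw-
    // The effective permission is the intersection (bitwise AND) of the two
    System.out.println(entryPerm.and(mask).SYMBOL); // rw-
  }
}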
Example 4: testMultipleAclSpecParsing
import org.apache.hadoop.fs.permission.FsAction; // import the required package/class
@Test
public void testMultipleAclSpecParsing() throws Exception {
  List<AclEntry> parsedList = AclEntry.parseAclSpec(
      "group::rwx,user:user1:rwx,user:user2:rw-,"
      + "group:group1:rw-,default:group:group1:rw-", true);
  AclEntry basicAcl = new AclEntry.Builder().setType(AclEntryType.GROUP)
      .setPermission(FsAction.ALL).build();
  AclEntry user1Acl = new AclEntry.Builder().setType(AclEntryType.USER)
      .setPermission(FsAction.ALL).setName("user1").build();
  AclEntry user2Acl = new AclEntry.Builder().setType(AclEntryType.USER)
      .setPermission(FsAction.READ_WRITE).setName("user2").build();
  AclEntry group1Acl = new AclEntry.Builder().setType(AclEntryType.GROUP)
      .setPermission(FsAction.READ_WRITE).setName("group1").build();
  AclEntry defaultAcl = new AclEntry.Builder().setType(AclEntryType.GROUP)
      .setPermission(FsAction.READ_WRITE).setName("group1")
      .setScope(AclEntryScope.DEFAULT).build();
  List<AclEntry> expectedList = new ArrayList<AclEntry>();
  expectedList.add(basicAcl);
  expectedList.add(user1Acl);
  expectedList.add(user2Acl);
  expectedList.add(group1Acl);
  expectedList.add(defaultAcl);
  assertEquals("Parsed Acl not correct", expectedList, parsedList);
}
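A usage note on the boolean argument: it tells the parser whether each spec entry carries permission bits. Removal specs, as produced by hdfs dfs -setfacl -x, omit them; a one-line sketch, assuming the same parseAclSpec API:

// Entries without permission bits, parsed with includePermissions = false
List<AclEntry> toRemove = AclEntry.parseAclSpec("user:user1,group:group1", false);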
Example 5: isPublic
import org.apache.hadoop.fs.permission.FsAction; // import the required package/class
/**
 * Returns a boolean to denote whether a cache file is visible to all (public)
 * or not
 *
 * @return true if the given path is visible to all, false otherwise
 */
@Private
public static boolean isPublic(FileSystem fs, Path current, FileStatus sStat,
    LoadingCache<Path,Future<FileStatus>> statCache) throws IOException {
  current = fs.makeQualified(current);
  // the leaf level file should be readable by others
  if (!checkPublicPermsForAll(fs, sStat, FsAction.READ_EXECUTE, FsAction.READ)) {
    return false;
  }
  if (Shell.WINDOWS && fs instanceof LocalFileSystem) {
    // Relax the requirement for public cache on LFS on Windows since default
    // permissions are "700" all the way up to the drive letter. In this
    // model, the only requirement for a user is to give EVERYONE group
    // permission on the file and the file will be considered public.
    // This code path is only hit when fs.default.name is file:/// (mainly
    // in tests).
    return true;
  }
  return ancestorsHaveExecutePermissions(fs, current.getParent(), statCache);
}
Example 6: checkPublicPermsForAll
import org.apache.hadoop.fs.permission.FsAction; // import the required package/class
private static boolean checkPublicPermsForAll(FileSystem fs,
    FileStatus status, FsAction dir, FsAction file)
    throws IOException {
  FsPermission perms = status.getPermission();
  FsAction otherAction = perms.getOtherAction();
  if (status.isDirectory()) {
    if (!otherAction.implies(dir)) {
      return false;
    }
    for (FileStatus child : fs.listStatus(status.getPath())) {
      if (!checkPublicPermsForAll(fs, child, dir, file)) {
        return false;
      }
    }
    return true;
  }
  return otherAction.implies(file);
}
Example 7: testCreate
import org.apache.hadoop.fs.permission.FsAction; // import the required package/class
private void testCreate(Path path, boolean override) throws Exception {
  FileSystem fs = getHttpFSFileSystem();
  FsPermission permission = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
  OutputStream os = fs.create(new Path(path.toUri().getPath()), permission, override, 1024,
      (short) 2, 100 * 1024 * 1024, null);
  os.write(1);
  os.close();
  fs.close();
  fs = FileSystem.get(getProxiedFSConf());
  FileStatus status = fs.getFileStatus(path);
  if (!isLocalFS()) {
    Assert.assertEquals(status.getReplication(), 2);
    Assert.assertEquals(status.getBlockSize(), 100 * 1024 * 1024);
  }
  Assert.assertEquals(status.getPermission(), permission);
  InputStream is = fs.open(path);
  Assert.assertEquals(is.read(), 1);
  is.close();
  fs.close();
}
Example 8: checkSubAccess
import org.apache.hadoop.fs.permission.FsAction; // import the required package/class
/** Guarded by {@link FSNamesystem#readLock()} */
private void checkSubAccess(byte[][] pathByNameArr, int pathIdx, INode inode,
    int snapshotId, FsAction access, boolean ignoreEmptyDir)
    throws AccessControlException {
  if (inode == null || !inode.isDirectory()) {
    return;
  }
  Stack<INodeDirectory> directories = new Stack<INodeDirectory>();
  for (directories.push(inode.asDirectory()); !directories.isEmpty(); ) {
    INodeDirectory d = directories.pop();
    ReadOnlyList<INode> cList = d.getChildrenList(snapshotId);
    if (!(cList.isEmpty() && ignoreEmptyDir)) {
      // TODO have to figure this out with inodeattribute provider
      check(getINodeAttrs(pathByNameArr, pathIdx, d, snapshotId),
          inode.getFullPathName(), access);
    }
    for (INode child : cList) {
      if (child.isDirectory()) {
        directories.push(child.asDirectory());
      }
    }
  }
}
Example 9: checkPermission
import org.apache.hadoop.fs.permission.FsAction; // import the required package/class
/**
 * Whether a cache pool can be accessed by the current context
 *
 * @param pool CachePool being accessed
 * @param access type of action being performed on the cache pool
 * @throws AccessControlException if pool cannot be accessed
 */
public void checkPermission(CachePool pool, FsAction access)
    throws AccessControlException {
  FsPermission mode = pool.getMode();
  if (isSuperUser()) {
    return;
  }
  if (getUser().equals(pool.getOwnerName())
      && mode.getUserAction().implies(access)) {
    return;
  }
  if (getGroups().contains(pool.getGroupName())
      && mode.getGroupAction().implies(access)) {
    return;
  }
  if (mode.getOtherAction().implies(access)) {
    return;
  }
  throw new AccessControlException("Permission denied while accessing pool "
      + pool.getPoolName() + ": user " + getUser() + " does not have "
      + access.toString() + " permissions.");
}
Example 10: checkAccess
import org.apache.hadoop.fs.permission.FsAction; // import the required package/class
/**
 * Throw an exception if an action is not permitted by a user on a file.
 *
 * @param ugi
 *          the user
 * @param file
 *          the file
 * @param action
 *          the action
 */
public static void checkAccess(UserGroupInformation ugi, FileStatus file,
    FsAction action) throws AccessDeniedException {
  if (ugi.getShortUserName().equals(file.getOwner())) {
    if (file.getPermission().getUserAction().implies(action)) {
      return;
    }
  } else if (contains(ugi.getGroupNames(), file.getGroup())) {
    if (file.getPermission().getGroupAction().implies(action)) {
      return;
    }
  } else if (file.getPermission().getOtherAction().implies(action)) {
    return;
  }
  throw new AccessDeniedException("Permission denied:" + " action=" + action
      + " path=" + file.getPath() + " user=" + ugi.getShortUserName());
}
Example 11: testAccessGroupMember
import org.apache.hadoop.fs.permission.FsAction; // import the required package/class
@Test
public void testAccessGroupMember() throws IOException, InterruptedException {
  FileSystem rootFs = FileSystem.get(conf);
  Path p2 = new Path("/p2");
  rootFs.mkdirs(p2);
  rootFs.setOwner(p2, UserGroupInformation.getCurrentUser().getShortUserName(), GROUP1_NAME);
  rootFs.setPermission(p2, new FsPermission((short) 0740));
  fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(conf);
    }
  });
  fs.access(p2, FsAction.READ);
  try {
    fs.access(p2, FsAction.EXECUTE);
    fail("The access call should have failed.");
  } catch (AccessControlException e) {
    assertTrue("Permission denied messages must carry the username",
        e.getMessage().contains(USER1_NAME));
    assertTrue("Permission denied messages must carry the path parent",
        e.getMessage().contains(
            p2.getParent().toUri().getPath()));
  }
}
Example 12: testAccessOthers
import org.apache.hadoop.fs.permission.FsAction; // import the required package/class
@Test
public void testAccessOthers() throws IOException, InterruptedException {
  FileSystem rootFs = FileSystem.get(conf);
  Path p3 = new Path("/p3");
  rootFs.mkdirs(p3);
  rootFs.setPermission(p3, new FsPermission((short) 0774));
  fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(conf);
    }
  });
  fs.access(p3, FsAction.READ);
  try {
    fs.access(p3, FsAction.READ_WRITE);
    fail("The access call should have failed.");
  } catch (AccessControlException e) {
    assertTrue("Permission denied messages must carry the username",
        e.getMessage().contains(USER1_NAME));
    assertTrue("Permission denied messages must carry the path parent",
        e.getMessage().contains(
            p3.getParent().toUri().getPath()));
  }
}
Example 13: testCheckAccessUrl
import org.apache.hadoop.fs.permission.FsAction; // import the required package/class
@Test(timeout=60000)
public void testCheckAccessUrl() throws IOException {
  Configuration conf = new Configuration();
  UserGroupInformation ugi =
      UserGroupInformation.createRemoteUser("test-user");
  UserGroupInformation.setLoginUser(ugi);
  WebHdfsFileSystem webhdfs = getWebHdfsFileSystem(ugi, conf);
  Path fsPath = new Path("/p1");
  URL checkAccessUrl = webhdfs.toUrl(GetOpParam.Op.CHECKACCESS,
      fsPath, new FsActionParam(FsAction.READ_WRITE));
  checkQueryParams(
      new String[]{
          GetOpParam.Op.CHECKACCESS.toQueryString(),
          new UserParam(ugi.getShortUserName()).toString(),
          FsActionParam.NAME + "=" + FsAction.READ_WRITE.SYMBOL
      },
      checkAccessUrl);
}
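For reference, since FsActionParam.NAME is "fsaction" and FsAction.READ_WRITE.SYMBOL is "rw-", the URL this test verifies looks roughly like http://<namenode>:<port>/webhdfs/v1/p1?op=CHECKACCESS&user.name=test-user&fsaction=rw- (parameter order may differ).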
Example 14: preCheckPermission
import org.apache.hadoop.fs.permission.FsAction; // import the required package/class
private void preCheckPermission() throws IOException, AccessDeniedException {
  if (shouldIgnorePreCheckPermission()) {
    return;
  }
  Path hbaseDir = FSUtils.getRootDir(getConf());
  FileSystem fs = hbaseDir.getFileSystem(getConf());
  UserProvider userProvider = UserProvider.instantiate(getConf());
  UserGroupInformation ugi = userProvider.getCurrent().getUGI();
  FileStatus[] files = fs.listStatus(hbaseDir);
  for (FileStatus file : files) {
    try {
      FSUtils.checkAccess(ugi, file, FsAction.WRITE);
    } catch (AccessDeniedException ace) {
      LOG.warn("Got AccessDeniedException when preCheckPermission ", ace);
      errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName()
          + " does not have write perms to " + file.getPath()
          + ". Please rerun hbck as hdfs user " + file.getOwner());
      throw ace;
    }
  }
}
Example 15: assertPermissionDenied
import org.apache.hadoop.fs.permission.FsAction; // import the required package/class
private void assertPermissionDenied(UserGroupInformation user, String path,
    FsAction access) throws IOException {
  try {
    INodesInPath iip = dir.getINodesInPath(path, true);
    dir.getPermissionChecker(SUPERUSER, SUPERGROUP, user).checkPermission(iip,
        false, null, null, access, null, false);
    fail("expected AccessControlException for user " + user + ", path = " +
        path + ", access = " + access);
  } catch (AccessControlException e) {
    assertTrue("Permission denied messages must carry the username",
        e.getMessage().contains(user.getUserName()));
    assertTrue("Permission denied messages must carry the path parent",
        e.getMessage().contains(
            new Path(path).getParent().toUri().getPath()));
  }
}