This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.setPermission. If you are wondering what FileSystem.setPermission does, how to call it, or want working samples, the curated code examples below should help. You can also explore the enclosing class, org.apache.hadoop.fs.FileSystem, for further usage examples.
The following presents 14 code examples of FileSystem.setPermission, ordered by popularity.
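As a quick orientation before the examples, here is a minimal, self-contained sketch of a typical call; the path and permission bits are placeholders rather than values taken from any example below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class SetPermissionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Resolve the default filesystem configured in core-site.xml (HDFS, local, ...)
    FileSystem fs = FileSystem.get(conf);
    Path target = new Path("/tmp/example");  // placeholder path

    // FsPermission can be built from an octal string, a raw short, or FsAction triples;
    // the three calls below all set the same rwxr-x--- (0750) bits.
    fs.setPermission(target, new FsPermission("750"));
    fs.setPermission(target, new FsPermission((short) 0750));
    fs.setPermission(target, new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE));

    fs.close();
  }
}

Note that setPermission only changes the permission bits of an existing path; it does not create the path, and on HDFS the caller must be the file owner or a superuser.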
Example 1: testJksProvider
import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
@Test
public void testJksProvider() throws Exception {
  Configuration conf = new Configuration();
  final Path jksPath = new Path(tmpDir.toString(), "test.jks");
  final String ourUrl =
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();

  File file = new File(tmpDir, "test.jks");
  file.delete();
  conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
  checkSpecificProvider(conf, ourUrl);
  Path path = ProviderUtils.unnestUri(new URI(ourUrl));
  FileSystem fs = path.getFileSystem(conf);
  FileStatus s = fs.getFileStatus(path);
  assertTrue(s.getPermission().toString().equals("rwx------"));
  assertTrue(file + " should exist", file.isFile());

  // check permission retention after explicit change
  fs.setPermission(path, new FsPermission("777"));
  checkPermissionRetention(conf, ourUrl, path);
}
Example 2: testLocalJksProvider
import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
@Test
public void testLocalJksProvider() throws Exception {
  Configuration conf = new Configuration();
  final Path jksPath = new Path(tmpDir.toString(), "test.jks");
  final String ourUrl =
      LocalJavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();

  File file = new File(tmpDir, "test.jks");
  file.delete();
  conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
  checkSpecificProvider(conf, ourUrl);
  Path path = ProviderUtils.unnestUri(new URI(ourUrl));
  FileSystem fs = path.getFileSystem(conf);
  FileStatus s = fs.getFileStatus(path);
  assertTrue("Unexpected permissions: " + s.getPermission().toString(),
      s.getPermission().toString().equals("rwx------"));
  assertTrue(file + " should exist", file.isFile());

  // check permission retention after explicit change
  fs.setPermission(path, new FsPermission("777"));
  checkPermissionRetention(conf, ourUrl, path);
}
Example 3: testSetPermission
import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
@Override
protected void testSetPermission() throws Exception {
  if (Path.WINDOWS) {
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    Path path = new Path(getProxiedFSTestDir(), "foodir");
    fs.mkdirs(path);

    fs = getHttpFSFileSystem();
    FsPermission permission1 =
        new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
    fs.setPermission(path, permission1);
    fs.close();

    fs = FileSystem.get(getProxiedFSConf());
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();
    FsPermission permission2 = status1.getPermission();
    Assert.assertEquals(permission2, permission1);

    // sticky bit not supported on Windows with local file system, so the
    // subclass skips that part of the test
  } else {
    super.testSetPermission();
  }
}
Example 4: createTestSetup
import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
public static String createTestSetup(String baseDir, FileSystem fs, FsPermission perm) throws IOException {
  String base = getBase(baseDir);
  fs.mkdirs(new Path(base + "/newTest/hello/world1"));
  fs.mkdirs(new Path(base + "/newTest/hello/world2/newworld"));
  fs.mkdirs(new Path(base + "/newTest/hello/world3/oldworld"));

  fs.setPermission(new Path(base + "/newTest"), perm);
  fs.setPermission(new Path(base + "/newTest/hello"), perm);
  fs.setPermission(new Path(base + "/newTest/hello/world1"), perm);
  fs.setPermission(new Path(base + "/newTest/hello/world2"), perm);
  fs.setPermission(new Path(base + "/newTest/hello/world2/newworld"), perm);
  fs.setPermission(new Path(base + "/newTest/hello/world3"), perm);
  fs.setPermission(new Path(base + "/newTest/hello/world3/oldworld"), perm);

  createFile(fs, new Path(base, "/newTest/1"));
  createFile(fs, new Path(base, "/newTest/hello/2"));
  createFile(fs, new Path(base, "/newTest/hello/world3/oldworld/3"));
  createFile(fs, new Path(base, "/newTest/hello/world2/4"));
  return base;
}
Example 5: updateDestStatus
import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
private static void updateDestStatus(FileStatus src, FileStatus dst,
    EnumSet<FileAttribute> preserved, FileSystem destFileSys)
    throws IOException {
  String owner = null;
  String group = null;
  if (preserved.contains(FileAttribute.USER)
      && !src.getOwner().equals(dst.getOwner())) {
    owner = src.getOwner();
  }
  if (preserved.contains(FileAttribute.GROUP)
      && !src.getGroup().equals(dst.getGroup())) {
    group = src.getGroup();
  }
  if (owner != null || group != null) {
    // a null owner or group tells setOwner to leave that field unchanged
    destFileSys.setOwner(dst.getPath(), owner, group);
  }
  if (preserved.contains(FileAttribute.PERMISSION)
      && !src.getPermission().equals(dst.getPermission())) {
    destFileSys.setPermission(dst.getPath(), src.getPermission());
  }
  if (preserved.contains(FileAttribute.TIMES)) {
    destFileSys.setTimes(dst.getPath(),
        src.getModificationTime(), src.getAccessTime());
  }
}
Example 6: confirmSettingAndGetting
import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
/**
 * Test basic ability to get and set sticky bits on files and directories.
 */
private void confirmSettingAndGetting(FileSystem hdfs, Path p, Path baseDir)
    throws IOException {
  // Initially sticky bit should not be set
  assertFalse(hdfs.getFileStatus(p).getPermission().getStickyBit());

  // Same permission, but with sticky bit on
  short withSB;
  withSB = (short) (hdfs.getFileStatus(p).getPermission().toShort() | 01000);
  assertTrue((new FsPermission(withSB)).getStickyBit());

  hdfs.setPermission(p, new FsPermission(withSB));
  assertTrue(hdfs.getFileStatus(p).getPermission().getStickyBit());

  // Write a file to the fs, try to set its sticky bit
  Path f = new Path(baseDir, "somefile");
  writeFile(hdfs, f);
  assertFalse(hdfs.getFileStatus(f).getPermission().getStickyBit());

  withSB = (short) (hdfs.getFileStatus(f).getPermission().toShort() | 01000);
  hdfs.setPermission(f, new FsPermission(withSB));
  assertTrue(hdfs.getFileStatus(f).getPermission().getStickyBit());
}
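A side note on the sticky-bit idiom above: the test ORs the raw 01000 bit into the short form of the permission. If you would rather avoid the bit arithmetic, FsPermission also offers a constructor that takes the sticky bit as an explicit boolean flag. A minimal equivalent sketch (the path is a placeholder, not part of the test):

// Build rwxrwxrwt directly: an FsAction triple plus an explicit sticky-bit flag.
FsPermission stickyAll = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true);
hdfs.setPermission(new Path("/tmp/somedir"), stickyAll);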
Example 7: checkExists
import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
private boolean checkExists(FileSystem fs, Path path, FsPermission fsPerm)
    throws IOException {
  boolean exists = true;
  try {
    FileStatus appDirStatus = fs.getFileStatus(path);
    // as a side effect of the existence check, repair drifted permissions
    if (!APP_DIR_PERMISSIONS.equals(appDirStatus.getPermission())) {
      fs.setPermission(path, APP_DIR_PERMISSIONS);
    }
  } catch (FileNotFoundException fnfe) {
    exists = false;
  }
  return exists;
}
Example 8: testCheckHasPermission
import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
@Test
public void testCheckHasPermission() throws Exception {
  getSabotContext().getCatalogService()
      .refreshSource(new NamespaceKey("hive"), CatalogService.REFRESH_EVERYTHING_NOW);

  NamespaceService ns = getSabotContext().getNamespaceService(SystemUser.SYSTEM_USERNAME);
  NamespaceKey dataset = new NamespaceKey(PathUtils.parseFullPath("hive.db1.kv_db1"));
  DatasetConfig datasetConfig = ns.getDataset(dataset);
  assertTrue(getSabotContext().getCatalogService().getStoragePlugin("hive")
      .hasAccessPermission(ImpersonationUtil.getProcessUserName(), dataset, datasetConfig));

  final Path tableFile = new Path(hiveTest.getWhDir() + "/db1.db/kv_db1/000000_0");
  final Path tableDir = new Path(hiveTest.getWhDir() + "/db1.db/kv_db1");
  final FileSystem localFs = FileSystem.getLocal(new Configuration());

  try {
    // no read on file
    localFs.setPermission(tableFile,
        new FsPermission(FsAction.WRITE_EXECUTE, FsAction.WRITE_EXECUTE, FsAction.WRITE_EXECUTE));
    assertFalse(getSabotContext().getCatalogService().getStoragePlugin("hive")
        .hasAccessPermission(ImpersonationUtil.getProcessUserName(), dataset, datasetConfig));
  } finally {
    localFs.setPermission(tableFile,
        new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
  }

  try {
    // no exec on dir
    localFs.setPermission(tableDir,
        new FsPermission(FsAction.READ_WRITE, FsAction.READ_WRITE, FsAction.READ_WRITE));
    assertFalse(getSabotContext().getCatalogService().getStoragePlugin("hive")
        .hasAccessPermission(ImpersonationUtil.getProcessUserName(), dataset, datasetConfig));
  } finally {
    localFs.setPermission(tableDir,
        new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
  }
}
Example 9: testPreserveReplicationOnFile
import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
@Test
public void testPreserveReplicationOnFile() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.REPLICATION);

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");
  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);
  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertTrue(srcStatus.getReplication() == dstStatus.getReplication());
}
Example 10: testPreserveDefaults
import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
@Test
public void testPreserveDefaults() throws IOException {
  FileSystem fs = FileSystem.get(config);

  // preserve replication, block size, user, group, permission,
  // checksum type and timestamps
  EnumSet<FileAttribute> attributes =
      DistCpUtils.unpackAttributes(
          DistCpOptionSwitch.PRESERVE_STATUS_DEFAULT.substring(1));

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");
  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);
  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertTrue(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertTrue(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertTrue(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertTrue(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertTrue(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertTrue(srcStatus.getReplication() == dstStatus.getReplication());
}
Example 11: startCluster
import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
private void startCluster(Configuration conf) throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "target/test-dir");
  }
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");
  String cp = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
      StringUtils.join(",",
          YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH))
      + File.pathSeparator + classpathDir;
  conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, cp);

  dfsCluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(
      new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
      new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
      new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  FileSystem.setDefaultUri(conf, fileSystem.getUri());

  mrCluster = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);

  // so the minicluster conf is avail to the containers.
  Writer writer = new FileWriter(classpathDir + "/core-site.xml");
  mrCluster.getConfig().writeXml(writer);
  writer.close();
}
Example 12: testPreserveReplicationOnDirectory
import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
@Test
public void testPreserveReplicationOnDirectory() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.REPLICATION);

  Path dst = new Path("/tmp/abc");
  Path src = new Path("/tmp/src");
  createDirectory(fs, src);
  createDirectory(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setReplication(dst, (short) 2);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);
  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  // Replication shouldn't apply to dirs so this should still be 0 == 0
  Assert.assertTrue(srcStatus.getReplication() == dstStatus.getReplication());
}
Example 13: setUp
import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
protected void setUp() throws Exception {
  super.setUp();
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "/tmp");
  }
  int taskTrackers = 2;
  int dataNodes = 2;
  String proxyUser = System.getProperty("user.name");
  String proxyGroup = "g";
  StringBuilder sb = new StringBuilder();
  sb.append("127.0.0.1,localhost");
  for (InetAddress i : InetAddress.getAllByName(InetAddress.getLocalHost().getHostName())) {
    sb.append(",").append(i.getCanonicalHostName());
  }

  JobConf conf = new JobConf();
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(dataNodes)
      .build();
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));

  String nnURI = fileSystem.getUri().toString();
  int numDirs = 1;
  String[] racks = null;
  String[] hosts = null;
  mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
Example 14: createNonexistingDirectory
import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
/** Create a directory. */
static boolean createNonexistingDirectory(FileSystem fs, Path dir) throws IOException {
  if (fs.exists(dir)) {
    Util.err.println("dir (= " + dir + ") already exists.");
    return false;
  } else if (!fs.mkdirs(dir)) {
    throw new IOException("Cannot create working directory " + dir);
  }
  fs.setPermission(dir, new FsPermission((short)0777));
  return true;
}
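One detail worth noting in this last example: the explicit setPermission after mkdirs is not redundant. mkdirs applies the client-side umask (fs.permissions.umask-mode, 022 by default) to the requested mode, while setPermission stores its argument verbatim. A short sketch of the distinction, using a placeholder path:

// With the default umask of 022, the directory is actually created as rwxr-xr-x (0755)...
fs.mkdirs(new Path("/tmp/workdir"), new FsPermission((short) 0777));
// ...so a follow-up setPermission is the only way to guarantee the full 0777 bits.
fs.setPermission(new Path("/tmp/workdir"), new FsPermission((short) 0777));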