This article collects typical usage examples of the Java method org.apache.hadoop.security.UserGroupInformation.createUserForTesting. If you are unsure what UserGroupInformation.createUserForTesting does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.security.UserGroupInformation.
The sections below present 15 code examples of UserGroupInformation.createUserForTesting, sorted by popularity by default.
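Before the individual examples, here is a minimal, self-contained sketch of the pattern that nearly all of them share: create an in-memory test user with createUserForTesting, then run actions under that identity via doAs. This sketch is not taken from any example below; the user name "alice", the group "users", and the plain default Configuration are assumptions made for illustration.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UserGroupInformation;

public class CreateUserForTestingDemo {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();

    // Build a UGI for a fake user with the given group memberships.
    // Nothing is verified against the OS or Kerberos; the user exists only
    // in memory, which is why this method is intended for tests.
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
        "alice", new String[] { "users" }); // hypothetical user and group

    // Run an action as that user. FileSystem instances are cached per UGI,
    // so the FileSystem obtained inside doAs is bound to "alice".
    FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
      @Override
      public FileSystem run() throws Exception {
        return FileSystem.get(conf);
      }
    });

    System.out.println("user: " + ugi.getShortUserName()
        + ", groups: " + String.join(",", ugi.getGroupNames()));
    System.out.println("working directory: " + fs.getWorkingDirectory());
  }
}

The 15 examples below are variations of this pattern, differing mainly in which action runs under the test user's identity.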
Example 1: checkFile
import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
static void checkFile(Path p, int expectedsize, final Configuration conf
    ) throws IOException, InterruptedException {
  //open the file with another user account
  final String username = UserGroupInformation.getCurrentUser().getShortUserName()
      + "_" + ++userCount;

  UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username,
      new String[] {"supergroup"});

  final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf);
  final HdfsDataInputStream in = (HdfsDataInputStream)fs.open(p);

  //Check visible length
  Assert.assertTrue(in.getVisibleLength() >= expectedsize);

  //Able to read?
  for(int i = 0; i < expectedsize; i++) {
    Assert.assertEquals((byte)i, (byte)in.read());
  }
  in.close();
}
Example 2: testUnreadableBySuperuserXAttr
import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
/**
 * This tests the "unreadable by superuser" xattr which denies access to a
 * file for the superuser. See HDFS-6705 for details.
 */
@Test(timeout = 120000)
public void testUnreadableBySuperuserXAttr() throws Exception {
  // Run tests as superuser...
  doTestUnreadableBySuperuserXAttr(fs, true);

  // ...and again as non-superuser
  final UserGroupInformation user = UserGroupInformation.
      createUserForTesting("user", new String[] { "mygroup" });
  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      final FileSystem userFs = dfsCluster.getFileSystem();
      doTestUnreadableBySuperuserXAttr(userFs, false);
      return null;
    }
  });
}
Example 3: testRpcFallbackToSimpleAuth
import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
public void testRpcFallbackToSimpleAuth(Class<? extends RpcClient> rpcImplClass) throws Exception {
  String krbKeytab = getKeytabFileForTesting();
  String krbPrincipal = getPrincipalForTesting();
  UserGroupInformation ugi = loginKerberosPrincipal(krbKeytab, krbPrincipal);
  assertEquals(AuthenticationMethod.KERBEROS, ugi.getAuthenticationMethod());
  assertEquals(krbPrincipal, ugi.getUserName());

  String clientUsername = "testuser";
  UserGroupInformation clientUgi = UserGroupInformation.createUserForTesting(clientUsername,
      new String[]{clientUsername});

  // check that the client user is insecure
  assertNotSame(ugi, clientUgi);
  assertEquals(AuthenticationMethod.SIMPLE, clientUgi.getAuthenticationMethod());
  assertEquals(clientUsername, clientUgi.getUserName());

  Configuration clientConf = new Configuration();
  clientConf.set(User.HBASE_SECURITY_CONF_KEY, "simple");
  callRpcService(rpcImplClass, User.create(clientUgi), clientConf, true);
}
Example 4: testGroups
import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
@Test
public void testGroups() {
  Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
  Configuration conf = new Configuration();
  String jobOwner = "testuser";
  conf.set(JobACL.VIEW_JOB.getAclName(), jobOwner);
  conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
  String user = "testuser2";
  String adminGroup = "adminGroup";
  conf.set(MRConfig.MR_ADMINS, " " + adminGroup);

  JobACLsManager aclsManager = new JobACLsManager(conf);
  tmpJobACLs = aclsManager.constructJobACLs(conf);
  final Map<JobACL, AccessControlList> jobACLs = tmpJobACLs;

  UserGroupInformation callerUGI = UserGroupInformation.createUserForTesting(
      user, new String[] {adminGroup});
  // ACLs are enabled and the caller is in the admin group, so access is granted
  boolean val = aclsManager.checkAccess(callerUGI, JobACL.VIEW_JOB, jobOwner,
      jobACLs.get(JobACL.VIEW_JOB));
  assertTrue("user in admin group should have access", val);
}
Example 5: createHdfsWithDifferentUsername
import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
/**
 * Returns the reference to a new instance of FileSystem created
 * with a different user name.
 * @param conf current Configuration
 * @return FileSystem instance
 * @throws IOException
 * @throws InterruptedException
 */
public static FileSystem createHdfsWithDifferentUsername(final Configuration conf
    ) throws IOException, InterruptedException {
  String username = UserGroupInformation.getCurrentUser().getShortUserName() + "_XXX";
  UserGroupInformation ugi =
      UserGroupInformation.createUserForTesting(username, new String[]{"supergroup"});
  return DFSTestUtil.getFileSystemAs(ugi, conf);
}
Example 6: setupCluster
import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
@BeforeClass
public static void setupCluster() {
  final Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();

    //change root permission to 777
    cluster.getFileSystem().setPermission(
        new Path("/"), new FsPermission((short)0777));

    final String uri = WebHdfsFileSystem.SCHEME + "://"
        + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);

    //get file system as a non-superuser
    final UserGroupInformation current = UserGroupInformation.getCurrentUser();
    final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
        current.getShortUserName() + "x", new String[]{"user"});
    fileSystem = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
      @Override
      public FileSystem run() throws Exception {
        return FileSystem.get(new URI(uri), conf);
      }
    });

    defaultWorkingDirectory = fileSystem.getWorkingDirectory();
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
Example 7: testListEncryptionZonesAsNonSuperUser
import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
/**
 * Test listing encryption zones as a non-superuser.
 */
@Test(timeout = 60000)
public void testListEncryptionZonesAsNonSuperUser() throws Exception {
  final UserGroupInformation user = UserGroupInformation.
      createUserForTesting("user", new String[] { "mygroup" });

  final Path testRoot = new Path("/tmp/TestEncryptionZones");
  final Path superPath = new Path(testRoot, "superuseronly");
  final Path allPath = new Path(testRoot, "accessall");

  fsWrapper.mkdir(superPath, new FsPermission((short) 0700), true);
  dfsAdmin.createEncryptionZone(superPath, TEST_KEY);

  fsWrapper.mkdir(allPath, new FsPermission((short) 0707), true);
  dfsAdmin.createEncryptionZone(allPath, TEST_KEY);

  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      final HdfsAdmin userAdmin =
          new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
      try {
        userAdmin.listEncryptionZones();
      } catch (AccessControlException e) {
        assertExceptionContains("Superuser privilege is required", e);
      }
      return null;
    }
  });
}
Example 8: setUpUsers
import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
@Before
public void setUpUsers() throws IOException {
  // Make sure the current user's info is in the list of test users.
  UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
  UserGroupInformation.createUserForTesting(currentUser.getUserName(),
      currentUser.getGroupNames());

  testUser1 = UserGroupInformation.createUserForTesting("foo", new String[]{"bar", "baz"});
  testUser2 = UserGroupInformation.createUserForTesting("fiz", new String[]{"buz", "boz"});
}
Example 9: testLsr
import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
@Test (timeout = 30000)
public void testLsr() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  DistributedFileSystem dfs = cluster.getFileSystem();
  try {
    final String root = createTree(dfs, "lsr");
    dfs.mkdirs(new Path(root, "zzz"));

    runLsr(new FsShell(conf), root, 0);

    final Path sub = new Path(root, "sub");
    dfs.setPermission(sub, new FsPermission((short)0));

    final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    final String tmpusername = ugi.getShortUserName() + "1";
    UserGroupInformation tmpUGI = UserGroupInformation.createUserForTesting(
        tmpusername, new String[] {tmpusername});
    String results = tmpUGI.doAs(new PrivilegedExceptionAction<String>() {
      @Override
      public String run() throws Exception {
        return runLsr(new FsShell(conf), root, 1);
      }
    });
    assertTrue(results.contains("zzz"));
  } finally {
    cluster.shutdown();
  }
}
Example 10: testTimelineClientInDSAppMaster
import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
@Test
public void testTimelineClientInDSAppMaster() throws Exception {
  ApplicationMaster appMaster = new ApplicationMaster();
  appMaster.appSubmitterUgi =
      UserGroupInformation.createUserForTesting("foo", new String[]{"bar"});
  Configuration conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
  appMaster.startTimelineClient(conf);
  Assert.assertEquals(appMaster.appSubmitterUgi,
      ((TimelineClientImpl)appMaster.timelineClient).getUgi());
}
Example 11: setup
import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
@Before
public void setup() throws Exception {
  user1 = UserGroupInformation.createUserForTesting("alice",
      new String[]{"users"});
  user2 = UserGroupInformation.createUserForTesting("bob",
      new String[]{"users"});
  cluster = new MiniMRCluster(0, 0, 1, "file:///", 1);
}
Example 12: testFactory
import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
@SuppressWarnings("unchecked")
@Test
public void testFactory() throws Exception {
  final String[] groups = new String[]{"supergroup"};
  final UserGroupInformation[] ugi = new UserGroupInformation[3];
  for(int i = 0; i < ugi.length; i++) {
    ugi[i] = UserGroupInformation.createUserForTesting("user" + i, groups);
  }

  Mockito.doReturn(
      new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
          (short) 777), "owner", "group", new byte[0], new byte[0],
          1010, 0, null, (byte) 0)).when(mcp).getFileInfo(anyString());
  Mockito
      .doReturn(
          new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
              (short) 777), "owner", "group", new byte[0], new byte[0],
              1010, 0, null, (byte) 0))
      .when(mcp)
      .create(anyString(), (FsPermission) anyObject(), anyString(),
          (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
          anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());

  final Configuration conf = new Configuration();
  final DFSClient c1 = createDFSClientAs(ugi[0], conf);
  FSDataOutputStream out1 = createFsOut(c1, "/out1");
  final DFSClient c2 = createDFSClientAs(ugi[0], conf);
  FSDataOutputStream out2 = createFsOut(c2, "/out2");
  Assert.assertEquals(c1.getLeaseRenewer(), c2.getLeaseRenewer());

  final DFSClient c3 = createDFSClientAs(ugi[1], conf);
  FSDataOutputStream out3 = createFsOut(c3, "/out3");
  Assert.assertTrue(c1.getLeaseRenewer() != c3.getLeaseRenewer());

  final DFSClient c4 = createDFSClientAs(ugi[1], conf);
  FSDataOutputStream out4 = createFsOut(c4, "/out4");
  Assert.assertEquals(c3.getLeaseRenewer(), c4.getLeaseRenewer());

  final DFSClient c5 = createDFSClientAs(ugi[2], conf);
  FSDataOutputStream out5 = createFsOut(c5, "/out5");
  Assert.assertTrue(c1.getLeaseRenewer() != c5.getLeaseRenewer());
  Assert.assertTrue(c3.getLeaseRenewer() != c5.getLeaseRenewer());
}
Example 13: testTokenAuthentication
import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
@Test
public void testTokenAuthentication() throws Exception {
  UserGroupInformation testuser =
      UserGroupInformation.createUserForTesting("testuser", new String[]{"testgroup"});
  testuser.setAuthenticationMethod(
      UserGroupInformation.AuthenticationMethod.TOKEN);
  final Configuration conf = TEST_UTIL.getConfiguration();
  UserGroupInformation.setConfiguration(conf);
  Token<AuthenticationTokenIdentifier> token =
      secretManager.generateToken("testuser");
  LOG.debug("Got token: " + token.toString());
  testuser.addToken(token);

  // verify the server authenticates us as this token user
  testuser.doAs(new PrivilegedExceptionAction<Object>() {
    public Object run() throws Exception {
      Configuration c = server.getConfiguration();
      RpcClient rpcClient = RpcClientFactory.createClient(c, clusterId.toString());
      ServerName sn =
          ServerName.valueOf(server.getAddress().getHostName(), server.getAddress().getPort(),
              System.currentTimeMillis());
      try {
        BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sn,
            User.getCurrent(), HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
        AuthenticationProtos.AuthenticationService.BlockingInterface stub =
            AuthenticationProtos.AuthenticationService.newBlockingStub(channel);
        AuthenticationProtos.WhoAmIResponse response =
            stub.whoAmI(null, AuthenticationProtos.WhoAmIRequest.getDefaultInstance());
        String myname = response.getUsername();
        assertEquals("testuser", myname);
        String authMethod = response.getAuthMethod();
        assertEquals("TOKEN", authMethod);
      } finally {
        rpcClient.close();
      }
      return null;
    }
  });
}
Example 14: loginTestUser
import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
private static void loginTestUser(String username, String group) {
  UserGroupInformation currentUgi =
      UserGroupInformation.createUserForTesting(username, new String[]{group});
  UserGroupInformation.setLoginUser(currentUgi);
}
Example 15: testRemoteException
import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
@Test (timeout = 30000)
public void testRemoteException() throws Exception {
  UserGroupInformation tmpUGI =
      UserGroupInformation.createUserForTesting("tmpname", new String[] {"mygroup"});
  MiniDFSCluster dfs = null;
  PrintStream bak = null;
  try {
    final Configuration conf = new HdfsConfiguration();
    dfs = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    FileSystem fs = dfs.getFileSystem();
    Path p = new Path("/foo");
    fs.mkdirs(p);
    fs.setPermission(p, new FsPermission((short)0700));

    bak = System.err;
    tmpUGI.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        FsShell fshell = new FsShell(conf);
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        PrintStream tmp = new PrintStream(out);
        System.setErr(tmp);
        String[] args = new String[2];
        args[0] = "-ls";
        args[1] = "/foo";
        int ret = ToolRunner.run(fshell, args);
        assertEquals("returned should be 1", 1, ret);
        String str = out.toString();
        assertTrue("permission denied printed",
            str.indexOf("Permission denied") != -1);
        out.reset();
        return null;
      }
    });
  } finally {
    if (bak != null) {
      System.setErr(bak);
    }
    if (dfs != null) {
      dfs.shutdown();
    }
  }
}