

Java UserGroupInformation.createUserForTesting Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.security.UserGroupInformation.createUserForTesting. If you have been wondering what UserGroupInformation.createUserForTesting does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.security.UserGroupInformation.


The following presents 15 code examples of UserGroupInformation.createUserForTesting, sorted by popularity by default.
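Before the individual examples, here is a minimal, self-contained sketch (not taken from any of the projects listed below) of the pattern most of these examples share: createUserForTesting builds an in-memory UserGroupInformation for a fake user and group list, with no real OS account or Kerberos login required, and doAs then runs code under that identity. The user name "alice", the group "users", and the path "/tmp" are illustrative placeholders.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class CreateUserForTestingSketch {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();

    // Create an in-memory test user "alice" in group "users"; nothing is
    // created on the OS or in Kerberos, the identity lives only in this JVM.
    UserGroupInformation alice =
        UserGroupInformation.createUserForTesting("alice", new String[] {"users"});

    // Execute filesystem calls as that user via doAs().
    boolean exists = alice.doAs(new PrivilegedExceptionAction<Boolean>() {
      @Override
      public Boolean run() throws Exception {
        FileSystem fs = FileSystem.get(conf);
        return fs.exists(new Path("/tmp"));
      }
    });
    System.out.println("/tmp exists as seen by alice: " + exists);
  }
}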

Example 1: checkFile

import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
static void checkFile(Path p, int expectedsize, final Configuration conf
    ) throws IOException, InterruptedException {
  //open the file with another user account
  final String username = UserGroupInformation.getCurrentUser().getShortUserName()
      + "_" + ++userCount;

  UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username, 
                               new String[] {"supergroup"});
  
  final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf);
  
  final HdfsDataInputStream in = (HdfsDataInputStream)fs.open(p);

  //Check visible length
  Assert.assertTrue(in.getVisibleLength() >= expectedsize);

  //Able to read?
  for(int i = 0; i < expectedsize; i++) {
    Assert.assertEquals((byte)i, (byte)in.read());  
  }

  in.close();
}
 
Developer: naver | Project: hadoop | Lines: 24 | Source: TestReadWhileWriting.java

Example 2: testUnreadableBySuperuserXAttr

import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
/**
 * This tests the "unreadable by superuser" xattr which denies access to a
 * file for the superuser. See HDFS-6705 for details.
 */
@Test(timeout = 120000)
public void testUnreadableBySuperuserXAttr() throws Exception {
  // Run tests as superuser...
  doTestUnreadableBySuperuserXAttr(fs, true);

  // ...and again as non-superuser
  final UserGroupInformation user = UserGroupInformation.
      createUserForTesting("user", new String[] { "mygroup" });
  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      final FileSystem userFs = dfsCluster.getFileSystem();
      doTestUnreadableBySuperuserXAttr(userFs, false);
      return null;
    }
  });
}
 
Developer: naver | Project: hadoop | Lines: 22 | Source: FSXAttrBaseTest.java

Example 3: testRpcFallbackToSimpleAuth

import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
public void testRpcFallbackToSimpleAuth(Class<? extends RpcClient> rpcImplClass) throws Exception {
  String krbKeytab = getKeytabFileForTesting();
  String krbPrincipal = getPrincipalForTesting();

  UserGroupInformation ugi = loginKerberosPrincipal(krbKeytab, krbPrincipal);
  assertEquals(AuthenticationMethod.KERBEROS, ugi.getAuthenticationMethod());
  assertEquals(krbPrincipal, ugi.getUserName());

  String clientUsername = "testuser";
  UserGroupInformation clientUgi = UserGroupInformation.createUserForTesting(clientUsername,
      new String[]{clientUsername});

  // check that the client user is insecure
  assertNotSame(ugi, clientUgi);
  assertEquals(AuthenticationMethod.SIMPLE, clientUgi.getAuthenticationMethod());
  assertEquals(clientUsername, clientUgi.getUserName());

  Configuration clientConf = new Configuration();
  clientConf.set(User.HBASE_SECURITY_CONF_KEY, "simple");
  callRpcService(rpcImplClass, User.create(clientUgi), clientConf, true);
}
 
Developer: fengchen8086 | Project: ditb | Lines: 22 | Source: TestSecureRPC.java

Example 4: testGroups

import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
@Test
public void testGroups() {
  Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
  Configuration conf = new Configuration();
  String jobOwner = "testuser";
  conf.set(JobACL.VIEW_JOB.getAclName(), jobOwner);
  conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
  String user = "testuser2";
  String adminGroup = "adminGroup";
  conf.set(MRConfig.MR_ADMINS, " " + adminGroup);

  JobACLsManager aclsManager = new JobACLsManager(conf);
  tmpJobACLs = aclsManager.constructJobACLs(conf);
  final Map<JobACL, AccessControlList> jobACLs = tmpJobACLs;

  UserGroupInformation callerUGI = UserGroupInformation.createUserForTesting(
   user, new String[] {adminGroup});
  // acls off so anyone should have access
  boolean val = aclsManager.checkAccess(callerUGI, JobACL.VIEW_JOB, jobOwner,
      jobACLs.get(JobACL.VIEW_JOB));
  assertTrue("user in admin group should have access", val);
}
 
Developer: naver | Project: hadoop | Lines: 23 | Source: TestJobAclsManager.java

Example 5: createHdfsWithDifferentUsername

import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
/**
 * Returns the reference to a new instance of FileSystem created 
 * with different user name
 * @param conf current Configuration
 * @return FileSystem instance
 * @throws IOException
 * @throws InterruptedException 
 */
public static FileSystem createHdfsWithDifferentUsername(final Configuration conf
    ) throws IOException, InterruptedException {
  String username = UserGroupInformation.getCurrentUser().getShortUserName()+"_XXX";
  UserGroupInformation ugi = 
    UserGroupInformation.createUserForTesting(username, new String[]{"supergroup"});
  
  return DFSTestUtil.getFileSystemAs(ugi, conf);
}
 
Developer: naver | Project: hadoop | Lines: 17 | Source: AppendTestUtil.java

Example 6: setupCluster

import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
@BeforeClass
public static void setupCluster() {
  final Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();

    //change root permission to 777
    cluster.getFileSystem().setPermission(
        new Path("/"), new FsPermission((short)0777));

    final String uri = WebHdfsFileSystem.SCHEME  + "://"
        + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);

    //get file system as a non-superuser
    final UserGroupInformation current = UserGroupInformation.getCurrentUser();
    final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
        current.getShortUserName() + "x", new String[]{"user"});
    fileSystem = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
      @Override
      public FileSystem run() throws Exception {
        return FileSystem.get(new URI(uri), conf);
      }
    });

    defaultWorkingDirectory = fileSystem.getWorkingDirectory();
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
 
Developer: naver | Project: hadoop | Lines: 33 | Source: TestFSMainOperationsWebHdfs.java

Example 7: testListEncryptionZonesAsNonSuperUser

import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
/**
 * Test listing encryption zones as a non super user.
 */
@Test(timeout = 60000)
public void testListEncryptionZonesAsNonSuperUser() throws Exception {

  final UserGroupInformation user = UserGroupInformation.
      createUserForTesting("user", new String[] { "mygroup" });

  final Path testRoot = new Path("/tmp/TestEncryptionZones");
  final Path superPath = new Path(testRoot, "superuseronly");
  final Path allPath = new Path(testRoot, "accessall");

  fsWrapper.mkdir(superPath, new FsPermission((short) 0700), true);
  dfsAdmin.createEncryptionZone(superPath, TEST_KEY);

  fsWrapper.mkdir(allPath, new FsPermission((short) 0707), true);
  dfsAdmin.createEncryptionZone(allPath, TEST_KEY);

  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      final HdfsAdmin userAdmin =
          new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
      try {
        userAdmin.listEncryptionZones();
      } catch (AccessControlException e) {
        assertExceptionContains("Superuser privilege is required", e);
      }
      return null;
    }
  });
}
 
Developer: naver | Project: hadoop | Lines: 34 | Source: TestEncryptionZones.java

Example 8: setUpUsers

import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
@Before
public void setUpUsers() throws IOException {
  // Make sure the current user's info is in the list of test users.
  UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
  UserGroupInformation.createUserForTesting(currentUser.getUserName(), currentUser.getGroupNames());
  
  testUser1 = UserGroupInformation.createUserForTesting("foo", new String[]{"bar", "baz"});
  testUser2 = UserGroupInformation.createUserForTesting("fiz", new String[]{"buz", "boz"});
}
 
Developer: nucypher | Project: hadoop-oss | Lines: 10 | Source: GetGroupsTestBase.java

Example 9: testLsr

import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
@Test (timeout = 30000)
public void testLsr() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  DistributedFileSystem dfs = cluster.getFileSystem();

  try {
    final String root = createTree(dfs, "lsr");
    dfs.mkdirs(new Path(root, "zzz"));
    
    runLsr(new FsShell(conf), root, 0);
    
    final Path sub = new Path(root, "sub");
    dfs.setPermission(sub, new FsPermission((short)0));

    final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    final String tmpusername = ugi.getShortUserName() + "1";
    UserGroupInformation tmpUGI = UserGroupInformation.createUserForTesting(
        tmpusername, new String[] {tmpusername});
    String results = tmpUGI.doAs(new PrivilegedExceptionAction<String>() {
      @Override
      public String run() throws Exception {
        return runLsr(new FsShell(conf), root, 1);
      }
    });
    assertTrue(results.contains("zzz"));
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver | Project: hadoop | Lines: 31 | Source: TestDFSShell.java

Example 10: testTimelineClientInDSAppMaster

import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
@Test
public void testTimelineClientInDSAppMaster() throws Exception {
  ApplicationMaster appMaster = new ApplicationMaster();
  appMaster.appSubmitterUgi =
      UserGroupInformation.createUserForTesting("foo", new String[]{"bar"});
  Configuration conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
  appMaster.startTimelineClient(conf);
  Assert.assertEquals(appMaster.appSubmitterUgi,
      ((TimelineClientImpl)appMaster.timelineClient).getUgi());
}
 
Developer: naver | Project: hadoop | Lines: 12 | Source: TestDSAppMaster.java

Example 11: setup

import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
@Before
public void setup() throws Exception {
  user1 = UserGroupInformation.createUserForTesting("alice", 
                                                    new String[]{"users"});
  user2 = UserGroupInformation.createUserForTesting("bob", 
                                                    new String[]{"users"});
  cluster = new MiniMRCluster(0,0,1,"file:///",1);
}
 
Developer: naver | Project: hadoop | Lines: 9 | Source: TestDelegationToken.java

Example 12: testFactory

import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
@SuppressWarnings("unchecked")
@Test
public void testFactory() throws Exception {
  final String[] groups = new String[]{"supergroup"};
  final UserGroupInformation[] ugi = new UserGroupInformation[3];
  for(int i = 0; i < ugi.length; i++) {
    ugi[i] = UserGroupInformation.createUserForTesting("user" + i, groups);
  }

  Mockito.doReturn(
      new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
          (short) 777), "owner", "group", new byte[0], new byte[0],
          1010, 0, null, (byte) 0)).when(mcp).getFileInfo(anyString());
  Mockito
      .doReturn(
          new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
              (short) 777), "owner", "group", new byte[0], new byte[0],
              1010, 0, null, (byte) 0))
      .when(mcp)
      .create(anyString(), (FsPermission) anyObject(), anyString(),
          (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
          anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());

  final Configuration conf = new Configuration();
  final DFSClient c1 = createDFSClientAs(ugi[0], conf);
  FSDataOutputStream out1 = createFsOut(c1, "/out1");
  final DFSClient c2 = createDFSClientAs(ugi[0], conf);
  FSDataOutputStream out2 = createFsOut(c2, "/out2");
  Assert.assertEquals(c1.getLeaseRenewer(), c2.getLeaseRenewer());
  final DFSClient c3 = createDFSClientAs(ugi[1], conf);
  FSDataOutputStream out3 = createFsOut(c3, "/out3");
  Assert.assertTrue(c1.getLeaseRenewer() != c3.getLeaseRenewer());
  final DFSClient c4 = createDFSClientAs(ugi[1], conf);
  FSDataOutputStream out4 = createFsOut(c4, "/out4");
  Assert.assertEquals(c3.getLeaseRenewer(), c4.getLeaseRenewer());
  final DFSClient c5 = createDFSClientAs(ugi[2], conf);
  FSDataOutputStream out5 = createFsOut(c5, "/out5");
  Assert.assertTrue(c1.getLeaseRenewer() != c5.getLeaseRenewer());
  Assert.assertTrue(c3.getLeaseRenewer() != c5.getLeaseRenewer());
}
 
Developer: naver | Project: hadoop | Lines: 41 | Source: TestLease.java

Example 13: testTokenAuthentication

import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
@Test
public void testTokenAuthentication() throws Exception {
  UserGroupInformation testuser =
      UserGroupInformation.createUserForTesting("testuser", new String[]{"testgroup"});

  testuser.setAuthenticationMethod(
      UserGroupInformation.AuthenticationMethod.TOKEN);
  final Configuration conf = TEST_UTIL.getConfiguration();
  UserGroupInformation.setConfiguration(conf);
  Token<AuthenticationTokenIdentifier> token =
      secretManager.generateToken("testuser");
  LOG.debug("Got token: " + token.toString());
  testuser.addToken(token);

  // verify the server authenticates us as this token user
  testuser.doAs(new PrivilegedExceptionAction<Object>() {
    public Object run() throws Exception {
      Configuration c = server.getConfiguration();
      RpcClient rpcClient = RpcClientFactory.createClient(c, clusterId.toString());
      ServerName sn =
          ServerName.valueOf(server.getAddress().getHostName(), server.getAddress().getPort(),
              System.currentTimeMillis());
      try {
        BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sn,
            User.getCurrent(), HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
        AuthenticationProtos.AuthenticationService.BlockingInterface stub =
            AuthenticationProtos.AuthenticationService.newBlockingStub(channel);
        AuthenticationProtos.WhoAmIResponse response =
            stub.whoAmI(null, AuthenticationProtos.WhoAmIRequest.getDefaultInstance());
        String myname = response.getUsername();
        assertEquals("testuser", myname);
        String authMethod = response.getAuthMethod();
        assertEquals("TOKEN", authMethod);
      } finally {
        rpcClient.close();
      }
      return null;
    }
  });
}
 
Developer: fengchen8086 | Project: ditb | Lines: 41 | Source: TestTokenAuthentication.java

Example 14: loginTestUser

import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
private static void loginTestUser(String username, String group) {
    UserGroupInformation currentUgi = UserGroupInformation.createUserForTesting(username, new String[]{group});
    UserGroupInformation.setLoginUser(currentUgi);
}
 
Developer: mapr-demos | Project: mapr-music | Lines: 5 | Source: MaprDbDao.java

Example 15: testRemoteException

import org.apache.hadoop.security.UserGroupInformation; // import the package/class the method depends on
@Test (timeout = 30000)
public void testRemoteException() throws Exception {
  UserGroupInformation tmpUGI = 
    UserGroupInformation.createUserForTesting("tmpname", new String[] {"mygroup"});
  MiniDFSCluster dfs = null;
  PrintStream bak = null;
  try {
    final Configuration conf = new HdfsConfiguration();
    dfs = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    FileSystem fs = dfs.getFileSystem();
    Path p = new Path("/foo");
    fs.mkdirs(p);
    fs.setPermission(p, new FsPermission((short)0700));
    bak = System.err;
    
    tmpUGI.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        FsShell fshell = new FsShell(conf);
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        PrintStream tmp = new PrintStream(out);
        System.setErr(tmp);
        String[] args = new String[2];
        args[0] = "-ls";
        args[1] = "/foo";
        int ret = ToolRunner.run(fshell, args);
        assertEquals("returned should be 1", 1, ret);
        String str = out.toString();
        assertTrue("permission denied printed", 
                   str.indexOf("Permission denied") != -1);
        out.reset();           
        return null;
      }
    });
  } finally {
    if (bak != null) {
      System.setErr(bak);
    }
    if (dfs != null) {
      dfs.shutdown();
    }
  }
}
 
Developer: naver | Project: hadoop | Lines: 44 | Source: TestDFSShell.java


Note: The org.apache.hadoop.security.UserGroupInformation.createUserForTesting examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's License. Do not reproduce without permission.