This article collects typical usage examples of the Java method org.apache.hadoop.security.authentication.util.KerberosName.setRules. If you are wondering what KerberosName.setRules does, how to call it, or where to find working samples, the hand-picked examples below should help; the enclosing class, org.apache.hadoop.security.authentication.util.KerberosName, is also worth a closer look.
A total of 9 code examples of KerberosName.setRules are shown below, ordered by popularity by default.
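As a quick orientation before the examples: setRules installs a set of auth_to_local-style mapping rules that every KerberosName instance then uses when translating a Kerberos principal into a short user name. The following minimal sketch is not taken from any of the examples below; the class name and principals are invented for illustration, and it assumes hadoop-auth is on the classpath.

import java.io.IOException;
import org.apache.hadoop.security.authentication.util.KerberosName;

public class SetRulesSketch {
  public static void main(String[] args) throws IOException {
    // RULE:[n:format] applies to principals with n components before the realm;
    // here both one- and two-component principals map to their first component.
    KerberosName.setRules("RULE:[1:$1]\nRULE:[2:$1]");

    // Prints "alice": the realm is stripped by the first rule.
    System.out.println(new KerberosName("alice@EXAMPLE.COM").getShortName());

    // Prints "hdfs": the host component is dropped by the second rule.
    System.out.println(new KerberosName("hdfs/namenode.example.com@EXAMPLE.COM").getShortName());
  }
}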
Example 1: testEnsureInitWithRules

import org.apache.hadoop.security.authentication.util.KerberosName; // import the class this method depends on

@Test (timeout = 30000)
public void testEnsureInitWithRules() throws IOException {
  String rules = "RULE:[1:RULE1]";
  // trigger implicit init, rules should init
  UserGroupInformation.reset();
  assertFalse(KerberosName.hasRulesBeenSet());
  UserGroupInformation.createUserForTesting("someone", new String[0]);
  assertTrue(KerberosName.hasRulesBeenSet());
  // set a rule, trigger implicit init, rule should not change
  UserGroupInformation.reset();
  KerberosName.setRules(rules);
  assertTrue(KerberosName.hasRulesBeenSet());
  assertEquals(rules, KerberosName.getRules());
  UserGroupInformation.createUserForTesting("someone", new String[0]);
  assertEquals(rules, KerberosName.getRules());
}
Example 2: testSetConfigWithRules

import org.apache.hadoop.security.authentication.util.KerberosName; // import the class this method depends on

@Test (timeout = 30000)
public void testSetConfigWithRules() {
  String[] rules = { "RULE:[1:TEST1]", "RULE:[1:TEST2]", "RULE:[1:TEST3]" };
  // explicitly set a rule
  UserGroupInformation.reset();
  assertFalse(KerberosName.hasRulesBeenSet());
  KerberosName.setRules(rules[0]);
  assertTrue(KerberosName.hasRulesBeenSet());
  assertEquals(rules[0], KerberosName.getRules());
  // implicit init should honor rules already being set
  UserGroupInformation.createUserForTesting("someone", new String[0]);
  assertEquals(rules[0], KerberosName.getRules());
  // set conf, should override
  conf.set(HADOOP_SECURITY_AUTH_TO_LOCAL, rules[1]);
  UserGroupInformation.setConfiguration(conf);
  assertEquals(rules[1], KerberosName.getRules());
  // set conf again, should override again
  conf.set(HADOOP_SECURITY_AUTH_TO_LOCAL, rules[2]);
  UserGroupInformation.setConfiguration(conf);
  assertEquals(rules[2], KerberosName.getRules());
  // implicit init should honor rules already being set
  UserGroupInformation.createUserForTesting("someone", new String[0]);
  assertEquals(rules[2], KerberosName.getRules());
}
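Example 2 also documents a precedence rule worth calling out: rules supplied through a Configuration override rules that were set programmatically. The sketch below replays that behaviour in a hypothetical stand-alone class, using the literal key name hadoop.security.auth_to_local instead of the HADOOP_SECURITY_AUTH_TO_LOCAL constant used in the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.util.KerberosName;

public class AuthToLocalOverrideSketch {
  public static void main(String[] args) {
    // Rules installed directly through the API...
    KerberosName.setRules("RULE:[1:$1]");

    // ...are replaced once a Configuration carrying hadoop.security.auth_to_local
    // is pushed through UserGroupInformation.setConfiguration.
    Configuration conf = new Configuration();
    conf.set("hadoop.security.auth_to_local", "RULE:[2:$1]\nDEFAULT");
    UserGroupInformation.setConfiguration(conf);

    // Expected to print the conf-supplied rules, as asserted in Example 2.
    System.out.println(KerberosName.getRules());
  }
}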
Example 3: buildSpnegoConfiguration

import org.apache.hadoop.security.authentication.util.KerberosName; // import the class this method depends on

private static Configuration buildSpnegoConfiguration(String serverPrincipal,
    File serverKeytab) {
  Configuration conf = new Configuration();
  KerberosName.setRules("DEFAULT");
  conf.setInt(HttpServer.HTTP_MAX_THREADS, TestHttpServer.MAX_THREADS);
  // Enable Kerberos (pre-req)
  conf.set("hbase.security.authentication", "kerberos");
  conf.set(HttpServer.HTTP_UI_AUTHENTICATION, "kerberos");
  conf.set(HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY, serverPrincipal);
  conf.set(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY, serverKeytab.getAbsolutePath());
  return conf;
}
Example 4: testIsValidRequestor

import org.apache.hadoop.security.authentication.util.KerberosName; // import the class this method depends on

@Test
public void testIsValidRequestor() throws IOException {
  Configuration conf = new HdfsConfiguration();
  KerberosName.setRules("RULE:[1:$1]\nRULE:[2:$1]");
  // Set up generic HA configs.
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
      "ns1"), "nn1,nn2");
  // Set up NN1 HA configs.
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
      "ns1", "nn1"), "host1:1234");
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,
      "ns1", "nn1"), "hdfs/_HOST@TEST-REALM.COM");
  // Set up NN2 HA configs.
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
      "ns1", "nn2"), "host2:1234");
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,
      "ns1", "nn2"), "hdfs/_HOST@TEST-REALM.COM");
  // Initialize this conf object as though we're running on NN1.
  NameNode.initializeGenericKeys(conf, "ns1", "nn1");
  AccessControlList acls = Mockito.mock(AccessControlList.class);
  Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
  ServletContext context = Mockito.mock(ServletContext.class);
  Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
  // Make sure that NN2 is considered a valid fsimage/edits requestor.
  assertTrue(ImageServlet.isValidRequestor(context,
      "hdfs/host2@TEST-REALM.COM", conf));
  // Mark atm as an admin.
  Mockito.when(acls.isUserAllowed(Mockito.argThat(new ArgumentMatcher<UserGroupInformation>() {
    @Override
    public boolean matches(Object argument) {
      return ((UserGroupInformation) argument).getShortUserName().equals("atm");
    }
  }))).thenReturn(true);
  // Make sure that NN2 is still considered a valid requestor.
  assertTrue(ImageServlet.isValidRequestor(context,
      "hdfs/host2@TEST-REALM.COM", conf));
  // Make sure an admin is considered a valid requestor.
  assertTrue(ImageServlet.isValidRequestor(context,
      "atm@TEST-REALM.COM", conf));
  // Make sure other users are *not* considered valid requestors.
  assertFalse(ImageServlet.isValidRequestor(context,
      "todd@TEST-REALM.COM", conf));
}
Example 5: testIsValidRequestor

import org.apache.hadoop.security.authentication.util.KerberosName; // import the class this method depends on

@Test
public void testIsValidRequestor() throws IOException {
  Configuration conf = new HdfsConfiguration();
  KerberosName.setRules("RULE:[1:$1]\nRULE:[2:$1]");
  // Set up generic HA configs.
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
      "ns1"), "nn1,nn2");
  // Set up NN1 HA configs.
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
      "ns1", "nn1"), "host1:1234");
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,
      "ns1", "nn1"), "hdfs/_HOST@TEST-REALM.COM");
  // Set up NN2 HA configs.
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
      "ns1", "nn2"), "host2:1234");
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,
      "ns1", "nn2"), "hdfs/_HOST@TEST-REALM.COM");
  // Initialize this conf object as though we're running on NN1.
  NameNode.initializeGenericKeys(conf, "ns1", "nn1");
  AccessControlList acls = Mockito.mock(AccessControlList.class);
  Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
  ServletContext context = Mockito.mock(ServletContext.class);
  Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
  // Make sure that NN2 is considered a valid fsimage/edits requestor.
  assertTrue(ImageServlet.isValidRequestor(context,
      "hdfs/host2@TEST-REALM.COM", conf));
  // Mark atm as an admin.
  Mockito.when(acls.isUserAllowed(Mockito.argThat(new ArgumentMatcher<UserGroupInformation>() {
    @Override
    public boolean matches(Object argument) {
      return ((UserGroupInformation) argument).getShortUserName().equals("atm");
    }
  }))).thenReturn(true);
  // Make sure that NN2 is still considered a valid requestor.
  assertTrue(ImageServlet.isValidRequestor(context,
      "hdfs/host2@TEST-REALM.COM", conf));
  // Make sure an admin is considered a valid requestor.
  assertTrue(ImageServlet.isValidRequestor(context,
      "atm@TEST-REALM.COM", conf));
  // Make sure other users are *not* considered valid requestors.
  assertFalse(ImageServlet.isValidRequestor(context,
      "todd@TEST-REALM.COM", conf));
}
Example 6: testIsValidRequestor

import org.apache.hadoop.security.authentication.util.KerberosName; // import the class this method depends on

@Test
public void testIsValidRequestor() throws IOException {
  Configuration conf = new HdfsConfiguration();
  KerberosName.setRules("RULE:[1:$1]\nRULE:[2:$1]");
  // Set up generic HA configs.
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
      "ns1"), "nn1,nn2");
  // Set up NN1 HA configs.
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
      "ns1", "nn1"), "host1:1234");
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
      "ns1", "nn1"), "hdfs/_HOST@TEST-REALM.COM");
  // Set up NN2 HA configs.
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
      "ns1", "nn2"), "host2:1234");
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
      "ns1", "nn2"), "hdfs/_HOST@TEST-REALM.COM");
  // Initialize this conf object as though we're running on NN1.
  NameNode.initializeGenericKeys(conf, "ns1", "nn1");
  AccessControlList acls = Mockito.mock(AccessControlList.class);
  Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
  ServletContext context = Mockito.mock(ServletContext.class);
  Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
  // Make sure that NN2 is considered a valid fsimage/edits requestor.
  assertTrue(GetImageServlet.isValidRequestor(context,
      "hdfs/host2@TEST-REALM.COM", conf));
  // Mark atm as an admin.
  Mockito.when(acls.isUserAllowed(Mockito.argThat(new ArgumentMatcher<UserGroupInformation>() {
    @Override
    public boolean matches(Object argument) {
      return ((UserGroupInformation) argument).getShortUserName().equals("atm");
    }
  }))).thenReturn(true);
  // Make sure that NN2 is still considered a valid requestor.
  assertTrue(GetImageServlet.isValidRequestor(context,
      "hdfs/host2@TEST-REALM.COM", conf));
  // Make sure an admin is considered a valid requestor.
  assertTrue(GetImageServlet.isValidRequestor(context,
      "atm@TEST-REALM.COM", conf));
  // Make sure other users are *not* considered valid requestors.
  assertFalse(GetImageServlet.isValidRequestor(context,
      "todd@TEST-REALM.COM", conf));
}
Example 7: testIsValidRequestor

import org.apache.hadoop.security.authentication.util.KerberosName; // import the class this method depends on

@Test
public void testIsValidRequestor() throws IOException {
  Configuration conf = new HdfsConfiguration();
  KerberosName.setRules("RULE:[1:$1]\nRULE:[2:$1]");
  // Set up generic HA configs.
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
      "ns1"), "nn1,nn2");
  // Set up NN1 HA configs.
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
      "ns1", "nn1"), "host1:1234");
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
      "ns1", "nn1"), "hdfs/_HOST@TEST-REALM.COM");
  // Set up NN2 HA configs.
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
      "ns1", "nn2"), "host2:1234");
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
      "ns1", "nn2"), "hdfs/_HOST@TEST-REALM.COM");
  // Initialize this conf object as though we're running on NN1.
  NameNode.initializeGenericKeys(conf, "ns1", "nn1");
  AccessControlList acls = Mockito.mock(AccessControlList.class);
  Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
  ServletContext context = Mockito.mock(ServletContext.class);
  Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
  // Make sure that NN2 is considered a valid fsimage/edits requestor.
  assertTrue(GetImageServlet.isValidRequestor(context,
      "hdfs/host2@TEST-REALM.COM", conf));
  // Mark atm as an admin.
  Mockito.when(acls.isUserAllowed(Mockito.argThat(new ArgumentMatcher<UserGroupInformation>() {
    @Override
    public boolean matches(Object argument) {
      return ((UserGroupInformation) argument).getShortUserName().equals("atm");
    }
  }))).thenReturn(true);
  // Make sure that NN2 is still considered a valid requestor.
  assertTrue(GetImageServlet.isValidRequestor(context,
      "hdfs/host2@TEST-REALM.COM", conf));
  // Make sure an admin is considered a valid requestor.
  assertTrue(GetImageServlet.isValidRequestor(context,
      "atm@TEST-REALM.COM", conf));
  // Make sure other users are *not* considered valid requestors.
  assertFalse(GetImageServlet.isValidRequestor(context,
      "todd@TEST-REALM.COM", conf));
}
Example 8: testIsValidRequestor

import org.apache.hadoop.security.authentication.util.KerberosName; // import the class this method depends on

@Test
public void testIsValidRequestor() throws IOException {
  Configuration conf = new HdfsConfiguration();
  KerberosName.setRules("RULE:[1:$1]\nRULE:[2:$1]");
  // Set up generic HA configs.
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
      "ns1"), "nn1,nn2");
  // Set up NN1 HA configs.
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
      "ns1", "nn1"), "host1:1234");
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
      "ns1", "nn1"), "hdfs/_HOST@TEST-REALM.COM");
  // Set up NN2 HA configs.
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
      "ns1", "nn2"), "host2:1234");
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
      "ns1", "nn2"), "hdfs/_HOST@TEST-REALM.COM");
  // Initialize this conf object as though we're running on NN1.
  NameNode.initializeGenericKeys(conf, "ns1", "nn1");
  AccessControlList acls = Mockito.mock(AccessControlList.class);
  Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
  ServletContext context = Mockito.mock(ServletContext.class);
  Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
  // Make sure that NN2 is considered a valid fsimage/edits requestor.
  assertTrue(ImageServlet.isValidRequestor(context,
      "hdfs/host2@TEST-REALM.COM", conf));
  // Mark atm as an admin.
  Mockito.when(acls.isUserAllowed(Mockito.argThat(new ArgumentMatcher<UserGroupInformation>() {
    @Override
    public boolean matches(Object argument) {
      return ((UserGroupInformation) argument).getShortUserName().equals("atm");
    }
  }))).thenReturn(true);
  // Make sure that NN2 is still considered a valid requestor.
  assertTrue(ImageServlet.isValidRequestor(context,
      "hdfs/host2@TEST-REALM.COM", conf));
  // Make sure an admin is considered a valid requestor.
  assertTrue(ImageServlet.isValidRequestor(context,
      "atm@TEST-REALM.COM", conf));
  // Make sure other users are *not* considered valid requestors.
  assertFalse(ImageServlet.isValidRequestor(context,
      "todd@TEST-REALM.COM", conf));
}
Example 9: initHadoopSecurity

import org.apache.hadoop.security.authentication.util.KerberosName; // import the class this method depends on

/**
 * Init hadoop security by setting up the UGI config
 */
public static void initHadoopSecurity() {
  UserGroupInformation.setConfiguration(CONF);
  KerberosName.setRules(kerberosRule);
}
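Example 9 relies on CONF and kerberosRule fields defined elsewhere in its class, which are not shown here. As a rough self-contained sketch, with placeholder values standing in for those fields, the same two-step initialization could be wired up like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.util.KerberosName;

public class HadoopSecurityInitSketch {
  // Placeholder stand-ins for the CONF and kerberosRule fields used in Example 9.
  private static final Configuration CONF = new Configuration();
  private static final String KERBEROS_RULE = "RULE:[1:$1]\nRULE:[2:$1]\nDEFAULT";

  public static void initHadoopSecurity() {
    // Let UGI pick up its security settings (including any
    // hadoop.security.auth_to_local value) from the configuration...
    UserGroupInformation.setConfiguration(CONF);
    // ...then install the desired principal-to-username mapping rules on top.
    KerberosName.setRules(KERBEROS_RULE);
  }

  public static void main(String[] args) {
    initHadoopSecurity();
    // Prints the rules now in effect for principal mapping.
    System.out.println(KerberosName.getRules());
  }
}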