本文整理汇总了Java中org.apache.hadoop.security.UserGroupInformation.setConfiguration方法的典型用法代码示例。如果您正苦于以下问题:Java UserGroupInformation.setConfiguration方法的具体用法?Java UserGroupInformation.setConfiguration怎么用?Java UserGroupInformation.setConfiguration使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.security.UserGroupInformation
的用法示例。
在下文中一共展示了UserGroupInformation.setConfiguration方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: setup
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
@Before
/**
 * Per-test setup: builds a fresh {@code Configuration} wired for the QOP
 * under test (simple auth by default) and resets the secret-manager and
 * fallback knobs so every case starts from known defaults.
 */
@Before
public void setup() {
  final String banner = "---------------------------------";
  LOG.info(banner);
  LOG.info("Testing QOP:" + getQOPNames(qop));
  LOG.info(banner);
  conf = new Configuration();
  // the specific tests for kerberos will enable kerberos. forcing it
  // for all tests will cause tests to fail if the user has a TGT
  conf.set(HADOOP_SECURITY_AUTHENTICATION, SIMPLE.toString());
  conf.set(HADOOP_RPC_PROTECTION, getQOPNames(qop));
  if (saslPropertiesResolver != null) {
    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS,
        saslPropertiesResolver);
  }
  // Make UGI pick up the auth method configured above.
  UserGroupInformation.setConfiguration(conf);
  // Reset per-test toggles to their defaults.
  enableSecretManager = null;
  forceSecretManager = null;
  clientFallBackToSimpleAllowed = true;
}
示例2: RpcProgramMountd
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
// Constructs the NFS mountd RPC program: binds to the configured mountd port,
// seeds the export list from configuration, performs the keytab-based service
// login, and opens a DFSClient to the NameNode. Order matters: the UGI
// configuration must be set before SecurityUtil.login() so the login uses the
// supplied NfsConfiguration.
public RpcProgramMountd(NfsConfiguration config,
DatagramSocket registrationSocket, boolean allowInsecurePorts)
throws IOException {
// Note that RPC cache is not enabled
super("mountd", "localhost", config.getInt(
NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY,
NfsConfigKeys.DFS_NFS_MOUNTD_PORT_DEFAULT), PROGRAM, VERSION_1,
VERSION_3, registrationSocket, allowInsecurePorts);
// Export list starts with the single configured export point.
exports = new ArrayList<String>();
exports.add(config.get(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY,
NfsConfigKeys.DFS_NFS_EXPORT_POINT_DEFAULT));
this.hostsMatcher = NfsExports.getInstance(config);
// Mount entries are mutated from RPC handler threads; use a synchronized list.
this.mounts = Collections.synchronizedList(new ArrayList<MountEntry>());
// Point UGI at this config, then log in with the NFS keytab/principal.
UserGroupInformation.setConfiguration(config);
SecurityUtil.login(config, NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY,
NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY);
this.dfsClient = new DFSClient(NameNode.getAddress(config), config);
}
示例3: main
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
/**
 * Example driver: logs in via Kerberos keytab and recursively deletes
 * {@code /tmp/file.txt} on the {@code nameservice1} HDFS cluster, printing
 * whether the delete succeeded.
 *
 * <p>Fix over the original: the {@link FileSystem} is now closed via
 * try-with-resources, so it is released even when {@code delete} throws
 * (previously {@code fs.close()} was skipped on any IOException).
 */
public static void main(String[] args) {
  String rootPath = "hdfs://nameservice1";
  Path p = new Path(rootPath + "/tmp/file.txt");
  Configuration conf = new Configuration();
  conf.addResource("core-site.xml");
  conf.addResource("hdfs-site.xml");
  conf.addResource("yarn-site.xml");
  try {
    // If Kerberos is not enabled, comment out the following two lines.
    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation.loginUserFromKeytab("[email protected]", "E:\\星环\\hdfs.keytab");
    // try-with-resources guarantees the FileSystem handle is closed on all paths.
    try (FileSystem fs = p.getFileSystem(conf)) {
      boolean b = fs.delete(p, true);
      System.out.println(b);
    }
  } catch (IOException e) {
    e.printStackTrace();
  }
}
示例4: run
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
/**
 * Runs the job as the current login user and, on success (exit code 0),
 * prints the run summary.
 *
 * @param argv command-line arguments forwarded to the job
 * @return the job's exit code
 */
public int run(final String[] argv) throws IOException, InterruptedException {
  final Configuration conf = getConf();
  UserGroupInformation.setConfiguration(conf);
  // Execute the job under the privileges of the login user.
  final int val = UserGroupInformation.getLoginUser().doAs(
      new PrivilegedExceptionAction<Integer>() {
        public Integer run() throws Exception {
          return runJob(conf, argv);
        }
      });
  // print the gridmix summary if the run was successful
  if (val == 0) {
    System.out.print("\n\n");
    System.out.println(summarizer.toString());
  }
  return val;
}
示例5: init
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
// Initializes the metrics sink from the metrics2 property subset: reads the
// base path / source / error-handling flags, loads the Hadoop configuration,
// and — only when security is enabled — performs the keytab login.
@Override
public void init(SubsetConfiguration metrics2Properties) {
properties = metrics2Properties;
basePath = new Path(properties.getString(BASEPATH_KEY, BASEPATH_DEFAULT));
source = properties.getString(SOURCE_KEY, SOURCE_DEFAULT);
ignoreError = properties.getBoolean(IGNORE_ERROR_KEY, false);
allowAppend = properties.getBoolean(ALLOW_APPEND_KEY, false);
conf = loadConf();
// UGI must see the loaded conf before isSecurityEnabled()/login() below.
UserGroupInformation.setConfiguration(conf);
// Don't do secure setup if it's not needed.
if (UserGroupInformation.isSecurityEnabled()) {
// Validate config so that we don't get an NPE
checkForProperty(properties, KEYTAB_PROPERTY_KEY);
checkForProperty(properties, USERNAME_PROPERTY_KEY);
try {
// Login as whoever we're supposed to be and let the hostname be pulled
// from localhost. If security isn't enabled, this does nothing.
SecurityUtil.login(conf, properties.getString(KEYTAB_PROPERTY_KEY),
properties.getString(USERNAME_PROPERTY_KEY));
} catch (IOException ex) {
// Surface login failures as MetricsException, preserving the cause.
throw new MetricsException("Error logging in securely: ["
+ ex.toString() + "]", ex);
}
}
}
示例6: cleanUp
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
/**
 * Post-test teardown: stops the embedded Jetty server and resets Hadoop
 * security to the default (simple) authentication so later tests start clean.
 */
@After
public void cleanUp() throws Exception {
  jetty.stop();
  // A freshly constructed Configuration carries the default (simple) auth.
  UserGroupInformation.setConfiguration(
      new org.apache.hadoop.conf.Configuration());
}
示例7: setup
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
/**
 * Per-test setup: enables debug logging, points UGI at the test
 * configuration, turns on RM recovery backed by the in-memory state store,
 * and records the RM address used by the tests.
 */
@Before
public void setup() throws IOException {
  conf = getConf();
  LogManager.getRootLogger().setLevel(Level.DEBUG);
  UserGroupInformation.setConfiguration(conf);
  // Recovery on, but without work-preserving restart; state kept in memory.
  conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
  conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, false);
  conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
  rmAddr = new InetSocketAddress("localhost", 8032);
  // The tests below rely on more than one AM attempt being allowed.
  Assert.assertTrue(YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS > 1);
}
示例8: setUp
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
/**
 * Class-level setup: switches authentication to Kerberos and installs a
 * synthetic "LoginUser" (member of "supergroup") as the login user so no
 * real KDC is needed.
 */
@BeforeClass
public static void setUp() {
  conf = new Configuration();
  SecurityUtil.setAuthenticationMethod(KERBEROS, conf);
  UserGroupInformation.setConfiguration(conf);
  UserGroupInformation testUser = UserGroupInformation.createUserForTesting(
      "LoginUser", new String[] {"supergroup"});
  UserGroupInformation.setLoginUser(testUser);
}
示例9: setup
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
/**
 * Per-test setup: enables debug logging, creates a fresh YarnConfiguration
 * for UGI, and pins the AM max-attempts to the YARN default.
 */
@Before
public void setup() throws UnknownHostException {
  LogManager.getRootLogger().setLevel(Level.DEBUG);
  conf = new YarnConfiguration();
  UserGroupInformation.setConfiguration(conf);
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
      YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
}
示例10: testAppSubmissionWithOldDelegationTokenAfterRMRestart
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
// Verifies that a delegation token issued by an RM before restart is still
// accepted for app submission by the restarted RM (both RMs share the same
// in-memory state store, simulating a restart).
@Test (timeout = 60000)
public void testAppSubmissionWithOldDelegationTokenAfterRMRestart()
throws Exception {
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
conf.set(YarnConfiguration.RM_ADDRESS, "localhost:8032");
UserGroupInformation.setConfiguration(conf);
MemoryRMStateStore memStore = new MemoryRMStateStore();
memStore.init(conf);
MockRM rm1 = new TestSecurityMockRM(conf, memStore);
rm1.start();
// Obtain a delegation token from the first RM; the caller must appear
// kerberos-authenticated for getDelegationToken to succeed.
GetDelegationTokenRequest request1 =
GetDelegationTokenRequest.newInstance("renewer1");
UserGroupInformation.getCurrentUser().setAuthenticationMethod(
AuthMethod.KERBEROS);
GetDelegationTokenResponse response1 =
rm1.getClientRMService().getDelegationToken(request1);
Token<RMDelegationTokenIdentifier> token1 =
ConverterUtils.convertFromYarn(response1.getRMDelegationToken(), rmAddr);
// start new RM
MockRM rm2 = new TestSecurityMockRM(conf, memStore);
rm2.start();
// submit an app with the old delegation token got from previous RM.
Credentials ts = new Credentials();
ts.addToken(token1.getService(), token1);
RMApp app = rm2.submitApp(200, "name", "user",
new HashMap<ApplicationAccessType, String>(), false, "default", 1, ts);
// The app reaching ACCEPTED proves the old token was honored by rm2.
rm2.waitForState(app.getApplicationId(), RMAppState.ACCEPTED);
}
示例11: createSecuredUserDir
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
/**
 * Logs in as the HDFS principal from the keytab in {@code keytabdir} and
 * creates {@code /user/<userName>} (user: full access; group/other: default
 * permissions), then chowns it to {@code userName}:hadoop.
 *
 * <p>Best-effort: IOExceptions are printed to stderr, not rethrown
 * (preserves the original behavior).
 */
public void createSecuredUserDir(String userName, String keytabdir) {
  try {
    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation.loginUserFromKeytab(getHDFSPrincipal(""),
        keytabdir + File.separator + "hdfs.keytab");
    FileSystem fs = FileSystem.get(conf);
    Path homeDir = new Path("/user" + File.separator + userName);
    FsPermission defaults = FsPermission.getDefault();
    fs.mkdirs(homeDir, new FsPermission(FsAction.ALL,
        defaults.getGroupAction(), defaults.getOtherAction()));
    fs.setOwner(homeDir, userName, "hadoop");
  } catch (IOException e) {
    e.printStackTrace();
  }
}
示例12: loginKerberosPrincipal
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
/**
 * Performs a keytab-based Kerberos login and returns the resulting login user.
 *
 * @param krbKeytab    path to the keytab file
 * @param krbPrincipal Kerberos principal to log in as
 * @return the UGI of the freshly logged-in user
 */
private UserGroupInformation loginKerberosPrincipal(String krbKeytab, String krbPrincipal)
    throws Exception {
  Configuration kerberosConf = new Configuration();
  kerberosConf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(kerberosConf);
  UserGroupInformation.loginUserFromKeytab(krbPrincipal, krbKeytab);
  return UserGroupInformation.getLoginUser();
}
示例13: getConfiguration
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
/**
 * Creates the Hadoop configuration object from the properties specified for
 * the tier store: loads the optional hdfs-site/hadoop-site XML resources,
 * copies every raw property into the Configuration verbatim, and — when a
 * username and keytab can be resolved — enables Kerberos and logs in.
 *
 * @param props tier-store properties
 * @return the populated configuration object
 * @throws IOException if a site file URL is malformed or the keytab login fails
 */
public static Configuration getConfiguration(final Properties props) throws IOException {
  final Configuration conf = new Configuration();
  final String hdfsSiteXMLPath = props.getProperty(CommonConfig.HDFS_SITE_XML_PATH);
  final String hadoopSiteXMLPath = props.getProperty(CommonConfig.HADOOP_SITE_XML_PATH);
  if (hdfsSiteXMLPath != null) {
    conf.addResource(Paths.get(hdfsSiteXMLPath).toUri().toURL());
  }
  if (hadoopSiteXMLPath != null) {
    conf.addResource(Paths.get(hadoopSiteXMLPath).toUri().toURL());
  }
  // Mirror every property into the Configuration as-is.
  props.entrySet().forEach(entry ->
      conf.set(String.valueOf(entry.getKey()), String.valueOf(entry.getValue())));
  // Resolve secured-login credentials, falling back to the resource-level
  // username/password entries when kerberos auth is explicitly enabled.
  String userName = props.getProperty(CommonConfig.USER_NAME);
  String keytabPath = props.getProperty(CommonConfig.KEYTAB_PATH);
  if ((userName == null || keytabPath == null)
      && props.containsKey(ENABLE_KERBEROS_AUTHC)
      && Boolean.parseBoolean(props.getProperty(ENABLE_KERBEROS_AUTHC))) {
    userName = props.getProperty(ResourceConstants.USER_NAME);
    keytabPath = props.getProperty(ResourceConstants.PASSWORD);
  }
  // With both a username and a keytab, switch to kerberos and log in.
  if (userName != null && keytabPath != null) {
    conf.set("hadoop.security.authentication", "kerberos");
    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation.loginUserFromKeytab(userName, keytabPath);
  }
  return conf;
}
示例14: SchemaFetch
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
// Builds a SchemaFetch: opens the schema/sample output writers, then obtains
// a FileSystem — via keytab-based Kerberos login when a keytab location is
// configured, otherwise (or on login failure) via the default Configuration.
public SchemaFetch(Configuration conf)
throws IOException, InterruptedException {
logger = LoggerFactory.getLogger(getClass());
this.conf = conf;
schemaFileWriter = new FileWriter(this.conf.get(Constant.HDFS_SCHEMA_REMOTE_PATH_KEY));
sampleFileWriter = new FileWriter(this.conf.get(Constant.HDFS_SAMPLE_REMOTE_PATH_KEY));
// login from kerberos, get the file system
String principal = this.conf.get(Constant.HDFS_REMOTE_USER_KEY);
String keyLocation = this.conf.get(Constant.HDFS_REMOTE_KEYTAB_LOCATION_KEY, null);
if (keyLocation == null) {
// No keytab configured: skip the login entirely.
System.out.println("No keytab file location specified, will ignore the kerberos login process");
fs = FileSystem.get(new Configuration());
} else {
try {
Configuration hdfs_conf = new Configuration();
// NOTE(review): value is mixed-case "Kerberos"; Hadoop appears to parse
// this case-insensitively, but the conventional value is lowercase
// "kerberos" — confirm before relying on it.
hdfs_conf.set("hadoop.security.authentication", "Kerberos");
hdfs_conf.set("dfs.namenode.kerberos.principal.pattern", "*");
UserGroupInformation.setConfiguration(hdfs_conf);
UserGroupInformation.loginUserFromKeytab(principal, keyLocation);
fs = FileSystem.get(hdfs_conf);
} catch (IOException e) {
// Login failed: report it and fall back to an unauthenticated FileSystem.
// NOTE(review): messages contain typos ("Priciple", "kerbero"); left
// unchanged because they are runtime strings.
System.out
.println("Failed, Try to login through kerberos. Priciple: " + principal + " keytab location : " + keyLocation);
e.printStackTrace();
System.out.println("Use default, assume no kerbero needed");
fs = FileSystem.get(new Configuration());
}
}
// TODO Write to hdfs
// String sampleDataFolder = "/projects/wherehows/hdfs/sample_data";
// String cluster = this.conf.get("hdfs.cluster");
// sampleDataAvroWriter = new AvroWriter(this.fs, sampleDataFolder + "/" + cluster, SampleDataRecord.class);
// String schemaFolder = this.conf.get("hdfs.schema_location");
fileAnalyzerFactory = new FileAnalyzerFactory(this.fs);
}
示例15: testDelegationTokenRestoredInDelegationTokenRenewer
import org.apache.hadoop.security.UserGroupInformation; //导入方法依赖的package包/类
// Verifies that delegation tokens attached to a submitted app are persisted
// in the RM state store and repopulated into the DelegationTokenRenewer of a
// restarted RM (rm2 shares rm1's in-memory state store).
@Test (timeout = 60000)
public void testDelegationTokenRestoredInDelegationTokenRenewer()
throws Exception {
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
UserGroupInformation.setConfiguration(conf);
MemoryRMStateStore memStore = new MemoryRMStateStore();
memStore.init(conf);
RMState rmState = memStore.getState();
Map<ApplicationId, ApplicationStateData> rmAppState =
rmState.getApplicationState();
MockRM rm1 = new TestSecurityMockRM(conf, memStore);
rm1.start();
// tokenSet tracks what we expect both RMs' renewers to hold.
HashSet<Token<RMDelegationTokenIdentifier>> tokenSet =
new HashSet<Token<RMDelegationTokenIdentifier>>();
// create an empty credential
Credentials ts = new Credentials();
// create tokens and add into credential
Text userText1 = new Text("user1");
RMDelegationTokenIdentifier dtId1 =
new RMDelegationTokenIdentifier(userText1, new Text("renewer1"),
userText1);
Token<RMDelegationTokenIdentifier> token1 =
new Token<RMDelegationTokenIdentifier>(dtId1,
rm1.getRMContext().getRMDelegationTokenSecretManager());
SecurityUtil.setTokenService(token1, rmAddr);
ts.addToken(userText1, token1);
tokenSet.add(token1);
Text userText2 = new Text("user2");
RMDelegationTokenIdentifier dtId2 =
new RMDelegationTokenIdentifier(userText2, new Text("renewer2"),
userText2);
Token<RMDelegationTokenIdentifier> token2 =
new Token<RMDelegationTokenIdentifier>(dtId2,
rm1.getRMContext().getRMDelegationTokenSecretManager());
SecurityUtil.setTokenService(token2, rmAddr);
ts.addToken(userText2, token2);
tokenSet.add(token2);
// submit an app with customized credential
RMApp app = rm1.submitApp(200, "name", "user",
new HashMap<ApplicationAccessType, String>(), false, "default", 1, ts);
// assert app info is saved
ApplicationStateData appState = rmAppState.get(app.getApplicationId());
Assert.assertNotNull(appState);
// assert delegation tokens exist in rm1 DelegationTokenRenewr
Assert.assertEquals(tokenSet, rm1.getRMContext()
.getDelegationTokenRenewer().getDelegationTokens());
// assert delegation tokens are saved
DataOutputBuffer dob = new DataOutputBuffer();
ts.writeTokenStorageToStream(dob);
ByteBuffer securityTokens =
ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
securityTokens.rewind();
// Saved AM container tokens must match the serialized credentials exactly.
Assert.assertEquals(securityTokens, appState
.getApplicationSubmissionContext().getAMContainerSpec()
.getTokens());
// start new RM
MockRM rm2 = new TestSecurityMockRM(conf, memStore);
rm2.start();
// Need to wait for a while as now token renewal happens on another thread
// and is asynchronous in nature.
waitForTokensToBeRenewed(rm2);
// verify tokens are properly populated back to rm2 DelegationTokenRenewer
Assert.assertEquals(tokenSet, rm2.getRMContext()
.getDelegationTokenRenewer().getDelegationTokens());
}