This article collects typical usage examples of the Java class org.apache.hadoop.security.authorize.ServiceAuthorizationManager. If you are wondering what ServiceAuthorizationManager does, how to use it, or where to find usage examples, the curated code examples below may help.
The ServiceAuthorizationManager class belongs to the org.apache.hadoop.security.authorize package. Fifteen code examples of the class are shown below, ordered by popularity by default.
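Before diving into the individual examples, here is a minimal, self-contained sketch of how the pieces usually fit together: service-level authorization is switched on through the configuration key held in ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG (hadoop.security.authorization), the per-protocol ACLs are loaded with refresh(), and each incoming caller is then checked with authorize(). This sketch is not taken from any of the projects quoted below; SampleProtocol, SamplePolicyProvider, and the ACL key security.sample.protocol.acl are hypothetical names, and it assumes a Hadoop 2.x-style API in which refresh() and authorize() are instance methods (older releases, as in examples 5 and 13 below, exposed authorize() statically).

import java.net.InetAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;

public class ServiceAuthorizationSketch {

  /** Hypothetical protocol interface protected by an ACL. */
  public interface SampleProtocol {
  }

  /** Hypothetical policy provider mapping an ACL key to the protocol it protects. */
  public static class SamplePolicyProvider extends PolicyProvider {
    @Override
    public Service[] getServices() {
      return new Service[] {
          new Service("security.sample.protocol.acl", SampleProtocol.class)
      };
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Turn on service-level authorization (hadoop.security.authorization).
    conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, true);
    // ACL format is "users groups": allow user "alice" and members of group "admins".
    conf.set("security.sample.protocol.acl", "alice admins");

    // Load the ACLs for every protocol registered by the policy provider.
    ServiceAuthorizationManager authManager = new ServiceAuthorizationManager();
    authManager.refresh(conf, new SamplePolicyProvider());

    // Check an incoming caller against the ACL; authorize() throws
    // AuthorizationException when the caller is not allowed to talk the protocol.
    UserGroupInformation caller = UserGroupInformation.getCurrentUser();
    try {
      authManager.authorize(caller, SampleProtocol.class, conf,
          InetAddress.getLoopbackAddress());
      System.out.println(caller.getShortUserName() + " is authorized");
    } catch (AuthorizationException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}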
Example 1: start
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; // import the required package/class
/** Starts the service. Must be called before any calls will be handled. */
@Override
public synchronized void start() {
  if (started) return;
  authTokenSecretMgr = createSecretManager();
  if (authTokenSecretMgr != null) {
    setSecretManager(authTokenSecretMgr);
    authTokenSecretMgr.start();
  }
  this.authManager = new ServiceAuthorizationManager();
  HBasePolicyProvider.init(conf, authManager);
  responder.start();
  listener.start();
  scheduler.start();
  started = true;
}
Example 2: testServiceLevelAuthorization
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; // import the required package/class
public void testServiceLevelAuthorization() throws Exception {
  MiniMRCluster mr = null;
  try {
    // Turn on service-level authorization
    final JobConf conf = new JobConf();
    conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
                  MapReducePolicyProvider.class, PolicyProvider.class);
    conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG,
                    true);
    // Start the mini mr cluster
    mr = new MiniMRCluster(1, "file:///", 1, null, null, conf);
    // Invoke MRAdmin commands
    MRAdmin mrAdmin = new MRAdmin(mr.createJobConf());
    assertEquals(0, mrAdmin.run(new String[] { "-refreshQueues" }));
    assertEquals(0, mrAdmin.run(new String[] { "-refreshNodes" }));
  } finally {
    if (mr != null) {
      mr.shutdown();
    }
  }
}
Developer: Nextzero | Project: hadoop-2.6.0-cdh5.4.3 | Lines of code: 24 | Source file: TestAdminOperationsProtocolWithServiceAuthorization.java
Example 3: start
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; // import the required package/class
/** Starts the service. Must be called before any calls will be handled. */
@Override
public synchronized void start() {
  if (started) return;
  AuthenticationTokenSecretManager mgr = createSecretManager();
  if (mgr != null) {
    setSecretManager(mgr);
    mgr.start();
  }
  this.authManager = new ServiceAuthorizationManager();
  HBasePolicyProvider.init(conf, authManager);
  responder.start();
  listener.start();
  scheduler.start();
  started = true;
}
Example 4: startThreads
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; // import the required package/class
/**
 * Starts the service threads but does not allow requests to be responded yet.
 * Client will get {@link ServerNotRunningYetException} instead.
 */
@Override
public synchronized void startThreads() {
  AuthenticationTokenSecretManager mgr = createSecretManager();
  if (mgr != null) {
    setSecretManager(mgr);
    mgr.start();
  }
  this.authManager = new ServiceAuthorizationManager();
  HBasePolicyProvider.init(conf, authManager);
  responder.start();
  listener.start();
  handlers = startHandlers(callQueue, handlerCount);
  priorityHandlers = startHandlers(priorityCallQueue, priorityHandlerCount);
  replicationHandlers = startHandlers(replicationQueue, numOfReplicationHandlers);
}
Example 5: authorize
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; // import the required package/class
/**
 * Authorize the incoming client connection.
 *
 * @param user client user
 * @param connection incoming connection
 * @param addr InetAddress of incoming connection
 * @throws AuthorizationException when the client isn't authorized to talk the protocol
 */
public void authorize(UserGroupInformation user,
                      ConnectionHeader connection,
                      InetAddress addr
                      ) throws AuthorizationException {
  if (authorize) {
    Class<?> protocol = null;
    try {
      protocol = getProtocolClass(connection.getProtocol(), getConf());
    } catch (ClassNotFoundException cfne) {
      throw new AuthorizationException("Unknown protocol: " +
          connection.getProtocol());
    }
    ServiceAuthorizationManager.authorize(user, protocol, getConf(), addr);
  }
}
Example 6: verifyServiceACLsRefresh
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; // import the required package/class
private void verifyServiceACLsRefresh(ServiceAuthorizationManager manager,
    Class<?> protocol, String aclString) {
  for (Class<?> protocolClass : manager.getProtocolsWithAcls()) {
    AccessControlList accessList =
        manager.getProtocolsAcls(protocolClass);
    if (protocolClass == protocol) {
      Assert.assertEquals(accessList.getAclString(),
          aclString);
    } else {
      Assert.assertEquals(accessList.getAclString(), "*");
    }
  }
}
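For context, here is a hedged sketch of how a helper like the one above might be exercised in a test: load the ACLs with refresh(), then read back what getProtocolsWithAcls() and getProtocolsAcls() picked up. The protocol interface, policy provider, and ACL key below are hypothetical, and the sketch again assumes the instance-method API of ServiceAuthorizationManager.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
import org.junit.Assert;
import org.junit.Test;

public class ServiceAclRefreshSketchTest {

  /** Hypothetical protocol guarded by the ACL key below. */
  interface SampleProtocol {
  }

  @Test
  public void refreshPicksUpConfiguredAcl() {
    Configuration conf = new Configuration();
    // Restrict the hypothetical protocol to user "alice" and group "admins".
    conf.set("security.sample.protocol.acl", "alice admins");

    ServiceAuthorizationManager manager = new ServiceAuthorizationManager();
    manager.refresh(conf, new PolicyProvider() {
      @Override
      public Service[] getServices() {
        return new Service[] {
            new Service("security.sample.protocol.acl", SampleProtocol.class)
        };
      }
    });

    // refresh() populates the protocol-to-ACL map that the helper above iterates over.
    Assert.assertTrue(manager.getProtocolsWithAcls().contains(SampleProtocol.class));
    AccessControlList acl = manager.getProtocolsAcls(SampleProtocol.class);
    Assert.assertEquals("alice admins", acl.getAclString());
  }
}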
Example 7: init
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; // import the required package/class
public static void init(Configuration conf, ServiceAuthorizationManager authManager) {
  // set service-level authorization security policy
  System.setProperty("hadoop.policy.file", "hbase-policy.xml");
  if (conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    authManager.refresh(conf, new HBasePolicyProvider());
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  }
}
Example 8: Server
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; // import the required package/class
/** Construct an RPC server.
 * @param instance the instance whose methods will be called
 * @param conf the configuration to use
 * @param bindAddress the address to bind on to listen for connection
 * @param port the port to listen for connections on
 * @param numHandlers the number of method handler threads to run
 * @param verbose whether each call should be logged
 * @throws IOException e
 */
public Server(Object instance, final Class<?>[] ifaces,
              Configuration conf, String bindAddress, int port,
              int numHandlers, int metaHandlerCount, boolean verbose,
              int highPriorityLevel) throws IOException {
  super(bindAddress, port, Invocation.class, numHandlers, metaHandlerCount,
      conf, classNameBase(instance.getClass().getName()),
      highPriorityLevel);
  this.instance = instance;
  this.implementation = instance.getClass();
  this.verbose = verbose;
  this.ifaces = ifaces;
  // create metrics for the advertised interfaces this server implements.
  String [] metricSuffixes = new String [] {ABOVE_ONE_SEC_METRIC};
  this.rpcMetrics.createMetrics(this.ifaces, false, metricSuffixes);
  this.authorize =
      conf.getBoolean(
          ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false);
  this.warnResponseTime = conf.getInt(WARN_RESPONSE_TIME,
      DEFAULT_WARN_RESPONSE_TIME);
  this.warnResponseSize = conf.getInt(WARN_RESPONSE_SIZE,
      DEFAULT_WARN_RESPONSE_SIZE);
}
Example 9: refreshServiceAcl
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; // import the required package/class
@Override
public void refreshServiceAcl() throws IOException {
  if (!conf.getBoolean(
      ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    throw new AuthorizationException("Service Level Authorization not enabled!");
  }
  SecurityUtil.getPolicy().refresh();
}
Example 10: setUp
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; // import the required package/class
public void setUp() throws Exception {
  // Read the testConfig.xml file
  readTestConfigFile();
  // Start up the mini dfs cluster
  boolean success = false;
  conf = new Configuration();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HadoopPolicyProvider.class, PolicyProvider.class);
  conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG,
      true);
  dfsCluster = new MiniDFSCluster(conf, 1, true, null);
  namenode = conf.get("fs.default.name", "file:///");
  clitestDataDir = new File(TEST_CACHE_DATA_DIR).
      toURI().toString().replace(' ', '+');
  username = System.getProperty("user.name");
  FileSystem fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
      fs instanceof DistributedFileSystem);
  dfs = (DistributedFileSystem) fs;
  // Start up mini mr cluster
  JobConf mrConf = new JobConf(conf);
  mrCluster = new MiniMRCluster(1, dfsCluster.getFileSystem().getUri().toString(), 1,
      null, null, mrConf);
  jobtracker = mrCluster.createJobConf().get("mapred.job.tracker", "local");
  success = true;
  assertTrue("Error setting up Mini DFS & MR clusters", success);
}
Example 11: testAuthorization
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; // import the required package/class
@Test
public void testAuthorization() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(
      ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, true);
  // Expect to succeed
  conf.set(ACL_CONFIG, "*");
  doRPCs(conf, false);
  // Reset authorization to expect failure
  conf.set(ACL_CONFIG, "invalid invalid");
  doRPCs(conf, true);
}
Example 12: Server
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; // import the required package/class
/** Construct an RPC server.
 * @param instance the instance whose methods will be called
 * @param conf the configuration to use
 * @param bindAddress the address to bind on to listen for connection
 * @param port the port to listen for connections on
 * @param numHandlers the number of method handler threads to run
 * @param verbose whether each call should be logged
 * @param supportOldJobConf supports server to deserialize old job conf
 */
public Server(Object instance, Configuration conf, String bindAddress,
    int port, int numHandlers, boolean verbose, boolean supportOldJobConf)
    throws IOException {
  super(bindAddress, port, Invocation.class, numHandlers, conf,
      classNameBase(instance.getClass().getName()), supportOldJobConf);
  this.instance = instance;
  this.fastProtocol = instance instanceof FastProtocol;
  this.verbose = verbose;
  this.authorize =
      conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG,
          false);
}
Example 13: authorize
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; // import the required package/class
@Override
public void authorize(Subject user, ConnectionHeader connection)
    throws AuthorizationException {
  if (authorize) {
    Class<?> protocol = null;
    try {
      protocol = getProtocolClass(connection.getProtocol(), getConf());
    } catch (ClassNotFoundException cfne) {
      throw new AuthorizationException("Unknown protocol: " +
          connection.getProtocol());
    }
    ServiceAuthorizationManager.authorize(user, protocol);
  }
}
Example 14: initialize
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; // import the required package/class
/**
 * Initialize name-node.
 *
 */
protected void initialize() throws IOException {
  // set service-level authorization security policy
  if (serviceAuthEnabled =
        getConf().getBoolean(
            ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    PolicyProvider policyProvider =
        (PolicyProvider)(ReflectionUtils.newInstance(
            getConf().getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
                HDFSPolicyProvider.class, PolicyProvider.class),
            getConf()));
    SecurityUtil.setPolicy(new ConfiguredPolicy(getConf(), policyProvider));
  }
  // This is a check that the port is free
  // create a socket and bind to it, throw exception if port is busy
  // This has to be done before we are reading Namesystem not to waste time and fail fast
  NetUtils.isSocketBindable(getClientProtocolAddress(getConf()));
  NetUtils.isSocketBindable(getDNProtocolAddress(getConf()));
  NetUtils.isSocketBindable(getHttpServerAddress(getConf()));
  long serverVersion = ClientProtocol.versionID;
  this.clientProtocolMethodsFingerprint = ProtocolSignature
      .getMethodsSigFingerPrint(ClientProtocol.class, serverVersion);
  myMetrics = new NameNodeMetrics(getConf(), this);
  this.clusterName = getConf().get(FSConstants.DFS_CLUSTER_NAME);
  this.namesystem = new FSNamesystem(this, getConf());
  // HACK: from removal of FSNamesystem.getFSNamesystem().
  JspHelper.fsn = this.namesystem;
  this.startDNServer();
  startHttpServer(getConf());
}
Example 15: startThreads
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; // import the required package/class
/**
 * Starts the service threads but does not allow requests to be responded yet.
 * Client will get {@link ServerNotRunningYetException} instead.
 */
@Override
public synchronized void startThreads() {
  AuthenticationTokenSecretManager mgr = createSecretManager();
  if (mgr != null) {
    setSecretManager(mgr);
    mgr.start();
  }
  this.authManager = new ServiceAuthorizationManager();
  HBasePolicyProvider.init(conf, authManager);
  responder.start();
  listener.start();
  scheduler.start();
}