当前位置: 首页>>代码示例>>Java>>正文


Java SecurityUtil类代码示例

本文整理汇总了Java中org.apache.hadoop.security.SecurityUtil的典型用法代码示例。如果您正苦于以下问题:Java SecurityUtil类的具体用法?Java SecurityUtil怎么用?Java SecurityUtil使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


SecurityUtil类属于org.apache.hadoop.security包,在下文中一共展示了SecurityUtil类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: initSpnego

import org.apache.hadoop.security.SecurityUtil; //导入依赖的package包/类
/**
 * Configures the SPNEGO (Kerberos) authentication filter for the web app.
 * Principal and keytab are read from the given config keys; each is only
 * set on the filter when present and non-empty.
 *
 * @param conf            configuration to read principal/keytab from
 * @param hostName        host name used to resolve the server principal
 * @param usernameConfKey config key holding the Kerberos principal
 * @param keytabConfKey   config key holding the keytab file path
 * @throws IOException if the server principal cannot be resolved
 */
private void initSpnego(Configuration conf, String hostName,
    String usernameConfKey, String keytabConfKey) throws IOException {
  Map<String, String> filterParams = new HashMap<>();

  String principal = conf.get(usernameConfKey);
  if (principal != null && !principal.isEmpty()) {
    // Resolve the configured principal pattern against this host.
    filterParams.put("kerberos.principal",
        SecurityUtil.getServerPrincipal(principal, hostName));
  }

  String keytab = conf.get(keytabConfKey);
  if (keytab != null && !keytab.isEmpty()) {
    filterParams.put("kerberos.keytab", keytab);
  }

  filterParams.put(AuthenticationFilter.AUTH_TYPE, "kerberos");

  defineFilter(webAppContext, SPNEGO_FILTER,
      AuthenticationFilter.class.getName(), filterParams, null);
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:18,代码来源:HttpServer2.java

示例2: createHAProxy

import org.apache.hadoop.security.SecurityUtil; //导入依赖的package包/类
/**
 * Creates an explicitly HA-enabled NameNode proxy object.
 *
 * @param conf the configuration object
 * @param nameNodeUri URI of a specific NameNode or of a logical nameservice
 * @param xface the IPC interface which should be created
 * @param failoverProxyProvider failover proxy provider (must not be null)
 * @return an object containing both the proxy and the associated
 *         delegation token service it corresponds to
 */
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createHAProxy(
    Configuration conf, URI nameNodeUri, Class<T> xface,
    AbstractNNFailoverProxyProvider<T> failoverProxyProvider) {
  Preconditions.checkNotNull(failoverProxyProvider);
  // HA case
  DfsClientConf clientConf = new DfsClientConf(conf);

  // Determine the delegation-token service name first: logical URIs keep a
  // logical service name, physical URIs resolve to the NN socket address.
  final Text tokenService = failoverProxyProvider.useLogicalURI()
      ? HAUtilClient.buildTokenServiceForLogicalUri(nameNodeUri,
          HdfsConstants.HDFS_URI_SCHEME)
      : SecurityUtil.buildTokenService(
          NuCypherExtUtilClient.getNNAddress(nameNodeUri));

  // Wrap the failover provider in a retry proxy so network failures trigger
  // failover, up to the configured attempt counts and backoff sleeps.
  T retryingProxy = (T) RetryProxy.create(xface, failoverProxyProvider,
      RetryPolicies.failoverOnNetworkException(
          RetryPolicies.TRY_ONCE_THEN_FAIL,
          clientConf.getMaxFailoverAttempts(),
          clientConf.getMaxRetryAttempts(),
          clientConf.getFailoverSleepBaseMillis(),
          clientConf.getFailoverSleepMaxMillis()));

  return new ProxyAndInfo<>(retryingProxy, tokenService,
      NuCypherExtUtilClient.getNNAddressCheckLogical(conf, nameNodeUri));
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:36,代码来源:NuCypherExtNameNodeProxiesClient.java

示例3: addHistoryToken

import org.apache.hadoop.security.SecurityUtil; //导入依赖的package包/类
/**
 * Adds a history-server delegation token to the supplied credentials when
 * security is enabled, a history-server proxy is available, and an RM
 * delegation token is already present (hack kept for Oozie, see below).
 *
 * @param ts credentials to add the history-server token to
 */
@VisibleForTesting
void addHistoryToken(Credentials ts) throws IOException, InterruptedException {
	/* check if we have a hsproxy, if not, no need */
	MRClientProtocol historyProxy = clientCache.getInitializedHSProxy();
	if (!UserGroupInformation.isSecurityEnabled() || historyProxy == null) {
		return;
	}
	/*
	 * note that get delegation token was called. Again this is hack for
	 * oozie to make sure we add history server delegation tokens to the
	 * credentials
	 */
	RMDelegationTokenSelector selector = new RMDelegationTokenSelector();
	Text rmService = resMgrDelegate.getRMDelegationTokenService();
	if (selector.selectToken(rmService, ts.getAllTokens()) == null) {
		return;
	}
	Text hsService =
			SecurityUtil.buildTokenService(historyProxy.getConnectAddress());
	if (ts.getToken(hsService) == null) {
		ts.addToken(hsService, getDelegationTokenFromHS(historyProxy));
	}
}
 
开发者ID:liuhaozzu,项目名称:big_data,代码行数:21,代码来源:YARNRunner.java

示例4: createSecureConfig

import org.apache.hadoop.security.SecurityUtil; //导入依赖的package包/类
/**
 * Builds a Kerberos-secured HDFS configuration for the secure mini cluster.
 *
 * @param dataTransferProtection value for the data-transfer-protection key
 *        (e.g. "authentication", "integrity", "privacy")
 * @return a configuration with Kerberos auth, HTTPS-only web endpoints and
 *         test SSL keystores wired in
 * @throws Exception if the SSL test configuration cannot be created
 */
private Configuration createSecureConfig(String dataTransferProtection) throws Exception {
  HdfsConfiguration secureConf = new HdfsConfiguration();
  SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, secureConf);
  // NameNode / DataNode Kerberos identities and keytabs.
  secureConf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  secureConf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
  secureConf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  secureConf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
  secureConf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
  secureConf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  secureConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, dataTransferProtection);
  // HTTPS-only web endpoints bound to ephemeral localhost ports.
  secureConf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
  secureConf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
  secureConf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
  secureConf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);
  // https://issues.apache.org/jira/browse/HDFS-7431
  secureConf.set(DFS_ENCRYPT_DATA_TRANSFER_KEY, "true");
  // Generate test SSL keystores/truststores and point the config at them.
  String keyStoreDir = baseDir.getAbsolutePath();
  String sslConfigDir = KeyStoreTestUtil.getClasspathDir(this.getClass());
  KeyStoreTestUtil.setupSSLConfig(keyStoreDir, sslConfigDir, secureConf, false);
  return secureConf;
}
 
开发者ID:jiangxiluning,项目名称:kafka-connect-hdfs,代码行数:22,代码来源:TestWithSecureMiniDFSCluster.java

示例5: cancelDelegationToken

import org.apache.hadoop.security.SecurityUtil; //导入依赖的package包/类
/**
 * Cancels the given timeline delegation token at the timeline server.
 * If the token carries no service address, the configured resource URI
 * ({@code resURI}) is used as the cancel endpoint instead.
 *
 * @param timelineDT the timeline delegation token to cancel
 * @throws IOException if the cancel request fails
 * @throws YarnException if the operation is rejected
 */
@SuppressWarnings("unchecked")
@Override
public void cancelDelegationToken(
    final Token<TimelineDelegationTokenIdentifier> timelineDT)
        throws IOException, YarnException {
  // An empty service field means we must fall back to the configured
  // timeline address (resURI) when building the target URI below.
  final boolean isTokenServiceAddrEmpty =
      timelineDT.getService().toString().isEmpty();
  final String scheme = isTokenServiceAddrEmpty ? null
      : (YarnConfiguration.useHttps(this.getConfig()) ? "https" : "http");
  final InetSocketAddress address = isTokenServiceAddrEmpty ? null
      : SecurityUtil.getTokenServiceAddr(timelineDT);
  PrivilegedExceptionAction<Void> cancelDTAction =
      new PrivilegedExceptionAction<Void>() {

        @Override
        public Void run() throws Exception {
          // If the timeline DT to cancel is different than cached, replace it.
          // Token to set every time for retry, because when exception happens,
          // DelegationTokenAuthenticatedURL will reset it to null;
          if (!timelineDT.equals(token.getDelegationToken())) {
            token.setDelegationToken((Token) timelineDT);
          }
          DelegationTokenAuthenticatedURL authUrl =
              new DelegationTokenAuthenticatedURL(authenticator,
                  connConfigurator);
          // If the token service address is not available, fall back to use
          // the configured service address.
          final URI serviceURI = isTokenServiceAddrEmpty ? resURI
              : new URI(scheme, null, address.getHostName(),
              address.getPort(), RESOURCE_URI_STR, null, null);
          authUrl.cancelDelegationToken(serviceURI.toURL(), token, doAsUser);
          return null;
        }
      };
  operateDelegationToken(cancelDTAction);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:37,代码来源:TimelineClientImpl.java

示例6: getTokenService

import org.apache.hadoop.security.SecurityUtil; //导入依赖的package包/类
/**
 * Builds the token-service name for the ResourceManager. In HA mode the
 * service is the comma-joined list of service names for every configured
 * RM id; otherwise it is the single configured RM address.
 *
 * @param conf configuration to read RM addresses from
 * @param address config key for the RM address
 * @param defaultAddr default address when the key is unset
 * @param defaultPort default port when the key is unset
 * @return the token service name
 */
@Unstable
public static Text getTokenService(Configuration conf, String address,
    String defaultAddr, int defaultPort) {
  if (!HAUtil.isHAEnabled(conf)) {
    // Non-HA case - no need to set RM_ID
    return SecurityUtil.buildTokenService(
        conf.getSocketAddr(address, defaultAddr, defaultPort));
  }

  // HA: collect one service name per RM id to form the combined name.
  ArrayList<String> serviceNames = new ArrayList<String>();
  YarnConfiguration yarnConf = new YarnConfiguration(conf);
  for (String rmId : HAUtil.getRMHAIds(conf)) {
    // Point the config copy at this RM so the address lookup resolves for it.
    yarnConf.set(YarnConfiguration.RM_HA_ID, rmId);
    serviceNames.add(SecurityUtil.buildTokenService(
        yarnConf.getSocketAddr(address, defaultAddr, defaultPort))
        .toString());
  }
  return new Text(Joiner.on(',').join(serviceNames));
}
 
开发者ID:naver,项目名称:hadoop,代码行数:22,代码来源:ClientRMProxy.java

示例7: convertFromYarn

import org.apache.hadoop.security.SecurityUtil; //导入依赖的package包/类
/**
 * Converts a protobuf token into an RPC token and sets its service.
 * Supposed to be used for tokens other than RMDelegationToken. For
 * RMDelegationToken, use
 * {@link #convertFromYarn(org.apache.hadoop.yarn.api.records.Token,
 * org.apache.hadoop.io.Text)} instead.
 *
 * @param protoToken the yarn token
 * @param serviceAddr the connect address for the service; may be null, in
 *        which case the service carried by the proto token is kept
 * @return rpc token
 */
public static <T extends TokenIdentifier> Token<T> convertFromYarn(
    org.apache.hadoop.yarn.api.records.Token protoToken,
    InetSocketAddress serviceAddr) {
  Token<T> rpcToken = new Token<T>(
      protoToken.getIdentifier().array(),
      protoToken.getPassword().array(),
      new Text(protoToken.getKind()),
      new Text(protoToken.getService()));
  if (serviceAddr != null) {
    SecurityUtil.setTokenService(rpcToken, serviceAddr);
  }
  return rpcToken;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:24,代码来源:ConverterUtils.java

示例8: allocate

import org.apache.hadoop.security.SecurityUtil; //导入依赖的package包/类
/**
 * Test override: records the arguments of this allocate call for later
 * inspection, then delegates to the superclass with a copy of the ask
 * list (copy semantics kept from the original implementation).
 */
@Override
public synchronized Allocation allocate(
    ApplicationAttemptId applicationAttemptId, List<ResourceRequest> ask,
    List<ContainerId> release,
    List<String> blacklistAdditions, List<String> blacklistRemovals) {
  // Copy each request before handing it to the scheduler.
  List<ResourceRequest> askSnapshot = new ArrayList<ResourceRequest>();
  for (ResourceRequest request : ask) {
    askSnapshot.add(ResourceRequest.newInstance(
        request.getPriority(), request.getResourceName(),
        request.getCapability(), request.getNumContainers(),
        request.getRelaxLocality()));
  }
  // Build token services from host names rather than IPs in this test.
  SecurityUtil.setTokenServiceUseIp(false);
  lastAsk = ask;
  lastRelease = release;
  lastBlacklistAdditions = blacklistAdditions;
  lastBlacklistRemovals = blacklistRemovals;
  return super.allocate(applicationAttemptId, askSnapshot, release,
      blacklistAdditions, blacklistRemovals);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:22,代码来源:TestRMContainerAllocator.java

示例9: addHistoryToken

import org.apache.hadoop.security.SecurityUtil; //导入依赖的package包/类
/**
 * Ensures the credentials carry a history-server delegation token whenever
 * security is on, a history-server proxy exists, and an RM delegation token
 * is already present (hack kept for Oozie, see inline comment).
 *
 * @param ts credentials to augment
 */
@VisibleForTesting
void addHistoryToken(Credentials ts) throws IOException, InterruptedException {
  /* check if we have a hsproxy, if not, no need */
  MRClientProtocol hsProxy = clientCache.getInitializedHSProxy();
  if (!UserGroupInformation.isSecurityEnabled() || hsProxy == null) {
    return;
  }
  /*
   * note that get delegation token was called. Again this is hack for oozie
   * to make sure we add history server delegation tokens to the credentials
   */
  RMDelegationTokenSelector selector = new RMDelegationTokenSelector();
  Text rmService = resMgrDelegate.getRMDelegationTokenService();
  if (selector.selectToken(rmService, ts.getAllTokens()) == null) {
    return; // no RM token -> nothing to mirror for the history server
  }
  Text hsService =
      SecurityUtil.buildTokenService(hsProxy.getConnectAddress());
  if (ts.getToken(hsService) == null) {
    ts.addToken(hsService, getDelegationTokenFromHS(hsProxy));
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:21,代码来源:YARNRunner.java

示例10: setUp

import org.apache.hadoop.security.SecurityUtil; //导入依赖的package包/类
/**
 * One-time test setup: starts a mini DFS cluster and a mini MR-on-YARN
 * cluster, starts the NameNode's delegation-token secret manager, and
 * qualifies the shared test path {@code p1} against the cluster FS.
 */
@BeforeClass
public static void setUp() throws Exception {
  final Configuration conf = new Configuration();

  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
  // RM principal built around SecurityUtil's host-name pattern placeholder.
  conf.set(YarnConfiguration.RM_PRINCIPAL,
      "jt_id/" + SecurityUtil.HOSTNAME_PATTERN + "@APACHE.ORG");

  dfsCluster = new MiniDFSCluster.Builder(conf)
      .checkExitOnShutdown(true)
      .numDataNodes(numSlaves)
      .format(true)
      .racks(null)
      .build();

  mrCluster = new MiniMRYarnCluster(TestBinaryTokenFile.class.getName(), noOfNMs);
  mrCluster.init(conf);
  mrCluster.start();

  // Delegation tokens can only be issued once the secret manager is running.
  NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();

  FileSystem fs = dfsCluster.getFileSystem();
  p1 = fs.makeQualified(new Path("file1"));
}
 
开发者ID:naver,项目名称:hadoop,代码行数:25,代码来源:TestBinaryTokenFile.java

示例11: renew

import org.apache.hadoop.security.SecurityUtil; //导入依赖的package包/类
/**
 * Renews an MR history-server delegation token by contacting the history
 * server identified by the token's service address.
 *
 * @param token the delegation token to renew
 * @param conf configuration used to instantiate the history proxy
 * @return the token's next expiration time
 */
@Override
public long renew(Token<?> token, Configuration conf) throws IOException,
    InterruptedException {

  // Re-wrap the RPC token as a YARN records token for the protocol call.
  org.apache.hadoop.yarn.api.records.Token yarnToken =
      org.apache.hadoop.yarn.api.records.Token.newInstance(
          token.getIdentifier(), token.getKind().toString(),
          token.getPassword(), token.getService().toString());

  MRClientProtocol historyProxy = instantiateHistoryProxy(conf,
      SecurityUtil.getTokenServiceAddr(token));
  try {
    RenewDelegationTokenRequest request =
        Records.newRecord(RenewDelegationTokenRequest.class);
    request.setDelegationToken(yarnToken);
    return historyProxy.renewDelegationToken(request).getNextExpirationTime();
  } finally {
    // Always tear the proxy down, even when the RPC fails.
    stopHistoryProxy(historyProxy);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:22,代码来源:MRDelegationTokenRenewer.java

示例12: RpcProgramMountd

import org.apache.hadoop.security.SecurityUtil; //导入依赖的package包/类
/**
 * Creates the NFS mount daemon RPC program: registers the configured export
 * point, configures UGI, performs the Kerberos keytab login, and opens a
 * DFSClient to the NameNode.
 *
 * @param config NFS gateway configuration
 * @param registrationSocket socket passed through to the RPC superclass
 * @param allowInsecurePorts forwarded to the superclass
 * @throws IOException if login or DFSClient creation fails
 */
public RpcProgramMountd(NfsConfiguration config,
    DatagramSocket registrationSocket, boolean allowInsecurePorts)
    throws IOException {
  // Note that RPC cache is not enabled
  super("mountd", "localhost", config.getInt(
      NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY,
      NfsConfigKeys.DFS_NFS_MOUNTD_PORT_DEFAULT), PROGRAM, VERSION_1,
      VERSION_3, registrationSocket, allowInsecurePorts);
  exports = new ArrayList<String>();
  exports.add(config.get(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY,
      NfsConfigKeys.DFS_NFS_EXPORT_POINT_DEFAULT));
  this.hostsMatcher = NfsExports.getInstance(config);
  // Synchronized list: mount entries may be touched from RPC handler threads.
  // NOTE(review): concurrency assumption inferred from synchronizedList —
  // confirm against the callers.
  this.mounts = Collections.synchronizedList(new ArrayList<MountEntry>());
  UserGroupInformation.setConfiguration(config);
  // Kerberos login using the NFS keytab/principal config keys.
  SecurityUtil.login(config, NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY,
      NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY);
  this.dfsClient = new DFSClient(NameNode.getAddress(config), config);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:19,代码来源:RpcProgramMountd.java

示例13: getAuthMethods

import org.apache.hadoop.security.SecurityUtil; //导入依赖的package包/类
/**
 * Determines the SASL auth methods this server accepts: the configured
 * authentication method, preceded by TOKEN when a secret manager is
 * available and the configured method is not itself TOKEN.
 *
 * @param secretManager secret manager backing TOKEN auth, or null
 * @param conf configuration holding the authentication method
 * @return accepted auth methods, most preferred first
 * @throws IllegalArgumentException if TOKEN auth is configured but no
 *         secret manager was supplied
 */
private List<AuthMethod> getAuthMethods(SecretManager<?> secretManager,
                                           Configuration conf) {
  AuthenticationMethod configuredMethod =
      SecurityUtil.getAuthenticationMethod(conf);
  List<AuthMethod> methods = new ArrayList<AuthMethod>();
  if (configuredMethod == AuthenticationMethod.TOKEN) {
    if (secretManager == null) {
      throw new IllegalArgumentException(AuthenticationMethod.TOKEN +
          " authentication requires a secret manager");
    }
  } else if (secretManager != null) {
    LOG.debug(AuthenticationMethod.TOKEN +
        " authentication enabled for secret manager");
    // most preferred, go to the front of the line!
    methods.add(AuthenticationMethod.TOKEN.getAuthMethod());
  }
  methods.add(configuredMethod.getAuthMethod());

  LOG.debug("Server accepts auth methods:" + methods);
  return methods;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:22,代码来源:Server.java

示例14: renewDelegationToken

import org.apache.hadoop.security.SecurityUtil; //导入依赖的package包/类
/**
 * Renews an HDFS delegation token via the token's service address, running
 * the request as the real user when this UGI is a proxy user.
 *
 * @param token the delegation token to renew
 * @return the token's new expiration time
 * @throws IOException on failure, or wrapping an interruption
 */
@SuppressWarnings("unchecked")
@Override
public long renewDelegationToken(final Token<?> token) throws IOException {
  // update the kerberos credentials, if they are coming from a keytab
  UserGroupInformation realUser = ugi.getRealUser();
  final UserGroupInformation connectUgi = (realUser != null) ? realUser : ugi;
  try {
    return connectUgi.doAs(new PrivilegedExceptionAction<Long>() {
      @Override
      public Long run() throws Exception {
        InetSocketAddress serviceAddr =
            SecurityUtil.getTokenServiceAddr(token);
        return DelegationTokenFetcher.renewDelegationToken(connectionFactory,
            DFSUtil.createUri(getUnderlyingProtocol(), serviceAddr),
            (Token<DelegationTokenIdentifier>) token);
      }
    });
  } catch (InterruptedException e) {
    throw new IOException(e);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:24,代码来源:HftpFileSystem.java

示例15: cancelDelegationToken

import org.apache.hadoop.security.SecurityUtil; //导入依赖的package包/类
/**
 * Cancels an HDFS delegation token via the token's service address, running
 * the request as the real user when this UGI is a proxy user.
 *
 * @param token the delegation token to cancel
 * @throws IOException on failure, or wrapping an interruption
 */
@SuppressWarnings("unchecked")
@Override
public void cancelDelegationToken(final Token<?> token) throws IOException {
  // Prefer the real user's credentials when this UGI is a proxy user.
  UserGroupInformation realUser = ugi.getRealUser();
  final UserGroupInformation connectUgi = (realUser != null) ? realUser : ugi;
  try {
    connectUgi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        InetSocketAddress serviceAddr =
            SecurityUtil.getTokenServiceAddr(token);
        DelegationTokenFetcher.cancelDelegationToken(connectionFactory,
            DFSUtil.createUri(getUnderlyingProtocol(), serviceAddr),
            (Token<DelegationTokenIdentifier>) token);
        return null;
      }
    });
  } catch (InterruptedException e) {
    throw new IOException(e);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:24,代码来源:HftpFileSystem.java


注:本文中的org.apache.hadoop.security.SecurityUtil类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。