

Java CommonConfigurationKeys Class Code Examples

This article collects typical usage examples of org.apache.hadoop.fs.CommonConfigurationKeys in Java. If you are wondering what the Java CommonConfigurationKeys class is for, how to use it, or where to find usage examples, the selected code examples below may help.


The CommonConfigurationKeys class belongs to the org.apache.hadoop.fs package. Fifteen code examples of the class are shown below, sorted by popularity by default.
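Before the examples, a minimal sketch of the common pattern they all share: CommonConfigurationKeys only holds configuration key constants (it extends CommonConfigurationKeysPublic), which are passed to the generic getters and setters of org.apache.hadoop.conf.Configuration. The sketch uses only key constants that also appear in the examples on this page (HA_HM_CHECK_INTERVAL_KEY, IO_NATIVE_LIB_AVAILABLE_KEY); the class name CommonConfigurationKeysDemo and the literal fallback value are illustrative assumptions, not taken from any example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;

public class CommonConfigurationKeysDemo {  // hypothetical demo class, not from the examples below
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // The constants are plain String keys; values are written and read
    // through Configuration's typed setters and getters.
    conf.setInt(CommonConfigurationKeys.HA_HM_CHECK_INTERVAL_KEY, 50);
    conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);

    // The second argument is a fallback used when the key is unset
    // (1000 ms here is just an illustrative value).
    int checkInterval = conf.getInt(
        CommonConfigurationKeys.HA_HM_CHECK_INTERVAL_KEY, 1000);
    System.out.println("health monitor check interval (ms): " + checkInterval);
  }
}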

Example 1: testRandomHealthAndDisconnects

import org.apache.hadoop.fs.CommonConfigurationKeys; // import the required package/class
/**
 * Have the services fail their health checks half the time,
 * causing the master role to bounce back and forth in the
 * cluster. Meanwhile, causes ZK to disconnect clients every
 * 50ms, to trigger the retry code and failures to become active.
 */
@Test(timeout=(STRESS_RUNTIME_SECS + EXTRA_TIMEOUT_SECS) * 1000)
public void testRandomHealthAndDisconnects() throws Exception {
  long runFor = STRESS_RUNTIME_SECS * 1000;
  Mockito.doAnswer(new RandomlyThrow(0))
      .when(cluster.getService(0).proxy).monitorHealth();
  Mockito.doAnswer(new RandomlyThrow(1))
      .when(cluster.getService(1).proxy).monitorHealth();
  conf.setInt(CommonConfigurationKeys.HA_FC_ELECTOR_ZK_OP_RETRIES_KEY, 100);
  // Don't start until after the above mocking. Otherwise we can get
  // Mockito errors if the HM calls the proxy in the middle of
  // setting up the mock.
  cluster.start();
  
  long st = Time.now();
  while (Time.now() - st < runFor) {
    cluster.getTestContext().checkException();
    serverFactory.closeAll();
    Thread.sleep(50);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 27, Source: TestZKFailoverControllerStress.java

Example 2: testZlibCompressorDecompressorWithConfiguration

import org.apache.hadoop.fs.CommonConfigurationKeys; // import the required package/class
@Test
public void testZlibCompressorDecompressorWithConfiguration() {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (ZlibFactory.isNativeZlibLoaded(conf)) {
    byte[] rawData;
    int tryNumber = 5;
    int BYTE_SIZE = 10 * 1024;
    Compressor zlibCompressor = ZlibFactory.getZlibCompressor(conf);
    Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
    rawData = generate(BYTE_SIZE);
    try {
      for (int i = 0; i < tryNumber; i++)
        compressDecompressZlib(rawData, (ZlibCompressor) zlibCompressor,
            (ZlibDecompressor) zlibDecompressor);
      zlibCompressor.reinit(conf);
    } catch (Exception ex) {
      fail("testZlibCompressorDecompressorWithConfiguration ex error " + ex);
    }
  } else {
    assertTrue("ZlibFactory is using native libs against request",
        ZlibFactory.isNativeZlibLoaded(conf));
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 25, Source: TestZlibCompressorDecompressor.java

Example 3: getDataNodeProxy

import org.apache.hadoop.fs.CommonConfigurationKeys; // import the required package/class
private ClientDatanodeProtocol getDataNodeProxy(String datanode)
    throws IOException {
  InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
  // Get the current configuration
  Configuration conf = getConf();

  // For datanode proxy the server principal should be DN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));

  // Create the client
  ClientDatanodeProtocol dnProtocol =     
      DFSUtil.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf,
          NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
  return dnProtocol;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source: DFSAdmin.java

Example 4: testNetgroupWithFallback

import org.apache.hadoop.fs.CommonConfigurationKeys; // import the required package/class
@Test
public void testNetgroupWithFallback() throws Exception {
  LOG.info("running 'mvn -Pnative -DTestGroupFallback clear test' will " +
      "test the normal path and 'mvn -DTestGroupFallback clear test' will" +
      " test the fall back functionality");
  Logger.getRootLogger().setLevel(Level.DEBUG);
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
      "org.apache.hadoop.security.JniBasedUnixGroupsNetgroupMappingWithFallback");

  Groups groups = new Groups(conf);

  String username = System.getProperty("user.name");
  List<String> groupList = groups.getGroups(username);

  LOG.info(username + " has GROUPS: " + groupList.toString());
  assertTrue(groupList.size() > 0);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 19, Source: TestGroupFallback.java

Example 5: testLogin

import org.apache.hadoop.fs.CommonConfigurationKeys; // import the required package/class
@Test
public void testLogin() throws IOException {
  String userPrincipal = System.getProperty("user.principal");
  String userKeyTab = System.getProperty("user.keytab");
  Assert.assertNotNull("User principal was not specified", userPrincipal);
  Assert.assertNotNull("User keytab was not specified", userKeyTab);

  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  UserGroupInformation.setConfiguration(conf);

  UserGroupInformation ugi = UserGroupInformation
      .loginUserFromKeytabAndReturnUGI(userPrincipal, userKeyTab);

  Assert.assertEquals(AuthenticationMethod.KERBEROS,
      ugi.getAuthenticationMethod());
  
  try {
    UserGroupInformation
    .loginUserFromKeytabAndReturnUGI("[email protected]", userKeyTab);
    Assert.fail("Login should have failed");
  } catch (Exception ex) {
    ex.printStackTrace();
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 27, Source: TestUGIWithExternalKdc.java

Example 6: setupHM

import org.apache.hadoop.fs.CommonConfigurationKeys; // import the required package/class
@Before
public void setupHM() throws InterruptedException, IOException {
  Configuration conf = new Configuration();
  conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
  conf.setInt(CommonConfigurationKeys.HA_HM_CHECK_INTERVAL_KEY, 50);
  conf.setInt(CommonConfigurationKeys.HA_HM_CONNECT_RETRY_INTERVAL_KEY, 50);
  conf.setInt(CommonConfigurationKeys.HA_HM_SLEEP_AFTER_DISCONNECT_KEY, 50);
  
  svc = createDummyHAService();
  hm = new HealthMonitor(conf, svc) {
    @Override
    protected HAServiceProtocol createProxy() throws IOException {
      createProxyCount.incrementAndGet();
      if (throwOOMEOnCreate) {
        throw new OutOfMemoryError("oome");
      }
      return super.createProxy();
    }
  };
  LOG.info("Starting health monitor");
  hm.start();
  
  LOG.info("Waiting for HEALTHY signal");    
  waitForState(hm, HealthMonitor.State.SERVICE_HEALTHY);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 26, Source: TestHealthMonitor.java

Example 7: testCachePreventsImplRequest

import org.apache.hadoop.fs.CommonConfigurationKeys; // import the required package/class
@Test
public void testCachePreventsImplRequest() throws Exception {
  // Disable negative cache.
  conf.setLong(
    CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, 0);
  Groups groups = new Groups(conf);
  groups.cacheGroupsAdd(Arrays.asList(myGroups));
  groups.refresh();
  FakeGroupMapping.clearBlackList();

  assertEquals(0, FakeGroupMapping.getRequestCount());

  // First call hits the wire
  assertTrue(groups.getGroups("me").size() == 2);
  assertEquals(1, FakeGroupMapping.getRequestCount());

  // Second count hits cache
  assertTrue(groups.getGroups("me").size() == 2);
  assertEquals(1, FakeGroupMapping.getRequestCount());
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 21, Source: TestGroupsCaching.java

Example 8: setUp

import org.apache.hadoop.fs.CommonConfigurationKeys; // import the required package/class
@Before
public  void setUp() throws IOException { // Setup server for both protocols
  conf = new Configuration();
  conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH, 1024);
  // Set RPC engine to protobuf RPC engine
  RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);

  // Create server side implementation
  PBServerImpl serverImpl = new PBServerImpl();
  BlockingService service = TestProtobufRpcProto
      .newReflectiveBlockingService(serverImpl);

  // Get RPC server for server side implementation
  server = new RPC.Builder(conf).setProtocol(TestRpcService.class)
      .setInstance(service).setBindAddress(ADDRESS).setPort(PORT).build();
  addr = NetUtils.getConnectAddress(server);
  
  // now the second protocol
  PBServer2Impl server2Impl = new PBServer2Impl();
  BlockingService service2 = TestProtobufRpc2Proto
      .newReflectiveBlockingService(server2Impl);
  
  server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, TestRpcService2.class,
      service2);
  server.start();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 27, Source: TestProtoBufRpc.java

Example 9: testDefaultAcl

import org.apache.hadoop.fs.CommonConfigurationKeys; // import the required package/class
@Test
public void testDefaultAcl() {
  ServiceAuthorizationManager serviceAuthorizationManager = 
      new ServiceAuthorizationManager();
  Configuration conf = new Configuration ();
  // test without setting a default acl
  conf.set(ACL_CONFIG, "user1 group1");
  serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
  AccessControlList acl = serviceAuthorizationManager.getProtocolsAcls(TestProtocol.class);
  assertEquals("user1 group1", acl.getAclString());
  acl = serviceAuthorizationManager.getProtocolsAcls(TestProtocol1.class);
  assertEquals(AccessControlList.WILDCARD_ACL_VALUE, acl.getAclString());

  // test with a default acl
  conf.set(
      CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_ACL, 
      "user2 group2");
  serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
  acl = serviceAuthorizationManager.getProtocolsAcls(TestProtocol.class);
  assertEquals("user1 group1", acl.getAclString());
  acl = serviceAuthorizationManager.getProtocolsAcls(TestProtocol1.class);
  assertEquals("user2 group2", acl.getAclString());
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 24, Source: TestServiceAuthorization.java

Example 10: startUp

import org.apache.hadoop.fs.CommonConfigurationKeys; // import the required package/class
/**
 * Starts an instance of DataNode
 * @throws IOException
 */
@Before
public void startUp() throws IOException, URISyntaxException {
  tearDownDone = false;
  conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, DATA_DIR);
  conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
  conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
  conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
  FileSystem.setDefaultUri(conf,
      "hdfs://" + NN_ADDR.getHostName() + ":" + NN_ADDR.getPort());
  File dataDir = new File(DATA_DIR);
  FileUtil.fullyDelete(dataDir);
  dataDir.mkdirs();
  StorageLocation location = StorageLocation.parse(dataDir.getPath());
  locations.add(location);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 22, Source: TestDatanodeProtocolRetryPolicy.java

Example 11: testCacheEntriesExpire

import org.apache.hadoop.fs.CommonConfigurationKeys; // import the required package/class
@Test
public void testCacheEntriesExpire() throws Exception {
  conf.setLong(
    CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 1);
  FakeTimer timer = new FakeTimer();
  final Groups groups = new Groups(conf, timer);
  groups.cacheGroupsAdd(Arrays.asList(myGroups));
  groups.refresh();
  FakeGroupMapping.clearBlackList();

  // We make an entry
  groups.getGroups("me");
  int startingRequestCount = FakeGroupMapping.getRequestCount();

  timer.advance(20 * 1000);

  // Cache entry has expired so it results in a new fetch
  groups.getGroups("me");
  assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 21, Source: TestGroupsCaching.java

Example 12: MiniZKFCCluster

import org.apache.hadoop.fs.CommonConfigurationKeys; // import the required package/class
public MiniZKFCCluster(Configuration conf, ZooKeeperServer zks) {
  this.conf = conf;
  // Fast check interval so tests run faster
  conf.setInt(CommonConfigurationKeys.HA_HM_CHECK_INTERVAL_KEY, 50);
  conf.setInt(CommonConfigurationKeys.HA_HM_CONNECT_RETRY_INTERVAL_KEY, 50);
  conf.setInt(CommonConfigurationKeys.HA_HM_SLEEP_AFTER_DISCONNECT_KEY, 50);
  svcs = new ArrayList<DummyHAService>(2);
  // remove any existing instances we are keeping track of
  DummyHAService.instances.clear();

  for (int i = 0; i < 2; i++) {
    addSvcs(svcs, i);
  }

  this.ctx = new TestContext();
  this.zks = zks;
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 18, Source: MiniZKFCCluster.java

Example 13: testDTInInsecureClusterWithFallback

import org.apache.hadoop.fs.CommonConfigurationKeys; // import the required package/class
@Test
public void testDTInInsecureClusterWithFallback()
    throws IOException, URISyntaxException {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  conf.setBoolean(CommonConfigurationKeys
      .IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY, true);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);
    Assert.assertNull(webHdfs.getDelegationToken(null));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 19, Source: TestWebHDFS.java

Example 14: testLz4Codec

import org.apache.hadoop.fs.CommonConfigurationKeys; // import the required package/class
@Test
public void testLz4Codec() throws IOException {
  if (NativeCodeLoader.isNativeCodeLoaded()) {
    if (Lz4Codec.isNativeCodeLoaded()) {
      conf.setBoolean(
          CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY,
          false);
      codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.Lz4Codec");
      codecTest(conf, seed, count, "org.apache.hadoop.io.compress.Lz4Codec");
      conf.setBoolean(
          CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY,
          true);
      codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.Lz4Codec");
      codecTest(conf, seed, count, "org.apache.hadoop.io.compress.Lz4Codec");
    } else {
      Assert.fail("Native hadoop library available but lz4 not");
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 20, Source: TestCodec.java

Example 15: testCodecPoolGzipReuse

import org.apache.hadoop.fs.CommonConfigurationKeys; // import the required package/class
@Test
public void testCodecPoolGzipReuse() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (!ZlibFactory.isNativeZlibLoaded(conf)) {
    LOG.warn("testCodecPoolGzipReuse skipped: native libs not loaded");
    return;
  }
  GzipCodec gzc = ReflectionUtils.newInstance(GzipCodec.class, conf);
  DefaultCodec dfc = ReflectionUtils.newInstance(DefaultCodec.class, conf);
  Compressor c1 = CodecPool.getCompressor(gzc);
  Compressor c2 = CodecPool.getCompressor(dfc);
  CodecPool.returnCompressor(c1);
  CodecPool.returnCompressor(c2);
  assertTrue("Got mismatched ZlibCompressor", c2 != CodecPool.getCompressor(gzc));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source: TestCodec.java


Note: The org.apache.hadoop.fs.CommonConfigurationKeys class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's License; do not reproduce without permission.