

Java Configuration.setBoolean Method Code Examples

This article collects and summarizes typical usage examples of the Java method org.apache.hadoop.conf.Configuration.setBoolean. If you are struggling with questions such as: What exactly does Configuration.setBoolean do? How is it used? Where can I find examples? Then the hand-picked code examples here may help. You can also explore further usage examples for the enclosing class, org.apache.hadoop.conf.Configuration.


Below are 15 code examples of Configuration.setBoolean, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
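As a quick orientation before the project examples: setBoolean(String name, boolean value) stores a boolean value under a configuration key, and its counterpart getBoolean(String name, boolean defaultValue) reads it back, returning the supplied default when the key is unset. Here is a minimal, self-contained sketch of that round trip (the key names are invented for illustration and do not belong to any Hadoop component):

import org.apache.hadoop.conf.Configuration;

public class SetBooleanDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // "example.feature.enabled" is a hypothetical key used only in this demo
    conf.setBoolean("example.feature.enabled", true);
    // prints true: the key was set above
    System.out.println(conf.getBoolean("example.feature.enabled", false));
    // prints false: the key was never set, so the default wins
    System.out.println(conf.getBoolean("example.unset.flag", false));
  }
}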

Example 1: testMapTasksOnlySleepJobs

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Test  (timeout=600000)
public void testMapTasksOnlySleepJobs() throws Exception {
  Configuration configuration = GridmixTestUtils.mrvl.getConfig();

  DebugJobProducer jobProducer = new DebugJobProducer(5, configuration);
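  // run the generated sleep jobs in map-only mode; each job should have zero reduce tasks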
  configuration.setBoolean(SleepJob.SLEEPJOB_MAPTASK_ONLY, true);

  UserGroupInformation ugi = UserGroupInformation.getLoginUser();
  JobStory story;
  int seq = 1;
  while ((story = jobProducer.getNextJob()) != null) {
    GridmixJob gridmixJob = JobCreator.SLEEPJOB.createGridmixJob(configuration, 0,
            story, new Path("ignored"), ugi, seq++);
    gridmixJob.buildSplits(null);
    Job job = gridmixJob.call();
    assertEquals(0, job.getNumReduceTasks());
  }
  jobProducer.close();
  assertEquals(6, seq);
}
 
Developer ID: naver, Project: hadoop, Lines: 21, Source: TestSleepJob.java

Example 2: setUpHaCluster

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
private void setUpHaCluster(boolean security) throws Exception {
  conf = new Configuration();
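  // toggle Hadoop service-level authorization on or off for this cluster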
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
      security);
  cluster = new MiniQJMHACluster.Builder(conf).build();
  setHAConf(conf, cluster.getDfsCluster().getNameNode(0).getHostAndPort(),
      cluster.getDfsCluster().getNameNode(1).getHostAndPort());
  admin = new DFSAdmin();
  admin.setConf(conf);
  assertTrue(HAUtil.isHAEnabled(conf, "ns1"));

  originOut = System.out;
  originErr = System.err;
  System.setOut(new PrintStream(out));
  System.setErr(new PrintStream(err));
}
 
Developer ID: naver, Project: hadoop, Lines: 18, Source: TestDFSAdminWithHA.java

Example 3: testFailTask

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Test
// The first attempt fails and the second attempt passes;
// the job succeeds.
public void testFailTask() throws Exception {
  MRApp app = new MockFirstFailingAttemptMRApp(1, 0);
  Configuration conf = new Configuration();
  // this test requires two task attempts, but uberization overrides max to 1
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  Job job = app.submit(conf);
  app.waitForState(job, JobState.SUCCEEDED);
  Map<TaskId,Task> tasks = job.getTasks();
  Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
  Task task = tasks.values().iterator().next();
  Assert.assertEquals("Task state not correct", TaskState.SUCCEEDED,
      task.getReport().getTaskState());
  Map<TaskAttemptId, TaskAttempt> attempts =
      tasks.values().iterator().next().getAttempts();
  Assert.assertEquals("Num attempts is not correct", 2, attempts.size());
  // one attempt must have failed
  // and the other must have succeeded
  Iterator<TaskAttempt> it = attempts.values().iterator();
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED,
      it.next().getReport().getTaskAttemptState());
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.SUCCEEDED,
      it.next().getReport().getTaskAttemptState());
}
 
Developer ID: naver, Project: hadoop, Lines: 27, Source: TestFail.java

Example 4: test

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Test
public void test() {
  Configuration conf = HBaseConfiguration.create();
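  // with the global event loop group enabled, all clients share one Netty group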
  conf.setBoolean(AsyncRpcClient.USE_GLOBAL_EVENT_LOOP_GROUP, true);
  AsyncRpcClient client = new AsyncRpcClient(conf);
  assertNotNull(AsyncRpcClient.GLOBAL_EVENT_LOOP_GROUP);
  AsyncRpcClient client1 = new AsyncRpcClient(conf);
  assertSame(client.bootstrap.group(), client1.bootstrap.group());
  client1.close();
  assertFalse(client.bootstrap.group().isShuttingDown());

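  // with the global group disabled, each client creates its own group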
  conf.setBoolean(AsyncRpcClient.USE_GLOBAL_EVENT_LOOP_GROUP, false);
  AsyncRpcClient client2 = new AsyncRpcClient(conf);
  assertNotSame(client.bootstrap.group(), client2.bootstrap.group());
  client2.close();

  client.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 19, Source: TestGlobalEventLoopGroup.java

Example 5: testConfigurationPreserved

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Test
public void testConfigurationPreserved() throws Throwable {
  Configuration conf = new Configuration();
  conf.setBoolean("_ENABLED", false);
  conf.setLong("_PRIORITY", 10);
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("table"));
  Constraints.add(desc, AlsoWorks.class, conf);
  Constraints.add(desc, WorksConstraint.class);
  assertFalse(Constraints.enabled(desc, AlsoWorks.class));
  List<? extends Constraint> constraints = Constraints.getConstraints(desc,
      this.getClass().getClassLoader());
  for (Constraint c : constraints) {
    Configuration storedConf = c.getConf();
    if (c instanceof AlsoWorks)
      assertEquals(10, storedConf.getLong("_PRIORITY", -1));
    // otherwise it's just a WorksConstraint
    else
      assertEquals(2, storedConf.getLong("_PRIORITY", -1));
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 23, Source: TestConstraints.java

Example 6: testFsShutdownHook

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public void testFsShutdownHook() throws Exception {
  final Set<FileSystem> closed = Collections.synchronizedSet(new HashSet<FileSystem>());
  Configuration conf = new Configuration();
  Configuration confNoAuto = new Configuration();

  conf.setClass("fs.test.impl", TestShutdownFileSystem.class, FileSystem.class);
  confNoAuto.setClass("fs.test.impl", TestShutdownFileSystem.class, FileSystem.class);
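  // opt this configuration out of the automatic close on JVM shutdown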
  confNoAuto.setBoolean("fs.automatic.close", false);

  TestShutdownFileSystem fsWithAuto =
    (TestShutdownFileSystem)(new Path("test://a/").getFileSystem(conf));
  TestShutdownFileSystem fsWithoutAuto =
    (TestShutdownFileSystem)(new Path("test://b/").getFileSystem(confNoAuto));

  fsWithAuto.setClosedSet(closed);
  fsWithoutAuto.setClosedSet(closed);

  // Different URIs should result in different FS instances
  assertNotSame(fsWithAuto, fsWithoutAuto);

  FileSystem.CACHE.closeAll(true);
  assertEquals(1, closed.size());
  assertTrue(closed.contains(fsWithAuto));

  closed.clear();

  FileSystem.closeAll();
  assertEquals(1, closed.size());
  assertTrue(closed.contains(fsWithoutAuto));
}
 
Developer ID: naver, Project: hadoop, Lines: 31, Source: TestFileSystem.java

Example 7: setupCluster

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@BeforeClass
public static void setupCluster() {
  final Configuration conf = new Configuration();
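  // expose the mini cluster over WebHDFS so it can be accessed by URI below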
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();

    //change root permission to 777
    cluster.getFileSystem().setPermission(
        new Path("/"), new FsPermission((short)0777));

    final String uri = WebHdfsFileSystem.SCHEME + "://"
        + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);

    //get file system as a non-superuser
    final UserGroupInformation current = UserGroupInformation.getCurrentUser();
    final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
        current.getShortUserName() + "x", new String[]{"user"});
    fileSystem = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
      @Override
      public FileSystem run() throws Exception {
        return FileSystem.get(new URI(uri), conf);
      }
    });

    defaultWorkingDirectory = fileSystem.getWorkingDirectory();
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 33, Source: TestFSMainOperationsWebHdfs.java

Example 8: setUp

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Before
public void setUp() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(Constants.MIN_MULTIPART_THRESHOLD, 5 * 1024 * 1024);
  conf.setInt(Constants.MULTIPART_SIZE, 5 * 1024 * 1024);
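  // enable the S3A fast upload path exercised by this test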
  conf.setBoolean(Constants.FAST_UPLOAD, true);
  fs = S3ATestUtils.createTestFileSystem(conf);
}
 
Developer ID: naver, Project: hadoop, Lines: 9, Source: TestS3AFastOutputStream.java

Example 9: setupBeforeClass

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@BeforeClass
public static void setupBeforeClass() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  // Enable the favored nodes based load balancer
  conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
      FavoredNodeLoadBalancer.class, LoadBalancer.class);
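  // disable short-circuit reads for the mini cluster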
  conf.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  TEST_UTIL.startMiniCluster(SLAVES);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 10, Source: TestRegionPlacement2.java

Example 10: testConfigKeyEnabled

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Test (timeout=300000)
public void testConfigKeyEnabled() throws IOException {
  Configuration conf = new HdfsConfiguration();
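  // storage policies must be enabled for the setStoragePolicy call below to succeed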
  conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, true);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1).build();
  try {
    cluster.waitActive();
    cluster.getFileSystem().setStoragePolicy(new Path("/"),
        HdfsConstants.COLD_STORAGE_POLICY_NAME);
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 15, Source: TestBlockStoragePolicy.java

Example 11: doTestShortCircuitReadWithRemoteBlockReader

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Test that file data can be read by reading the block
 * through RemoteBlockReader.
 * @throws IOException
 * @throws InterruptedException
 */
public void doTestShortCircuitReadWithRemoteBlockReader(boolean ignoreChecksum, int size, String shortCircuitUser,
                                                        int readOffset, boolean shortCircuitFails) throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
           .format(true).build();
  FileSystem fs = cluster.getFileSystem();
  // check that / exists
  Path path = new Path("/");
  URI uri = cluster.getURI();
  assertTrue("/ should be a directory", fs.getFileStatus(path)
              .isDirectory() == true);

  byte[] fileData = AppendTestUtil.randomBytes(seed, size);
  Path file1 = new Path("filelocal.dat");
  FSDataOutputStream stm = createFile(fs, file1, 1);

  stm.write(fileData);
  stm.close();
  try {
    checkFileContent(uri, file1, fileData, readOffset, shortCircuitUser, 
        conf, shortCircuitFails);
    // RemoteBlockReader does not support read(ByteBuffer bf)
    assertTrue("RemoteBlockReader unsupported method read(ByteBuffer bf) error",
                  checkUnsupportedMethod(fs, file1, fileData, readOffset));
  } catch(IOException e) {
    throw new IOException("doTestShortCircuitReadWithRemoteBlockReader ex error ", e);
  } catch(InterruptedException inEx) {
    throw inEx;
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 42, Source: TestShortCircuitLocalRead.java

Example 12: setUpBeforeClass

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
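  // enable append support, which the HBase WAL requires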
  conf.setBoolean("dfs.support.append", true);
  // The config below is supported by 0.20-append and CDH3b2
  conf.setInt("dfs.client.block.recovery.retries", 2);
  TEST_UTIL.startMiniCluster(3);
  Path hbaseRootDir =
    TEST_UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase"));
  LOG.info("hbase.rootdir=" + hbaseRootDir);
  FSUtils.setRootDir(conf, hbaseRootDir);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 13, Source: TestWALReplay.java

Example 13: testSecondaryNameNodeWithDelegationTokens

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Regression test for HDFS-3835 - "Long-lived 2NN cannot perform a
 * checkpoint if security is enabled and the NN restarts without outstanding
 * delegation tokens"
 */
@Test
public void testSecondaryNameNodeWithDelegationTokens() throws IOException {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .format(true).build();
    
    assertNotNull(cluster.getNamesystem().getDelegationToken(new Text("atm")));

    secondary = startSecondaryNameNode(conf);

    // Checkpoint once, so the 2NN loads the DT into its in-memory state.
    secondary.doCheckpoint();
    
    // Perform a saveNamespace, so that the NN has a new fsimage, and the 2NN
    // therefore needs to download a new fsimage the next time it performs a
    // checkpoint.
    cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    cluster.getNameNodeRpc().saveNamespace();
    cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
    
    // Ensure that the 2NN can still perform a checkpoint.
    secondary.doCheckpoint();
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 41, Source: TestCheckpoint.java

Example 14: setup

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@BeforeClass
public static void setup() throws Exception {
  conf = new Configuration();
  conf.set(YarnConfiguration.RECOVERY_ENABLED, "true");
  conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
      YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
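  // enable work-preserving RM recovery so running work survives a restart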
  conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true);
  conf.setLong(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS, 0);
}
 
Developer ID: naver, Project: hadoop, Lines: 11, Source: TestAMRMClientOnRMRestart.java

Example 15: testNoPings

import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Switch off setting socketTimeout values on RPC sockets.
 * Verify that RPC calls still work ok.
 */
public void testNoPings() throws Exception {
  Configuration conf = new Configuration();

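  // disable the periodic client-to-server ping on RPC connections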
  conf.setBoolean("ipc.client.ping", false);
  new TestRPC().testCallsInternal(conf);

  conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY, 2);
  new TestRPC().testCallsInternal(conf);
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 14, Source: TestRPC.java


Note: The org.apache.hadoop.conf.Configuration.setBoolean method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. Consult the corresponding project's License before distributing or using the code. Do not republish without permission.