

Java Configuration.setLong Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.setLong, drawn from open-source projects. If you are wondering what Configuration.setLong does, how to use it, or where to find concrete examples, the curated code samples below should help. You can also explore further usage examples of org.apache.hadoop.conf.Configuration, the class this method belongs to.


The following presents 15 code examples of Configuration.setLong, ordered by popularity by default.
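Before working through the examples, here is a minimal sketch of the basic pattern, using only the public Configuration API (the key name "my.app.timeout.ms" is a hypothetical placeholder, not a real Hadoop key): setLong(String name, long value) stores a long-valued property, and getLong(String name, long defaultValue) reads it back, returning the default when the key is absent.

import org.apache.hadoop.conf.Configuration;

public class SetLongDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Store a long-valued property under an illustrative, made-up key.
    conf.setLong("my.app.timeout.ms", 30000L);
    // Read it back; the second argument is the default returned
    // when the key is not present in the configuration.
    long timeout = conf.getLong("my.app.timeout.ms", 10000L);
    System.out.println("timeout = " + timeout); // prints: timeout = 30000
  }
}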

Example 1: testFinalizedReplicas

import org.apache.hadoop.conf.Configuration; // the package/class this method depends on
@Test public void testFinalizedReplicas() throws Exception {
  // bring up a cluster of 3
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    // test finalized replicas
    final String TopDir = "/test";
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("TestDatanodeRestart").setNumFiles(2).build();
    util.createFiles(fs, TopDir, (short)3);
    util.waitReplication(fs, TopDir, (short)3);
    util.checkFiles(fs, TopDir);
    cluster.restartDataNodes();
    cluster.waitActive();
    util.checkFiles(fs, TopDir);
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 24, Source: TestDatanodeRestart.java

Example 2: testAsyncAPIPollTimeoutHelper

import org.apache.hadoop.conf.Configuration; // the package/class this method depends on
private void testAsyncAPIPollTimeoutHelper(Long valueForTimeout,
    boolean expectedTimeoutEnforcement) {
  YarnClientImpl client = new YarnClientImpl();
  try {
    Configuration conf = new Configuration();
    if (valueForTimeout != null) {
      conf.setLong(
          YarnConfiguration.YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_TIMEOUT_MS,
          valueForTimeout);
    }

    client.init(conf);

    Assert.assertEquals(
        expectedTimeoutEnforcement, client.enforceAsyncAPITimeout());
  } finally {
    IOUtils.closeQuietly(client);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 20, Source: TestYarnClient.java

Example 3: createJob

import org.apache.hadoop.conf.Configuration; // the package/class this method depends on
public Job createJob(int numMapper, int numReducer, 
                     long mapSleepTime, int mapSleepCount, 
                     long reduceSleepTime, int reduceSleepCount) 
    throws IOException {
  Configuration conf = getConf();
  conf.setLong(MAP_SLEEP_TIME, mapSleepTime);
  conf.setLong(REDUCE_SLEEP_TIME, reduceSleepTime);
  conf.setInt(MAP_SLEEP_COUNT, mapSleepCount);
  conf.setInt(REDUCE_SLEEP_COUNT, reduceSleepCount);
  conf.setInt(MRJobConfig.NUM_MAPS, numMapper);
  Job job = Job.getInstance(conf, "sleep");
  job.setNumReduceTasks(numReducer);
  job.setJarByClass(SleepJob.class);
  job.setMapperClass(SleepMapper.class);
  job.setMapOutputKeyClass(IntWritable.class);
  job.setMapOutputValueClass(NullWritable.class);
  job.setReducerClass(SleepReducer.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setInputFormatClass(SleepInputFormat.class);
  job.setPartitionerClass(SleepJobPartitioner.class);
  job.setSpeculativeExecution(false);
  job.setJobName("Sleep job");
  FileInputFormat.addInputPath(job, new Path("ignored"));
  return job;
}
 
Developer ID: naver, Project: hadoop, Lines: 26, Source: SleepJob.java

Example 4: hFlushFi01_c

import org.apache.hadoop.conf.Configuration; // the package/class this method depends on
/** Similar to {@link #hFlushFi01_b()} but the write happens
 * across block and checksum boundaries
 */
@Test
public void hFlushFi01_c() throws Exception { 
  final String methodName = FiTestUtil.getMethodName();
  Configuration conf = new HdfsConfiguration();
  int customPerChecksumSize = 400;
  int customBlockSize = customPerChecksumSize * 3;
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
  runDiskErrorTest(conf, methodName, 
      customBlockSize, new DerrAction(methodName, 0), 0, true);
}
 
Developer ID: naver, Project: hadoop, Lines: 15, Source: TestFiHFlush.java

Example 5: setupBeforeClass

import org.apache.hadoop.conf.Configuration; // the package/class this method depends on
@BeforeClass
public static void setupBeforeClass() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      RowProcessorEndpoint.class.getName());
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
  conf.setLong("hbase.hregion.row.processor.timeout", 1000L);
  util.startMiniCluster();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 10, Source: TestRowProcessorEndpoint.java

Example 6: testPreReceiptVerificationDfsClientCanDoScr

import org.apache.hadoop.conf.Configuration; // the package/class this method depends on
@Test(timeout=60000)
public void testPreReceiptVerificationDfsClientCanDoScr() throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testPreReceiptVerificationDfsClientCanDoScr", sockDir);
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  fs.getClient().getConf().brfFailureInjector =
      new TestPreReceiptVerificationFailureInjector();
  final Path TEST_PATH1 = new Path("/test_file1");
  DFSTestUtil.createFile(fs, TEST_PATH1, 4096, (short)1, 0xFADE2);
  final Path TEST_PATH2 = new Path("/test_file2");
  DFSTestUtil.createFile(fs, TEST_PATH2, 4096, (short)1, 0xFADE2);
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
  DFSTestUtil.readFileBuffer(fs, TEST_PATH2);
  ShortCircuitRegistry registry =
      cluster.getDataNodes().get(0).getShortCircuitRegistry();
  registry.visit(new ShortCircuitRegistry.Visitor() {
    @Override
    public void accept(HashMap<ShmId, RegisteredShm> segments,
                       HashMultimap<ExtendedBlockId, Slot> slots) {
      Assert.assertEquals(1, segments.size());
      Assert.assertEquals(2, slots.size());
    }
  });
  cluster.shutdown();
  sockDir.close();
}
 
Developer ID: naver, Project: hadoop, Lines: 34, Source: TestShortCircuitCache.java

Example 7: testEOFWithRemoteBlockReader

import org.apache.hadoop.conf.Configuration; // the package/class this method depends on
@Test(timeout=60000)
public void testEOFWithRemoteBlockReader() throws Exception {
  final Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD, BLOCK_SIZE);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  testEOF(cluster, 1);
  testEOF(cluster, 14);
  testEOF(cluster, 10000);   
  cluster.shutdown();
}
 
Developer ID: naver, Project: hadoop, Lines: 12, Source: TestRead.java

Example 8: setUp

import org.apache.hadoop.conf.Configuration; // the package/class this method depends on
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  // Stall the standby checkpointer in two ways
  conf.setLong(DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, Long.MAX_VALUE);
  conf.setLong(DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 20);
  // Make it autoroll after 10 edits
  conf.setFloat(DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD, 0.5f);
  conf.setInt(DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS, 100);

  int retryCount = 0;
  while (true) {
    try {
      int basePort = 10060 + random.nextInt(100) * 2;
      MiniDFSNNTopology topology = new MiniDFSNNTopology()
          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
              .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
              .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));

      cluster = new MiniDFSCluster.Builder(conf)
          .nnTopology(topology)
          .numDataNodes(0)
          .build();
      cluster.waitActive();

      nn0 = cluster.getNameNode(0);
      fs = HATestUtil.configureFailoverFs(cluster, conf);

      cluster.transitionToActive(0);

      fs = cluster.getFileSystem(0);
      editLog = nn0.getNamesystem().getEditLog();
      ++retryCount;
      break;
    } catch (BindException e) {
      LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
          + retryCount + " times");
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 41, Source: TestEditLogAutoroll.java

Example 9: testWhenDecreasingReplication

import org.apache.hadoop.conf.Configuration; // the package/class this method depends on
/**
 * A corrupt replica has to be removed once the number of valid replicas
 * matches the replication factor for the file. Here that condition is
 * reached by reducing the replication factor.
 * The test strategy:
 *   Bring up a cluster with 3 DataNodes
 *   Create a file with replication factor 3
 *   Corrupt one replica of a block of the file
 *   Verify that there are still 2 good replicas and 1 corrupt replica
 *     (the corrupt replica is not removed, since the number of good
 *      replicas (2) is less than the replication factor (3))
 *   Set the replication factor to 2
 *   Verify that the corrupt replica is removed
 *     (the corrupt replica is removed, since the number of good
 *      replicas (2) now equals the replication factor (2))
 */
@Test
public void testWhenDecreasingReplication() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fs = cluster.getFileSystem();
  final FSNamesystem namesystem = cluster.getNamesystem();

  try {
    final Path fileName = new Path("/foo1");
    DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short) 3);

    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    corruptBlock(cluster, fs, fileName, 0, block);

    DFSTestUtil.waitReplication(fs, fileName, (short) 2);

    assertEquals(2, countReplicas(namesystem, block).liveReplicas());
    assertEquals(1, countReplicas(namesystem, block).corruptReplicas());

    namesystem.setReplication(fileName.toString(), (short) 2);

    // wait for 3 seconds so that all block reports are processed.
    try {
      Thread.sleep(3000);
    } catch (InterruptedException ignored) {
    }

    assertEquals(2, countReplicas(namesystem, block).liveReplicas());
    assertEquals(0, countReplicas(namesystem, block).corruptReplicas());

  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 54, Source: TestProcessCorruptBlocks.java

Example 10: setUp

import org.apache.hadoop.conf.Configuration; // the package/class this method depends on
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
      .build();
  cluster.waitActive();

  fsn = cluster.getNamesystem();
  fsdir = fsn.getFSDirectory();
  hdfs = cluster.getFileSystem();
  dirTree = new TestDirectoryTree(DIRECTORY_TREE_LEVEL, hdfs);
}
 
Developer ID: naver, Project: hadoop, Lines: 14, Source: TestSnapshot.java

Example 11: testPluggingInHeapMemoryTuner

import org.apache.hadoop.conf.Configuration; // the package/class this method depends on
@Test
public void testPluggingInHeapMemoryTuner() throws Exception {
  BlockCacheStub blockCache = new BlockCacheStub((long) (maxHeapSize * 0.4));
  MemstoreFlusherStub memStoreFlusher = new MemstoreFlusherStub((long) (maxHeapSize * 0.4));
  Configuration conf = HBaseConfiguration.create();
  conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MAX_RANGE_KEY, 0.78f);
  conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MIN_RANGE_KEY, 0.05f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.75f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY, 0.02f);
  conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD, 1000);
  conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE, 0);
  conf.setClass(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_CLASS, CustomHeapMemoryTuner.class,
      HeapMemoryTuner.class);
  // Let the system start with default values for memstore heap and block cache size.
  HeapMemoryManager heapMemoryManager = new HeapMemoryManager(blockCache, memStoreFlusher,
      new RegionServerStub(conf), new RegionServerAccountingStub());
  final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
  heapMemoryManager.start(choreService);
  // Now we want to be in write mode. Request a bigger memstore size via CustomHeapMemoryTuner
  CustomHeapMemoryTuner.memstoreSize = 0.78f;
  CustomHeapMemoryTuner.blockCacheSize = 0.02f;
  // Allow the tuner to run once and apply the requested memory changes
  waitForTune(memStoreFlusher, memStoreFlusher.memstoreSize);
  assertHeapSpace(0.78f, memStoreFlusher.memstoreSize);// Memstore
  assertHeapSpace(0.02f, blockCache.maxSize);// BlockCache
  // Now we want to be in read mode. Request a bigger block cache size via CustomHeapMemoryTuner
  CustomHeapMemoryTuner.blockCacheSize = 0.75f;
  CustomHeapMemoryTuner.memstoreSize = 0.05f;
  // Allow the tuner to run once and apply the requested memory changes
  waitForTune(memStoreFlusher, memStoreFlusher.memstoreSize);
  assertHeapSpace(0.75f, blockCache.maxSize);// BlockCache
  assertHeapSpace(0.05f, memStoreFlusher.memstoreSize);// Memstore
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 34, Source: TestHeapMemoryManager.java

Example 12: testRevocation

import org.apache.hadoop.conf.Configuration; // the package/class this method depends on
/**
 * Test that when we have an uncache request, and the client refuses to release
 * the replica for a long time, we will un-mlock it.
 */
@Test(timeout=120000)
public void testRevocation() throws Exception {
  assumeTrue(NativeCodeLoader.isNativeCodeLoaded() && !Path.WINDOWS);
  BlockReaderTestUtil.enableHdfsCachingTracing();
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  Configuration conf = getDefaultConf();
  // Set a really short revocation timeout.
  conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS, 250L);
  // Poll very often
  conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS, 2L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem dfs = cluster.getFileSystem();

  // Create and cache a file.
  final String TEST_FILE = "/test_file2";
  DFSTestUtil.createFile(dfs, new Path(TEST_FILE),
      BLOCK_SIZE, (short)1, 0xcafe);
  dfs.addCachePool(new CachePoolInfo("pool"));
  long cacheDirectiveId =
      dfs.addCacheDirective(new CacheDirectiveInfo.Builder().
          setPool("pool").setPath(new Path(TEST_FILE)).
          setReplication((short) 1).build());
  FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
  DFSTestUtil.verifyExpectedCacheUsage(BLOCK_SIZE, 1, fsd);

  // Mmap the file.
  FSDataInputStream in = dfs.open(new Path(TEST_FILE));
  ByteBuffer buf =
      in.read(null, BLOCK_SIZE, EnumSet.noneOf(ReadOption.class));

  // Attempt to uncache file.  The file should get uncached.
  LOG.info("removing cache directive {}", cacheDirectiveId);
  dfs.removeCacheDirective(cacheDirectiveId);
  LOG.info("finished removing cache directive {}", cacheDirectiveId);
  Thread.sleep(1000);
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);

  // Cleanup
  in.releaseBuffer(buf);
  in.close();
  cluster.shutdown();
}
 
Developer ID: naver, Project: hadoop, Lines: 49, Source: TestFsDatasetCacheRevocation.java

Example 13: testDataXceiverHandlesRequestShortCircuitShmFailure

import org.apache.hadoop.conf.Configuration; // the package/class this method depends on
@Test(timeout=60000)
public void testDataXceiverHandlesRequestShortCircuitShmFailure()
    throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testDataXceiverHandlesRequestShortCircuitShmFailure", sockDir);
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path TEST_PATH1 = new Path("/test_file1");
  DFSTestUtil.createFile(fs, TEST_PATH1, 4096,
      (short)1, 0xFADE1);
  LOG.info("Setting failure injector and performing a read which " +
      "should fail...");
  DataNodeFaultInjector failureInjector = Mockito.mock(DataNodeFaultInjector.class);
  Mockito.doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      throw new IOException("injected error into sendShmResponse");
    }
  }).when(failureInjector).sendShortCircuitShmResponse();
  DataNodeFaultInjector prevInjector = DataNodeFaultInjector.instance;
  DataNodeFaultInjector.instance = failureInjector;

  try {
    // The first read will try to allocate a shared memory segment and slot.
    // The shared memory segment allocation will fail because of the failure
    // injector.
    DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
    Assert.fail("expected readFileBuffer to fail, but it succeeded.");
  } catch (Throwable t) {
    GenericTestUtils.assertExceptionContains("TCP reads were disabled for " +
        "testing, but we failed to do a non-TCP read.", t);
  }

  checkNumberOfSegmentsAndSlots(0, 0,
      cluster.getDataNodes().get(0).getShortCircuitRegistry());

  LOG.info("Clearing failure injector and performing another read...");
  DataNodeFaultInjector.instance = prevInjector;

  fs.getClient().getClientContext().getDomainSocketFactory().clearPathMap();

  // The second read should succeed.
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);

  // We should have added a new short-circuit shared memory segment and slot.
  checkNumberOfSegmentsAndSlots(1, 1,
      cluster.getDataNodes().get(0).getShortCircuitRegistry());

  cluster.shutdown();
  sockDir.close();
}
 
Developer ID: naver, Project: hadoop, Lines: 58, Source: TestShortCircuitCache.java

Example 14: testMapCount

import org.apache.hadoop.conf.Configuration; // the package/class this method depends on
public void testMapCount() throws Exception {
  String namenode = null;
  MiniDFSCluster dfs = null;
  MiniDFSCluster mr = null;
  try {
    Configuration conf = new Configuration();
    
    dfs = new MiniDFSCluster.Builder(conf).numDataNodes(3).format(true).build();
    
    FileSystem fs = dfs.getFileSystem();
    final FsShell shell = new FsShell(conf);
    namenode = fs.getUri().toString();
    MyFile[] files = createFiles(fs.getUri(), "/srcdat");
    long totsize = 0;
    for (MyFile f : files) {
      totsize += f.getSize();
    }
    
    Configuration job = new JobConf(conf);
    job.setLong("distcp.bytes.per.map", totsize / 3);
    ToolRunner.run(new DistCpV1(job),
        new String[] {"-m", "100",
                      "-log",
                      namenode+"/logs",
                      namenode+"/srcdat",
                      namenode+"/destdat"});
    assertTrue("Source and destination directories do not match.",
               checkFiles(fs, "/destdat", files));

    String logdir = namenode + "/logs";
    System.out.println(execCmd(shell, "-lsr", logdir));
    FileStatus[] logs = fs.listStatus(new Path(logdir));
    // rare case where splits are exact, logs.length can be 4
    assertTrue(logs.length == 2);

    deldir(fs, "/destdat");
    deldir(fs, "/logs");
    ToolRunner.run(new DistCpV1(job),
        new String[] {"-m", "1",
                      "-log",
                      namenode+"/logs",
                      namenode+"/srcdat",
                      namenode+"/destdat"});

    System.out.println(execCmd(shell, "-lsr", logdir));
    logs = fs.globStatus(new Path(namenode+"/logs/part*"));
    assertTrue("Unexpected map count, logs.length=" + logs.length,
        logs.length == 1);
  } finally {
    if (dfs != null) { dfs.shutdown(); }
    if (mr != null) { mr.shutdown(); }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 54, Source: TestCopyFiles.java

Example 15: disableBlockScanner

import org.apache.hadoop.conf.Configuration; // the package/class this method depends on
private static void disableBlockScanner(Configuration conf) {
  conf.setLong(DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND, 0L);
}
 
Developer ID: naver, Project: hadoop, Lines: 4, Source: TestBlockScanner.java


Note: The org.apache.hadoop.conf.Configuration.setLong method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. For distribution and use, please consult the corresponding project's license; do not reproduce without permission.