

Java Configuration.setLong Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.setLong. If you are asking yourself what Configuration.setLong does, how to call it, or where to find it used in practice, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.conf.Configuration.
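Before diving into the examples, here is a minimal, self-contained sketch of the basic round trip: setLong stores the long as a string under the given property key, and getLong parses it back, returning the supplied default when the key is absent. The property keys used below (demo.block.size, demo.unset.key) are made up for illustration.

import org.apache.hadoop.conf.Configuration;

public class SetLongDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Store a long value under a (hypothetical) property key.
    conf.setLong("demo.block.size", 128L * 1024 * 1024);

    // Read it back; the second argument is the default returned
    // when the key was never set.
    long blockSize = conf.getLong("demo.block.size", 64L * 1024 * 1024);
    long missing = conf.getLong("demo.unset.key", -1L);

    System.out.println(blockSize); // 134217728
    System.out.println(missing);   // -1
  }
}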


The sections below present 15 code examples of the Configuration.setLong method, sorted by popularity by default.

Example 1: testFinalizedReplicas

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test public void testFinalizedReplicas() throws Exception {
  // bring up a cluster of 3 datanodes
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    // test finalized replicas
    final String TopDir = "/test";
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("TestDatanodeRestart").setNumFiles(2).build();
    util.createFiles(fs, TopDir, (short)3);
    util.waitReplication(fs, TopDir, (short)3);
    util.checkFiles(fs, TopDir);
    cluster.restartDataNodes();
    cluster.waitActive();
    util.checkFiles(fs, TopDir);
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 24, Source: TestDatanodeRestart.java

Example 2: testAsyncAPIPollTimeoutHelper

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
private void testAsyncAPIPollTimeoutHelper(Long valueForTimeout,
    boolean expectedTimeoutEnforcement) {
  YarnClientImpl client = new YarnClientImpl();
  try {
    Configuration conf = new Configuration();
    if (valueForTimeout != null) {
      conf.setLong(
          YarnConfiguration.YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_TIMEOUT_MS,
          valueForTimeout);
    }

    client.init(conf);

    Assert.assertEquals(
        expectedTimeoutEnforcement, client.enforceAsyncAPITimeout());
  } finally {
    IOUtils.closeQuietly(client);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 20, Source: TestYarnClient.java

Example 3: createJob

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public Job createJob(int numMapper, int numReducer, 
                     long mapSleepTime, int mapSleepCount, 
                     long reduceSleepTime, int reduceSleepCount) 
    throws IOException {
  Configuration conf = getConf();
  conf.setLong(MAP_SLEEP_TIME, mapSleepTime);
  conf.setLong(REDUCE_SLEEP_TIME, reduceSleepTime);
  conf.setInt(MAP_SLEEP_COUNT, mapSleepCount);
  conf.setInt(REDUCE_SLEEP_COUNT, reduceSleepCount);
  conf.setInt(MRJobConfig.NUM_MAPS, numMapper);
  Job job = Job.getInstance(conf, "sleep");
  job.setNumReduceTasks(numReducer);
  job.setJarByClass(SleepJob.class);
  job.setMapperClass(SleepMapper.class);
  job.setMapOutputKeyClass(IntWritable.class);
  job.setMapOutputValueClass(NullWritable.class);
  job.setReducerClass(SleepReducer.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setInputFormatClass(SleepInputFormat.class);
  job.setPartitionerClass(SleepJobPartitioner.class);
  job.setSpeculativeExecution(false);
  job.setJobName("Sleep job");
  FileInputFormat.addInputPath(job, new Path("ignored"));
  return job;
}
 
Developer ID: naver, Project: hadoop, Lines: 26, Source: SleepJob.java
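Example 3 shows only the driver side: createJob stores the sleep parameters in the job configuration before submission. On the task side, a mapper typically reads those values back in setup(), which is what SleepJob's SleepMapper does with MAP_SLEEP_TIME. Below is a hedged sketch of that consumer side; the key demo.map.sleep.time and the class name are hypothetical, and only the getLong read-back pattern is the point.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Mapper;

public class SleepLikeMapper
    extends Mapper<IntWritable, IntWritable, IntWritable, NullWritable> {

  private long sleepTime;

  @Override
  protected void setup(Context context) {
    Configuration conf = context.getConfiguration();
    // Read back the long the driver stored with setLong; the second
    // argument is the fallback if the driver never set the key.
    sleepTime = conf.getLong("demo.map.sleep.time", 100L);
  }

  @Override
  protected void map(IntWritable key, IntWritable value, Context context)
      throws IOException, InterruptedException {
    // Simulate work by sleeping for the configured duration.
    Thread.sleep(sleepTime);
  }
}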

Example 4: hFlushFi01_c

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/** Similar to {@link #hFlushFi01_b()} but writing happens
 * across block and checksum boundaries.
 */
@Test
public void hFlushFi01_c() throws Exception { 
  final String methodName = FiTestUtil.getMethodName();
  Configuration conf = new HdfsConfiguration();
  int customPerChecksumSize = 400;
  int customBlockSize = customPerChecksumSize * 3;
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
  runDiskErrorTest(conf, methodName, 
      customBlockSize, new DerrAction(methodName, 0), 0, true);
}
 
Developer ID: naver, Project: hadoop, Lines: 15, Source: TestFiHFlush.java

Example 5: setupBeforeClass

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@BeforeClass
public static void setupBeforeClass() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      RowProcessorEndpoint.class.getName());
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
  conf.setLong("hbase.hregion.row.processor.timeout", 1000L);
  util.startMiniCluster();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 10, Source: TestRowProcessorEndpoint.java

Example 6: testPreReceiptVerificationDfsClientCanDoScr

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test(timeout=60000)
public void testPreReceiptVerificationDfsClientCanDoScr() throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testPreReceiptVerificationDfsClientCanDoScr", sockDir);
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  fs.getClient().getConf().brfFailureInjector =
      new TestPreReceiptVerificationFailureInjector();
  final Path TEST_PATH1 = new Path("/test_file1");
  DFSTestUtil.createFile(fs, TEST_PATH1, 4096, (short)1, 0xFADE2);
  final Path TEST_PATH2 = new Path("/test_file2");
  DFSTestUtil.createFile(fs, TEST_PATH2, 4096, (short)1, 0xFADE2);
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
  DFSTestUtil.readFileBuffer(fs, TEST_PATH2);
  ShortCircuitRegistry registry =
      cluster.getDataNodes().get(0).getShortCircuitRegistry();
  registry.visit(new ShortCircuitRegistry.Visitor() {
    @Override
    public void accept(HashMap<ShmId, RegisteredShm> segments,
                       HashMultimap<ExtendedBlockId, Slot> slots) {
      Assert.assertEquals(1, segments.size());
      Assert.assertEquals(2, slots.size());
    }
  });
  cluster.shutdown();
  sockDir.close();
}
 
Developer ID: naver, Project: hadoop, Lines: 34, Source: TestShortCircuitCache.java

Example 7: testEOFWithRemoteBlockReader

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test(timeout=60000)
public void testEOFWithRemoteBlockReader() throws Exception {
  final Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD, BLOCK_SIZE);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  testEOF(cluster, 1);
  testEOF(cluster, 14);
  testEOF(cluster, 10000);   
  cluster.shutdown();
}
 
Developer ID: naver, Project: hadoop, Lines: 12, Source: TestRead.java

Example 8: setUp

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  // Stall the standby checkpointer in two ways
  conf.setLong(DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, Long.MAX_VALUE);
  conf.setLong(DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 20);
  // Make it autoroll after 10 edits
  conf.setFloat(DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD, 0.5f);
  conf.setInt(DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS, 100);

  int retryCount = 0;
  while (true) {
    try {
      int basePort = 10060 + random.nextInt(100) * 2;
      MiniDFSNNTopology topology = new MiniDFSNNTopology()
          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
              .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
              .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));

      cluster = new MiniDFSCluster.Builder(conf)
          .nnTopology(topology)
          .numDataNodes(0)
          .build();
      cluster.waitActive();

      nn0 = cluster.getNameNode(0);
      fs = HATestUtil.configureFailoverFs(cluster, conf);

      cluster.transitionToActive(0);

      fs = cluster.getFileSystem(0);
      editLog = nn0.getNamesystem().getEditLog();
      ++retryCount;
      break;
    } catch (BindException e) {
      LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
          + retryCount + " times");
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 41, Source: TestEditLogAutoroll.java

Example 9: testWhenDecreasingReplication

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * The corrupt block has to be removed when the number of valid replicas
 * matches the replication factor for the file. Here that condition is
 * tested by reducing the replication factor.
 * The test strategy:
 *   Bring up a cluster with 3 DataNodes
 *   Create a file with replication factor 3
 *   Corrupt one replica of a block of the file
 *   Verify that there are still 2 good replicas and 1 corrupt replica
 *     (the corrupt replica is not removed, since the number of good
 *      replicas (2) is less than the replication factor (3))
 *   Set the replication factor to 2
 *   Verify that the corrupt replica is removed
 *     (the corrupt replica is removed, since the number of good
 *      replicas (2) now equals the replication factor (2))
 */
@Test
public void testWhenDecreasingReplication() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fs = cluster.getFileSystem();
  final FSNamesystem namesystem = cluster.getNamesystem();

  try {
    final Path fileName = new Path("/foo1");
    DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short) 3);

    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    corruptBlock(cluster, fs, fileName, 0, block);

    DFSTestUtil.waitReplication(fs, fileName, (short) 2);

    assertEquals(2, countReplicas(namesystem, block).liveReplicas());
    assertEquals(1, countReplicas(namesystem, block).corruptReplicas());

    namesystem.setReplication(fileName.toString(), (short) 2);

    // wait for 3 seconds so that all block reports are processed.
    try {
      Thread.sleep(3000);
    } catch (InterruptedException ignored) {
    }

    assertEquals(2, countReplicas(namesystem, block).liveReplicas());
    assertEquals(0, countReplicas(namesystem, block).corruptReplicas());

  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 54, Source: TestProcessCorruptBlocks.java

Example 10: setUp

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
      .build();
  cluster.waitActive();

  fsn = cluster.getNamesystem();
  fsdir = fsn.getFSDirectory();
  hdfs = cluster.getFileSystem();
  dirTree = new TestDirectoryTree(DIRECTORY_TREE_LEVEL, hdfs);
}
 
Developer ID: naver, Project: hadoop, Lines: 14, Source: TestSnapshot.java

Example 11: testPluggingInHeapMemoryTuner

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test
public void testPluggingInHeapMemoryTuner() throws Exception {
  BlockCacheStub blockCache = new BlockCacheStub((long) (maxHeapSize * 0.4));
  MemstoreFlusherStub memStoreFlusher = new MemstoreFlusherStub((long) (maxHeapSize * 0.4));
  Configuration conf = HBaseConfiguration.create();
  conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MAX_RANGE_KEY, 0.78f);
  conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MIN_RANGE_KEY, 0.05f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.75f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY, 0.02f);
  conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD, 1000);
  conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE, 0);
  conf.setClass(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_CLASS, CustomHeapMemoryTuner.class,
      HeapMemoryTuner.class);
  // Let the system start with default values for memstore heap and block cache size.
  HeapMemoryManager heapMemoryManager = new HeapMemoryManager(blockCache, memStoreFlusher,
      new RegionServerStub(conf), new RegionServerAccountingStub());
  final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
  heapMemoryManager.start(choreService);
  // Now we want to be in write mode: request a bigger memstore via CustomHeapMemoryTuner
  CustomHeapMemoryTuner.memstoreSize = 0.78f;
  CustomHeapMemoryTuner.blockCacheSize = 0.02f;
  // Allow the tuner to run once and apply the requested memory changes
  waitForTune(memStoreFlusher, memStoreFlusher.memstoreSize);
  assertHeapSpace(0.78f, memStoreFlusher.memstoreSize);// Memstore
  assertHeapSpace(0.02f, blockCache.maxSize);// BlockCache
  // Now we want to be in read mode: request a bigger block cache via CustomHeapMemoryTuner
  CustomHeapMemoryTuner.blockCacheSize = 0.75f;
  CustomHeapMemoryTuner.memstoreSize = 0.05f;
  // Allow the tuner to run once and apply the requested memory changes
  waitForTune(memStoreFlusher, memStoreFlusher.memstoreSize);
  assertHeapSpace(0.75f, blockCache.maxSize);// BlockCache
  assertHeapSpace(0.05f, memStoreFlusher.memstoreSize);// Memstore
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 34, Source: TestHeapMemoryManager.java

Example 12: testRevocation

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Test that when we have an uncache request, and the client refuses to release
 * the replica for a long time, we will un-mlock it.
 */
@Test(timeout=120000)
public void testRevocation() throws Exception {
  assumeTrue(NativeCodeLoader.isNativeCodeLoaded() && !Path.WINDOWS);
  BlockReaderTestUtil.enableHdfsCachingTracing();
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  Configuration conf = getDefaultConf();
  // Set a really short revocation timeout.
  conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS, 250L);
  // Poll very often
  conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS, 2L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem dfs = cluster.getFileSystem();

  // Create and cache a file.
  final String TEST_FILE = "/test_file2";
  DFSTestUtil.createFile(dfs, new Path(TEST_FILE),
      BLOCK_SIZE, (short)1, 0xcafe);
  dfs.addCachePool(new CachePoolInfo("pool"));
  long cacheDirectiveId =
      dfs.addCacheDirective(new CacheDirectiveInfo.Builder().
          setPool("pool").setPath(new Path(TEST_FILE)).
          setReplication((short) 1).build());
  FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
  DFSTestUtil.verifyExpectedCacheUsage(BLOCK_SIZE, 1, fsd);

  // Mmap the file.
  FSDataInputStream in = dfs.open(new Path(TEST_FILE));
  ByteBuffer buf =
      in.read(null, BLOCK_SIZE, EnumSet.noneOf(ReadOption.class));

  // Attempt to uncache file.  The file should get uncached.
  LOG.info("removing cache directive {}", cacheDirectiveId);
  dfs.removeCacheDirective(cacheDirectiveId);
  LOG.info("finished removing cache directive {}", cacheDirectiveId);
  Thread.sleep(1000);
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);

  // Cleanup
  in.releaseBuffer(buf);
  in.close();
  cluster.shutdown();
}
 
Developer ID: naver, Project: hadoop, Lines: 49, Source: TestFsDatasetCacheRevocation.java

Example 13: testDataXceiverHandlesRequestShortCircuitShmFailure

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test(timeout=60000)
public void testDataXceiverHandlesRequestShortCircuitShmFailure()
    throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testDataXceiverHandlesRequestShortCircuitShmFailure", sockDir);
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path TEST_PATH1 = new Path("/test_file1");
  DFSTestUtil.createFile(fs, TEST_PATH1, 4096,
      (short)1, 0xFADE1);
  LOG.info("Setting failure injector and performing a read which " +
      "should fail...");
  DataNodeFaultInjector failureInjector = Mockito.mock(DataNodeFaultInjector.class);
  Mockito.doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      throw new IOException("injected error into sendShmResponse");
    }
  }).when(failureInjector).sendShortCircuitShmResponse();
  DataNodeFaultInjector prevInjector = DataNodeFaultInjector.instance;
  DataNodeFaultInjector.instance = failureInjector;

  try {
    // The first read will try to allocate a shared memory segment and slot.
    // The shared memory segment allocation will fail because of the failure
    // injector.
    DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
    Assert.fail("expected readFileBuffer to fail, but it succeeded.");
  } catch (Throwable t) {
    GenericTestUtils.assertExceptionContains("TCP reads were disabled for " +
        "testing, but we failed to do a non-TCP read.", t);
  }

  checkNumberOfSegmentsAndSlots(0, 0,
      cluster.getDataNodes().get(0).getShortCircuitRegistry());

  LOG.info("Clearing failure injector and performing another read...");
  DataNodeFaultInjector.instance = prevInjector;

  fs.getClient().getClientContext().getDomainSocketFactory().clearPathMap();

  // The second read should succeed.
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);

  // We should have added a new short-circuit shared memory segment and slot.
  checkNumberOfSegmentsAndSlots(1, 1,
      cluster.getDataNodes().get(0).getShortCircuitRegistry());

  cluster.shutdown();
  sockDir.close();
}
 
Developer ID: naver, Project: hadoop, Lines: 58, Source: TestShortCircuitCache.java

Example 14: testMapCount

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public void testMapCount() throws Exception {
  String namenode = null;
  MiniDFSCluster dfs = null;
  MiniDFSCluster mr = null;
  try {
    Configuration conf = new Configuration();
    
    dfs= new MiniDFSCluster.Builder(conf).numDataNodes(3).format(true).build();
    
    FileSystem fs = dfs.getFileSystem();
    final FsShell shell = new FsShell(conf);
    namenode = fs.getUri().toString();
    MyFile[] files = createFiles(fs.getUri(), "/srcdat");
    long totsize = 0;
    for (MyFile f : files) {
      totsize += f.getSize();
    }
    
    Configuration job = new JobConf(conf);
    job.setLong("distcp.bytes.per.map", totsize / 3);
    ToolRunner.run(new DistCpV1(job),
        new String[] {"-m", "100",
                      "-log",
                      namenode+"/logs",
                      namenode+"/srcdat",
                      namenode+"/destdat"});
    assertTrue("Source and destination directories do not match.",
               checkFiles(fs, "/destdat", files));

    String logdir = namenode + "/logs";
    System.out.println(execCmd(shell, "-lsr", logdir));
    FileStatus[] logs = fs.listStatus(new Path(logdir));
    // rare case where splits are exact, logs.length can be 4
    assertTrue( logs.length == 2);

    deldir(fs, "/destdat");
    deldir(fs, "/logs");
    ToolRunner.run(new DistCpV1(job),
        new String[] {"-m", "1",
                      "-log",
                      namenode+"/logs",
                      namenode+"/srcdat",
                      namenode+"/destdat"});

    System.out.println(execCmd(shell, "-lsr", logdir));
    logs = fs.globStatus(new Path(namenode+"/logs/part*"));
    assertTrue("Unexpected map count, logs.length=" + logs.length,
        logs.length == 1);
  } finally {
    if (dfs != null) { dfs.shutdown(); }
    if (mr != null) { mr.shutdown(); }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 54, Source: TestCopyFiles.java

Example 15: disableBlockScanner

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
private static void disableBlockScanner(Configuration conf) {
  conf.setLong(DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND, 0L);
}
 
Developer ID: naver, Project: hadoop, Lines: 4, Source: TestBlockScanner.java


Note: The org.apache.hadoop.conf.Configuration.setLong examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Before distributing or reusing any code, refer to the license of the corresponding project; please do not republish this compilation without permission.