

Java Configuration.setInt Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.setInt. If you are unsure what Configuration.setInt does, how to call it, or want to see it used in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.conf.Configuration.


The following presents 15 code examples of Configuration.setInt, drawn from open-source projects and ordered by popularity by default.
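Before the project-specific examples, here is a minimal, self-contained sketch of the setInt/getInt round trip. The property name "my.app.retry.count" and its values are illustrative assumptions made for this sketch, not keys defined by Hadoop.

import org.apache.hadoop.conf.Configuration;

public class SetIntSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Store an int under an arbitrary property name
    // ("my.app.retry.count" is a made-up key used only for illustration).
    conf.setInt("my.app.retry.count", 3);

    // Read it back; the second argument is the default returned
    // when the property has not been set.
    int retries = conf.getInt("my.app.retry.count", 1);
    System.out.println("retries = " + retries);  // prints "retries = 3"
  }
}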

Example 1: getProxyForAddress

import org.apache.hadoop.conf.Configuration; // import the package/class required by the method
private HAServiceProtocol getProxyForAddress(Configuration conf,
    int timeoutMs, InetSocketAddress addr) throws IOException {
  Configuration confCopy = new Configuration(conf);
  // Lower the timeout so we quickly fail to connect
  confCopy.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
  SocketFactory factory = NetUtils.getDefaultSocketFactory(confCopy);
  return new HAServiceProtocolClientSideTranslatorPB(
      addr,
      confCopy, factory, timeoutMs);
}
 
Developer: nucypher | Project: hadoop-oss | Lines: 12 | Source: HAServiceTarget.java

Example 2: setUp

import org.apache.hadoop.conf.Configuration; // import the package/class required by the method
@Before
public void setUp() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 5);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
      .createJournalURI("/bootstrapStandby").toString());
  BKJMUtil.addJournalManagerDefinition(conf);
  conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
      SlowCodec.class.getCanonicalName());
  CompressionCodecFactory.setCodecClasses(conf,
      ImmutableList.<Class> of(SlowCodec.class));
  MiniDFSNNTopology topology = new MiniDFSNNTopology()
      .addNameservice(new MiniDFSNNTopology.NSConf("ns1").addNN(
          new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001)).addNN(
          new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
  cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology)
      .numDataNodes(1).manageNameDfsSharedDirs(false).build();
  cluster.waitActive();
}
 
Developer: naver | Project: hadoop | Lines: 23 | Source: TestBootstrapStandbyWithBKJM.java

Example 3: configureServlets

import org.apache.hadoop.conf.Configuration; // import the package/class required by the method
@Override
protected void configureServlets() {
  bind(JAXBContextResolver.class);
  bind(RMWebServices.class);
  bind(GenericExceptionHandler.class);
  Configuration rmconf = new Configuration();
  rmconf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
    YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
  rmconf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
    ResourceScheduler.class);
  rmconf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
  rm = new MockRM(rmconf);
  bind(ResourceManager.class).toInstance(rm);
  if (isKerberosAuth == true) {
    filter("/*").through(TestKerberosAuthFilter.class);
  } else {
    filter("/*").through(TestSimpleAuthFilter.class);
  }
  serve("/*").with(GuiceContainer.class);
}
 
Developer: naver | Project: hadoop | Lines: 21 | Source: TestRMWebServicesDelegationTokens.java

Example 4: setupConf

import org.apache.hadoop.conf.Configuration; // import the package/class required by the method
private static void setupConf(Configuration conf) {
  // disable the ui
  conf.setInt("hbase.regionsever.info.port", -1);
  // change the flush size to a small amount, regulating number of store files
  conf.setInt("hbase.hregion.memstore.flush.size", 25000);
  // so make sure we get a compaction when doing a load, but keep around some
  // files in the store
  conf.setInt("hbase.hstore.compaction.min", 2);
  conf.setInt("hbase.hstore.compactionThreshold", 5);
  // block writes if we get to 12 store files
  conf.setInt("hbase.hstore.blockingStoreFiles", 12);
  // Ensure no extra cleaners on by default (e.g. TimeToLiveHFileCleaner)
  conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, "");
  conf.set(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS, "");
  // Enable snapshot
  conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
  conf.setLong(SnapshotHFileCleaner.HFILE_CACHE_REFRESH_PERIOD_CONF_KEY, cacheRefreshPeriod);
  conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
    ConstantSizeRegionSplitPolicy.class.getName());

}
 
Developer: fengchen8086 | Project: ditb | Lines: 22 | Source: TestSnapshotFromMaster.java

Example 5: propagateOptionsToJob

import org.apache.hadoop.conf.Configuration; // import the package/class required by the method
@Override
protected void propagateOptionsToJob(Job job) {
  super.propagateOptionsToJob(job);
  Configuration conf = job.getConfiguration();

  // This is an export job, where retrying a failed mapper mostly doesn't make
  // sense, so by default we force MR to run only one attempt per mapper. A user
  // or connector developer can override this behavior by setting SQOOP_EXPORT_MAP_TASK_MAX_ATTEMTPS:
  //
  // * Positive number - allow the specified number of attempts
  // * Negative number - fall back to Hadoop's default number of attempts
  //
  // This matters for most connectors because they commit data directly to the
  // final table, so re-running a mapper would lead to misleading errors about
  // inserting duplicate rows.
  int sqoopMaxAttempts = conf.getInt(SQOOP_EXPORT_MAP_TASK_MAX_ATTEMTPS, 1);
  if (sqoopMaxAttempts > 1) {
    conf.setInt(HADOOP_MAP_TASK_MAX_ATTEMTPS, sqoopMaxAttempts);
  }
}
 
Developer: aliyun | Project: aliyun-maxcompute-data-collectors | Lines: 21 | Source: ExportJobBase.java

Example 6: setupCluster

import org.apache.hadoop.conf.Configuration; // import the package/class required by the method
@SuppressWarnings("rawtypes")
@Before
public void setupCluster() throws Exception {
  Configuration conf = setupCommonConfig();

  // Dial down the retention of extra edits and checkpoints. This is to
  // help catch regressions of HDFS-4238 (SBN should not purge shared edits)
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);

  int retryCount = 0;
  while (true) {
    try {
      int basePort = 10060 + random.nextInt(100) * 2;
      MiniDFSNNTopology topology = new MiniDFSNNTopology()
          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
              .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
              .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));

      cluster = new MiniDFSCluster.Builder(conf)
          .nnTopology(topology)
          .numDataNodes(1)
          .build();
      cluster.waitActive();

      nn0 = cluster.getNameNode(0);
      nn1 = cluster.getNameNode(1);
      fs = HATestUtil.configureFailoverFs(cluster, conf);

      cluster.transitionToActive(0);
      ++retryCount;
      break;
    } catch (BindException e) {
      LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
          + retryCount + " times");
    }
  }
}
 
Developer: naver | Project: hadoop | Lines: 39 | Source: TestStandbyCheckpoints.java

Example 7: testRbwReplicas

import org.apache.hadoop.conf.Configuration; // import the package/class required by the method
public void testRbwReplicas() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  cluster.waitActive();
  try {
    testRbwReplicas(cluster, false);
    testRbwReplicas(cluster, true);
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver | Project: hadoop | Lines: 14 | Source: TestDatanodeRestart.java

Example 8: testStripBOM

import org.apache.hadoop.conf.Configuration; // import the package/class required by the method
@Test
public void testStripBOM() throws IOException {
  // the test data contains a BOM at the start of the file
  // confirm the BOM is skipped by LineRecordReader
  String UTF8_BOM = "\uFEFF";
  URL testFileUrl = getClass().getClassLoader().getResource("testBOM.txt");
  assertNotNull("Cannot find testBOM.txt", testFileUrl);
  File testFile = new File(testFileUrl.getFile());
  Path testFilePath = new Path(testFile.getAbsolutePath());
  long testFileSize = testFile.length();
  Configuration conf = new Configuration();
  conf.setInt(org.apache.hadoop.mapreduce.lib.input.
      LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);

  TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());

  // read the data and check whether BOM is skipped
  FileSplit split = new FileSplit(testFilePath, 0, testFileSize,
      (String[])null);
  LineRecordReader reader = new LineRecordReader();
  reader.initialize(split, context);
  int numRecords = 0;
  boolean firstLine = true;
  boolean skipBOM = true;
  while (reader.nextKeyValue()) {
    if (firstLine) {
      firstLine = false;
      if (reader.getCurrentValue().toString().startsWith(UTF8_BOM)) {
        skipBOM = false;
      }
    }
    ++numRecords;
  }
  reader.close();

  assertTrue("BOM is not skipped", skipBOM);
}
 
Developer: naver | Project: hadoop | Lines: 38 | Source: TestLineRecordReader.java

Example 9: HadoopTrainWorker

import org.apache.hadoop.conf.Configuration; // import the package/class required by the method
public HadoopTrainWorker(Configuration conf,
                         String modelName,
                         String configPath,
                         String configFile,
                         String pyTransformScript,
                         boolean needPyTransform,
                         String loginName,
                         String hostName,
                         int hostPort,
                         int slaveNum,
                         int threadNum,
                         String hadoopQueueName,
                         String hadoopReduceMemory,
                         String user) throws Exception {
    super(modelName, configPath, configFile, pyTransformScript, needPyTransform, loginName, hostName, hostPort, threadNum);
    this.conf = conf;
    this.slaveNum = slaveNum;
    this.hadoopQueueName = hadoopQueueName;
    this.hadoopReduceMemory = hadoopReduceMemory;
    this.user = user;

    conf.set("mapreduce.task.timeout", "720000000");
    conf.set("modelName", modelName);
    conf.set("configFile", configFile);
    conf.set("pyTransformScript", pyTransformScript);
    conf.setBoolean("needPyTransform", needPyTransform);
    conf.set("loginName", loginName);
    conf.set("hostName", hostName);
    conf.setInt("hostPort", hostPort);
    conf.setInt("slaveNum", slaveNum);
    conf.setInt("threadNum", threadNum);

    conf.set("mapreduce.job.queuename", hadoopQueueName);
    conf.set("mapreduce.reduce.memory.mb", hadoopReduceMemory);
    conf.set("mapreduce.reduce.java.opts", "-Xmx" + ((int)((Integer.parseInt(hadoopReduceMemory) * 0.9))) + "m");
    conf.set("yarn.app.mapreduce.am.resource.mb", "" + threadNum);
    conf.set("mapreduce.reduce.cpu.vcores", "" + threadNum);
}
 
Developer: yuantiku | Project: ytk-learn | Lines: 39 | Source: HadoopTrainWorker.java

Example 10: typical

import org.apache.hadoop.conf.Configuration; // import the package/class required by the method
@Test
public void typical() {
  Configuration conf = new Configuration();
  conf.set(ConfigurationVariable.REGION.getName(), "eu-west-1");
  conf.setInt(ConfigurationVariable.UPLOAD_RETRY_COUNT.getName(), 7);
  conf.setLong(ConfigurationVariable.UPLOAD_RETRY_DELAY_MS.getName(), 333L);
  AmazonS3 client = factory.newInstance(conf);
  assertThat(client, is(instanceOf(AmazonS3Client.class)));
  assertThat(client.getRegion(), is(Region.EU_Ireland));
}
 
Developer: HotelsDotCom | Project: circus-train | Lines: 11 | Source: AwsS3ClientFactoryTest.java

Example 11: runFsck

import org.apache.hadoop.conf.Configuration; // import the package/class required by the method
private void runFsck(String command) throws Exception {
  Configuration conf = fs.getConf();
  // Set the dangling cutoff to zero, so every temp blob is considered
  // dangling.
  conf.setInt(NativeAzureFileSystem.AZURE_TEMP_EXPIRY_PROPERTY_NAME, 0);
  WasbFsck fsck = new WasbFsck(conf);
  fsck.setMockFileSystemForTesting(fs);
  fsck.run(new String[] { AzureBlobStorageTestAccount.MOCK_WASB_URI, command });
}
 
Developer: naver | Project: hadoop | Lines: 10 | Source: TestWasbFsck.java

Example 12: setUp

import org.apache.hadoop.conf.Configuration; // import the package/class required by the method
@Before
public void setUp() throws IOException { // Setup server for both protocols
  conf = new Configuration();
  conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH, 1024);
  conf.setBoolean(CommonConfigurationKeys.IPC_SERVER_LOG_SLOW_RPC, true);
  // Set RPC engine to protobuf RPC engine
  RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);
  RPC.setProtocolEngine(conf, TestRpcService2.class, ProtobufRpcEngine.class);

  // Create server side implementation
  PBServerImpl serverImpl = new PBServerImpl();
  BlockingService service = TestProtobufRpcProto
      .newReflectiveBlockingService(serverImpl);

  // Get RPC server for server side implementation
  server = new RPC.Builder(conf).setProtocol(TestRpcService.class)
      .setInstance(service).setBindAddress(ADDRESS).setPort(PORT).build();
  addr = NetUtils.getConnectAddress(server);

  // now the second protocol
  PBServer2Impl server2Impl = new PBServer2Impl();
  BlockingService service2 = TestProtobufRpc2Proto
      .newReflectiveBlockingService(server2Impl);

  server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, TestRpcService2.class,
      service2);
  server.start();
}
 
Developer: nucypher | Project: hadoop-oss | Lines: 29 | Source: TestProtoBufRpc.java

Example 13: setupBeforeClass

import org.apache.hadoop.conf.Configuration; // import the package/class required by the method
@BeforeClass
public static void setupBeforeClass() throws Exception {
  Configuration conf = UTIL.getConfiguration();
  conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
      BuggyMasterObserver.class.getName());
  conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, true);
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);  // Fail fast
  UTIL.startMiniCluster();
}
 
Developer: fengchen8086 | Project: ditb | Lines: 10 | Source: TestMasterCoprocessorExceptionWithAbort.java

Example 14: createPolicy

import org.apache.hadoop.conf.Configuration; // import the package/class required by the method
private static StripeCompactionPolicy createPolicy(Configuration conf,
    long splitSize, float splitCount, int initialCount, boolean hasTtl) throws Exception {
  conf.setLong(StripeStoreConfig.SIZE_TO_SPLIT_KEY, splitSize);
  conf.setFloat(StripeStoreConfig.SPLIT_PARTS_KEY, splitCount);
  conf.setInt(StripeStoreConfig.INITIAL_STRIPE_COUNT_KEY, initialCount);
  StoreConfigInformation sci = mock(StoreConfigInformation.class);
  when(sci.getStoreFileTtl()).thenReturn(hasTtl ? defaultTtl : Long.MAX_VALUE);
  StripeStoreConfig ssc = new StripeStoreConfig(conf, sci);
  return new StripeCompactionPolicy(conf, sci, ssc);
}
 
Developer: fengchen8086 | Project: ditb | Lines: 11 | Source: TestStripeCompactionPolicy.java

Example 15: testClientTriggeredLeaseRecovery

import org.apache.hadoop.conf.Configuration; // import the package/class required by the method
/** Test lease recovery Triggered by DFSClient. */
@Test
public void testClientTriggeredLeaseRecovery() throws Exception {
  final int REPLICATION = 3;
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, REPLICATION);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();

  try {
    final FileSystem fs = cluster.getFileSystem();
    final Path dir = new Path("/wrwelkj");
    
    SlowWriter[] slowwriters = new SlowWriter[10];
    for(int i = 0; i < slowwriters.length; i++) {
      slowwriters[i] = new SlowWriter(fs, new Path(dir, "file" + i));
    }

    try {
      for(int i = 0; i < slowwriters.length; i++) {
        slowwriters[i].start();
      }

      Thread.sleep(1000);                       // let writers get started

      // stop a datanode; lease recovery should still succeed for the affected writers
      cluster.stopDataNode(AppendTestUtil.nextInt(REPLICATION));
      
      // let the slow writers write for a few more seconds
      System.out.println("Wait a few seconds");
      Thread.sleep(5000);
    }
    finally {
      for(int i = 0; i < slowwriters.length; i++) {
        if (slowwriters[i] != null) {
          slowwriters[i].running = false;
          slowwriters[i].interrupt();
        }
      }
      for(int i = 0; i < slowwriters.length; i++) {
        if (slowwriters[i] != null) {
          slowwriters[i].join();
        }
      }
    }

    //Verify the file
    System.out.println("Verify the file");
    for(int i = 0; i < slowwriters.length; i++) {
      System.out.println(slowwriters[i].filepath + ": length="
          + fs.getFileStatus(slowwriters[i].filepath).getLen());
      FSDataInputStream in = null;
      try {
        in = fs.open(slowwriters[i].filepath);
        for(int j = 0, x; (x = in.read()) != -1; j++) {
          assertEquals(j, x);
        }
      }
      finally {
        IOUtils.closeStream(in);
      }
    }
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
開發者ID:naver,項目名稱:hadoop,代碼行數:67,代碼來源:TestFileCreationClient.java


Note: The org.apache.hadoop.conf.Configuration.setInt examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Do not repost without permission.