

Java JobConf.setLong Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapred.JobConf.setLong. If you are wondering how JobConf.setLong is used, how to call it, or what real-world examples look like, the curated code samples below may help. You can also explore further usage examples of its containing class, org.apache.hadoop.mapred.JobConf.


The following presents 4 code examples of the JobConf.setLong method, sorted by popularity by default.
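
Before turning to the examples, here is a minimal, self-contained sketch of the setLong/getLong round trip on a JobConf; the property key used here is made up purely for illustration and is not a real Hadoop configuration key:

import org.apache.hadoop.mapred.JobConf;

public class SetLongSketch {
  public static void main(String[] args) {
    JobConf conf = new JobConf();
    // store a long-valued property (the key is illustrative only)
    conf.setLong("example.max.records", 100000L);
    // read it back; the second argument is the default used when the key is absent
    long maxRecords = conf.getLong("example.max.records", 1000L);
    System.out.println(maxRecords); // prints 100000
  }
}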

Example 1: init

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
@Before
public void init() throws HadoopIllegalArgumentException, IOException {
  conf = new JobConf();
  conf.set(JHAdminConfig.JHS_ADMIN_ADDRESS, "0.0.0.0:0");
  conf.setClass("hadoop.security.group.mapping", MockUnixGroupsMapping.class,
      GroupMappingServiceProvider.class);
  conf.setLong("hadoop.security.groups.cache.secs", groupRefreshTimeoutSec);
  conf.setBoolean(
        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
        securityEnabled);
  Groups.getUserToGroupsMappingService(conf);
  jobHistoryService = mock(JobHistory.class);
  alds = mock(AggregatedLogDeletionService.class);

  hsAdminServer = new HSAdminServer(alds, jobHistoryService) {

    @Override
    protected Configuration createConf() {
      return conf;
    }
  };
  hsAdminServer.init(conf);
  hsAdminServer.start();
  conf.setSocketAddr(JHAdminConfig.JHS_ADMIN_ADDRESS,
      hsAdminServer.clientRpcServer.getListenerAddress());
  hsAdminClient = new HSAdmin(conf);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 28, Source file: TestHSAdminServer.java
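
A test built on the setup above would then exercise the started server through hsAdminClient. The following is only a hedged sketch of such a call, not part of the original test class; the command string corresponds to a standard hsadmin option, while the method name and assertion are assumptions:

// hedged sketch: exercising the client created in init(); the method name and
// assertion are assumptions rather than code taken from the original test
@Test
public void testRefreshUserToGroupsMappings() throws Exception {
  String[] args = new String[] { "-refreshUserToGroupsMappings" };
  int exitCode = hsAdminClient.run(args);
  Assert.assertEquals(0, exitCode);
}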

Example 2: getSplits

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
/**
 * Build a CompositeInputSplit from the child InputFormats by assigning the
 * ith split from each child to the ith composite split.
 */
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
  setFormat(job);
  job.setLong("mapred.min.split.size", Long.MAX_VALUE);
  return root.getSplits(job, numSplits);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 10, Source file: CompositeInputFormat.java
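
Forcing mapred.min.split.size to Long.MAX_VALUE keeps each child format from splitting its files further, so identically partitioned sources yield one split per file and the ith splits can be paired. For context, a job would wire this format up roughly as in the hedged sketch below; the paths, the "inner" join operation, and the classic mapred.join.expr key are illustrative assumptions rather than code from the original project:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.mapred.join.CompositeInputFormat;

// hedged sketch: configuring a map-side join over two identically partitioned
// sources; the paths and the "inner" join op are illustrative only
JobConf job = new JobConf();
job.setInputFormat(CompositeInputFormat.class);
job.set("mapred.join.expr", CompositeInputFormat.compose(
    "inner", SequenceFileInputFormat.class,
    new Path("/data/a"), new Path("/data/b")));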

Example 3: testMemoryMerge

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
@Test(timeout=10000)
public void testMemoryMerge() throws Exception {
  final int TOTAL_MEM_BYTES = 10000;
  final int OUTPUT_SIZE = 7950;
  JobConf conf = new JobConf();
  // with these settings the merge manager's memory limit is 10000 * 1.0;
  // the single-shuffle limit is 10000 * 0.8 = 8000, so each 7950-byte output
  // fits in memory, and the merge threshold is 10000 * 0.9 = 9000, so two
  // committed outputs (15900 bytes) trigger an in-memory merge
  conf.setFloat(MRJobConfig.SHUFFLE_INPUT_BUFFER_PERCENT, 1.0f);
  conf.setLong(MRJobConfig.REDUCE_MEMORY_TOTAL_BYTES, TOTAL_MEM_BYTES);
  conf.setFloat(MRJobConfig.SHUFFLE_MEMORY_LIMIT_PERCENT, 0.8f);
  conf.setFloat(MRJobConfig.SHUFFLE_MERGE_PERCENT, 0.9f);
  TestExceptionReporter reporter = new TestExceptionReporter();
  CyclicBarrier mergeStart = new CyclicBarrier(2);
  CyclicBarrier mergeComplete = new CyclicBarrier(2);
  StubbedMergeManager mgr = new StubbedMergeManager(conf, reporter,
      mergeStart, mergeComplete);

  // reserve enough map output to cause a merge when it is committed
  MapOutput<Text, Text> out1 = mgr.reserve(null, OUTPUT_SIZE, 0);
  Assert.assertTrue("Should be a memory merge",
                    (out1 instanceof InMemoryMapOutput));
  InMemoryMapOutput<Text, Text> mout1 = (InMemoryMapOutput<Text, Text>)out1;
  fillOutput(mout1);
  MapOutput<Text, Text> out2 = mgr.reserve(null, OUTPUT_SIZE, 0);
  Assert.assertTrue("Should be a memory merge",
                    (out2 instanceof InMemoryMapOutput));
  InMemoryMapOutput<Text, Text> mout2 = (InMemoryMapOutput<Text, Text>)out2;
  fillOutput(mout2);

  // next reservation should be a WAIT
  MapOutput<Text, Text> out3 = mgr.reserve(null, OUTPUT_SIZE, 0);
  Assert.assertEquals("Should be told to wait", null, out3);

  // trigger the first merge and wait for merge thread to start merging
  // and free enough output to reserve more
  mout1.commit();
  mout2.commit();
  mergeStart.await();

  Assert.assertEquals(1, mgr.getNumMerges());

  // reserve enough map output to cause another merge when committed
  out1 = mgr.reserve(null, OUTPUT_SIZE, 0);
  Assert.assertTrue("Should be a memory merge",
                     (out1 instanceof InMemoryMapOutput));
  mout1 = (InMemoryMapOutput<Text, Text>)out1;
  fillOutput(mout1);
  out2 = mgr.reserve(null, OUTPUT_SIZE, 0);
  Assert.assertTrue("Should be a memory merge",
                     (out2 instanceof InMemoryMapOutput));
  mout2 = (InMemoryMapOutput<Text, Text>)out2;
  fillOutput(mout2);

  // next reservation should be null
  out3 = mgr.reserve(null, OUTPUT_SIZE, 0);
  Assert.assertEquals("Should be told to wait", null, out3);

  // commit output *before* merge thread completes
  mout1.commit();
  mout2.commit();

  // allow the first merge to complete
  mergeComplete.await();

  // start the second merge and verify
  mergeStart.await();
  Assert.assertEquals(2, mgr.getNumMerges());

  // trigger the end of the second merge
  mergeComplete.await();

  Assert.assertEquals(2, mgr.getNumMerges());
  Assert.assertEquals("exception reporter invoked",
      0, reporter.getNumExceptions());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 74, Source file: TestMergeManager.java

Example 4: createDataJoinJob

import org.apache.hadoop.mapred.JobConf; // import the package/class this method depends on
public static JobConf createDataJoinJob(String args[]) throws IOException {

    String inputDir = args[0];
    String outputDir = args[1];
    Class inputFormat = SequenceFileInputFormat.class;
    if (args[2].compareToIgnoreCase("text") != 0) {
      System.out.println("Using SequenceFileInputFormat: " + args[2]);
    } else {
      System.out.println("Using TextInputFormat: " + args[2]);
      inputFormat = TextInputFormat.class;
    }
    int numOfReducers = Integer.parseInt(args[3]);
    Class mapper = getClassByName(args[4]);
    Class reducer = getClassByName(args[5]);
    Class mapoutputValueClass = getClassByName(args[6]);
    Class outputFormat = TextOutputFormat.class;
    Class outputValueClass = Text.class;
    if (args[7].compareToIgnoreCase("text") != 0) {
      System.out.println("Using SequenceFileOutputFormat: " + args[7]);
      outputFormat = SequenceFileOutputFormat.class;
      outputValueClass = getClassByName(args[7]);
    } else {
      System.out.println("Using TextOutputFormat: " + args[7]);
    }
    long maxNumOfValuesPerGroup = 100;
    String jobName = "";
    if (args.length > 8) {
      maxNumOfValuesPerGroup = Long.parseLong(args[8]);
    }
    if (args.length > 9) {
      jobName = args[9];
    }
    Configuration defaults = new Configuration();
    JobConf job = new JobConf(defaults, DataJoinJob.class);
    job.setJobName("DataJoinJob: " + jobName);

    FileSystem fs = FileSystem.get(defaults);
    fs.delete(new Path(outputDir), true);
    FileInputFormat.setInputPaths(job, inputDir);

    job.setInputFormat(inputFormat);

    job.setMapperClass(mapper);
    FileOutputFormat.setOutputPath(job, new Path(outputDir));
    job.setOutputFormat(outputFormat);
    SequenceFileOutputFormat.setOutputCompressionType(job,
            SequenceFile.CompressionType.BLOCK);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(mapoutputValueClass);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(outputValueClass);
    job.setReducerClass(reducer);

    job.setNumMapTasks(1);
    job.setNumReduceTasks(numOfReducers);
    job.setLong("datajoin.maxNumOfValuesPerGroup", maxNumOfValuesPerGroup);
    return job;
  }
 
Developer ID: naver, Project: hadoop, Lines of code: 59, Source file: DataJoinJob.java
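
To make the positional arguments concrete, the hedged sketch below builds an args array and submits the resulting job through JobClient; every path and class name is an illustrative placeholder, not something taken from the original project:

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

// hedged sketch: all paths and class names below are placeholders
String[] args = new String[] {
    "/data/join/input",              // args[0]: input directory
    "/data/join/output",             // args[1]: output directory
    "text",                          // args[2]: "text" selects TextInputFormat, anything else SequenceFileInputFormat
    "2",                             // args[3]: number of reducers
    "com.example.MyJoinMapper",      // args[4]: mapper class name
    "com.example.MyJoinReducer",     // args[5]: reducer class name
    "com.example.MyTaggedMapOutput", // args[6]: map output value class name
    "text",                          // args[7]: "text" selects TextOutputFormat
    "100",                           // args[8]: optional max values per group (datajoin.maxNumOfValuesPerGroup)
    "my-join-job"                    // args[9]: optional job name
};
JobConf job = DataJoinJob.createDataJoinJob(args);
JobClient.runJob(job);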


Note: The org.apache.hadoop.mapred.JobConf.setLong examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors; consult the corresponding project's license before distributing or using the code. Do not republish without permission.