

Java MRJobConfig Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.MRJobConfig. If you are wondering what MRJobConfig is for, or how to use it, the curated class code examples below should help.


The MRJobConfig class belongs to the org.apache.hadoop.mapreduce package. Fifteen code examples of the class are shown below, ordered roughly by popularity.

Example 1: reduce

import org.apache.hadoop.mapreduce.MRJobConfig; // import the package/class this example depends on
public void reduce(Text key, Iterable<IntWritable> values, 
                       Context context) throws IOException, InterruptedException {
  // Make one reducer slower for speculative execution
  TaskAttemptID taid = context.getTaskAttemptID();
  long sleepTime = 100;
  Configuration conf = context.getConfiguration();
  boolean test_speculate_reduce =
            conf.getBoolean(MRJobConfig.REDUCE_SPECULATIVE, false);

  // IF TESTING REDUCE SPECULATIVE EXECUTION:
  //   Make the "*_r_000000_0" attempt take much longer than the others.
  //   When speculative execution is enabled, this should cause the attempt
  //   to be killed and restarted. At that point, the attempt ID will be
  //   "*_r_000000_1", so sleepTime will still remain 100ms.
  if ( (taid.getTaskType() == TaskType.REDUCE) && test_speculate_reduce
        && (taid.getTaskID().getId() == 0) && (taid.getId() == 0)) {
    sleepTime = 10000;
  }
  try {
    Thread.sleep(sleepTime);
  } catch (InterruptedException ie) {
    // Ignore
  }
  context.write(key, new IntWritable(0));
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: TestSpeculativeExecution.java
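
The MRJobConfig.REDUCE_SPECULATIVE flag that the reducer reads above is normally set on the driver side. Below is a minimal driver-side sketch; the helper and job names are made up for illustration:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRJobConfig;

public static Job newSpeculativeJob() throws IOException {
  Configuration conf = new Configuration();
  // Launch backup attempts for straggling reduce tasks
  conf.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, true);
  // Leave map tasks non-speculative in this sketch
  conf.setBoolean(MRJobConfig.MAP_SPECULATIVE, false);
  return Job.getInstance(conf, "speculation-demo");
}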

Example 2: configure

import org.apache.hadoop.mapreduce.MRJobConfig; // import the package/class this example depends on
public void configure(JobConf job) {
  super.configure(job);
  // Disable auto-increment of the processed-record counter: for streaming,
  // the number of processed records may differ from (be equal to or fewer
  // than) the number of input records.
  SkipBadRecords.setAutoIncrMapperProcCount(job, false);
  skipping = job.getBoolean(MRJobConfig.SKIP_RECORDS, false);
  if (mapInputWriterClass_.getCanonicalName().equals(TextInputWriter.class.getCanonicalName())) {
    String inputFormatClassName = job.getClass("mapred.input.format.class", TextInputFormat.class).getCanonicalName();
    ignoreKey = job.getBoolean("stream.map.input.ignoreKey", 
      inputFormatClassName.equals(TextInputFormat.class.getCanonicalName()));
  }
  
  try {
    mapOutputFieldSeparator = job.get("stream.map.output.field.separator", "\t").getBytes("UTF-8");
    mapInputFieldSeparator = job.get("stream.map.input.field.separator", "\t").getBytes("UTF-8");
    numOfMapOutputKeyFields = job.getInt("stream.num.map.output.key.fields", 1);
  } catch (UnsupportedEncodingException e) {
    throw new RuntimeException("The current system does not support UTF-8 encoding!", e);
  }
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: PipeMapper.java
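
For context, a sketch of how the settings that configure() reads might be populated when submitting a streaming job; the separator and field count are arbitrary example values:

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.MRJobConfig;

public static JobConf newStreamingConf() {
  JobConf job = new JobConf();
  job.setBoolean(MRJobConfig.SKIP_RECORDS, true);     // enable record skipping
  job.set("stream.map.output.field.separator", "|");  // example separator
  job.set("stream.map.input.field.separator", "|");
  job.setInt("stream.num.map.output.key.fields", 2);
  return job;
}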

Example 3: testSetClasspathWithNoUserPrecendence

import org.apache.hadoop.mapreduce.MRJobConfig; // import the package/class this example depends on
@Test (timeout = 120000)
public void testSetClasspathWithNoUserPrecendence() {
  Configuration conf = new Configuration();
  conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
  conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST, false);
  Map<String, String> env = new HashMap<String, String>();
  try {
    MRApps.setClasspath(env, conf);
  } catch (Exception e) {
    fail("Got exception while setting classpath");
  }
  String env_str = env.get("CLASSPATH");
  String expectedClasspath = StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,
    Arrays.asList("job.jar/job.jar", "job.jar/classes/", "job.jar/lib/*",
      ApplicationConstants.Environment.PWD.$$() + "/*"));
  assertTrue("MAPREDUCE_JOB_USER_CLASSPATH_FIRST false, and job.jar is not in"
    + " the classpath!", env_str.contains(expectedClasspath));
  assertFalse("MAPREDUCE_JOB_USER_CLASSPATH_FIRST false, but taking effect!",
    env_str.startsWith(expectedClasspath));
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: TestMRApps.java
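
The complementary case, with user classpath precedence enabled, can be sketched under the same assumptions as the test:

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.util.MRApps;

public static Map<String, String> userFirstClasspath() throws IOException {
  Configuration conf = new Configuration();
  // Put the user's job.jar and libraries ahead of the framework classes
  conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST, true);
  Map<String, String> env = new HashMap<String, String>();
  MRApps.setClasspath(env, conf);  // CLASSPATH should now start with the job.jar entries
  return env;
}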

Example 4: testGetTokensForNamenodes

import org.apache.hadoop.mapreduce.MRJobConfig; // import the package/class this example depends on
@SuppressWarnings("deprecation")
@Test
public void testGetTokensForNamenodes() throws IOException,
    URISyntaxException {
  Path TEST_ROOT_DIR =
      new Path(System.getProperty("test.build.data", "test/build/data"));
  // awkward, but we need the fully qualified path minus the file:/ scheme
  String binaryTokenFile =
      FileSystem.getLocal(conf)
        .makeQualified(new Path(TEST_ROOT_DIR, "tokenFile")).toUri()
        .getPath();

  MockFileSystem fs1 = createFileSystemForServiceName("service1");
  Credentials creds = new Credentials();
  Token<?> token1 = fs1.getDelegationToken(renewer);
  creds.addToken(token1.getService(), token1);
  // set the config before writing the token file, else the obtain-tokens
  // call below will fail with FileNotFoundException (FNF)
  conf.set(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY, binaryTokenFile);
  creds.writeTokenStorageFile(new Path(binaryTokenFile), conf);
  TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf);
  String fs_addr = fs1.getCanonicalServiceName();
  Token<?> nnt = TokenCache.getDelegationToken(creds, fs_addr);
  assertNotNull("Token for nn is null", nnt);
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: TestTokenCache.java
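
Outside of tests, the same property is typically used to hand a pre-fetched credentials file to a job; the path below is a placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;

public static Configuration withBinaryTokens() {
  Configuration conf = new Configuration();
  // Point the job at delegation tokens persisted earlier via
  // Credentials.writeTokenStorageFile(...)
  conf.set(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY, "/secure/tokens.bin");
  return conf;
}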

Example 5: DefaultSpeculator (constructor)

import org.apache.hadoop.mapreduce.MRJobConfig; // import the package/class this example depends on
public DefaultSpeculator
    (Configuration conf, AppContext context,
     TaskRuntimeEstimator estimator, Clock clock) {
  super(DefaultSpeculator.class.getName());

  this.conf = conf;
  this.context = context;
  this.estimator = estimator;
  this.clock = clock;
  this.eventHandler = context.getEventHandler();
  this.soonestRetryAfterNoSpeculate =
      conf.getLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_NO_SPECULATE,
              MRJobConfig.DEFAULT_SPECULATIVE_RETRY_AFTER_NO_SPECULATE);
  this.soonestRetryAfterSpeculate =
      conf.getLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_SPECULATE,
              MRJobConfig.DEFAULT_SPECULATIVE_RETRY_AFTER_SPECULATE);
  this.proportionRunningTasksSpeculatable =
      conf.getDouble(MRJobConfig.SPECULATIVECAP_RUNNING_TASKS,
              MRJobConfig.DEFAULT_SPECULATIVECAP_RUNNING_TASKS);
  this.proportionTotalTasksSpeculatable =
      conf.getDouble(MRJobConfig.SPECULATIVECAP_TOTAL_TASKS,
              MRJobConfig.DEFAULT_SPECULATIVECAP_TOTAL_TASKS);
  this.minimumAllowedSpeculativeTasks =
      conf.getInt(MRJobConfig.SPECULATIVE_MINIMUM_ALLOWED_TASKS,
              MRJobConfig.DEFAULT_SPECULATIVE_MINIMUM_ALLOWED_TASKS);
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: DefaultSpeculator.java
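
Each knob the constructor reads can be tuned through the job configuration. A sketch with arbitrary example values, not recommendations:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;

public static Configuration tunedSpeculation() {
  Configuration conf = new Configuration();
  // Re-check no sooner than 2s after a pass that speculated nothing
  conf.setLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_NO_SPECULATE, 2000L);
  // Cap speculative attempts at 10% of running tasks and 1% of all tasks
  conf.setDouble(MRJobConfig.SPECULATIVECAP_RUNNING_TASKS, 0.10);
  conf.setDouble(MRJobConfig.SPECULATIVECAP_TOTAL_TASKS, 0.01);
  // ...but always allow at least 10 speculative tasks
  conf.setInt(MRJobConfig.SPECULATIVE_MINIMUM_ALLOWED_TASKS, 10);
  return conf;
}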

Example 6: testSetupDistributedCacheConflictsFiles

import org.apache.hadoop.mapreduce.MRJobConfig; // import the package/class this example depends on
@SuppressWarnings("deprecation")
public void testSetupDistributedCacheConflictsFiles() throws Exception {
  Configuration conf = new Configuration();
  conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
  
  URI mockUri = URI.create("mockfs://mock/");
  FileSystem mockFs = ((FilterFileSystem)FileSystem.get(mockUri, conf))
      .getRawFileSystem();
  
  URI file = new URI("mockfs://mock/tmp/something.zip#something");
  Path filePath = new Path(file);
  URI file2 = new URI("mockfs://mock/tmp/something.txt#something");
  Path file2Path = new Path(file2);
  
  when(mockFs.resolvePath(filePath)).thenReturn(filePath);
  when(mockFs.resolvePath(file2Path)).thenReturn(file2Path);
  
  DistributedCache.addCacheFile(file, conf);
  DistributedCache.addCacheFile(file2, conf);
  conf.set(MRJobConfig.CACHE_FILE_TIMESTAMPS, "10,11");
  conf.set(MRJobConfig.CACHE_FILES_SIZES, "10,11");
  conf.set(MRJobConfig.CACHE_FILE_VISIBILITIES, "true,true");
  Map<String, LocalResource> localResources = 
    new HashMap<String, LocalResource>();
  MRApps.setupDistributedCache(conf, localResources);
  
  assertEquals(1, localResources.size());
  LocalResource lr = localResources.get("something");
  // First one wins
  assertNotNull(lr);
  assertEquals(10L, lr.getSize());
  assertEquals(10L, lr.getTimestamp());
  assertEquals(LocalResourceType.FILE, lr.getType());
}
 
Developer: naver, Project: hadoop, Lines: 35, Source: TestMRApps.java
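
The collision arises because both URIs share the fragment "#something", which becomes the localized link name. A sketch of the usual fix, using the non-deprecated Job API and hypothetical paths, gives each file a distinct fragment:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public static Job cacheJob() throws Exception {
  Job job = Job.getInstance(new Configuration(), "cache-demo");
  // Distinct fragments mean distinct link names, so nothing is silently dropped
  job.addCacheFile(new URI("hdfs:///tmp/something.zip#somethingZip"));
  job.addCacheFile(new URI("hdfs:///tmp/something.txt#somethingTxt"));
  return job;
}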

Example 7: setConf

import org.apache.hadoop.mapreduce.MRJobConfig; // import the package/class this example depends on
public void setConf(Configuration conf) {
  this.conf = conf;
  keyFieldHelper = new KeyFieldHelper();
  String keyFieldSeparator = 
    conf.get(MRJobConfig.MAP_OUTPUT_KEY_FIELD_SEPERATOR, "\t");
  keyFieldHelper.setKeyFieldSeparator(keyFieldSeparator);
  if (conf.get("num.key.fields.for.partition") != null) {
    LOG.warn("Using deprecated num.key.fields.for.partition. " +
    		"Use mapreduce.partition.keypartitioner.options instead");
    this.numOfPartitionFields = conf.getInt("num.key.fields.for.partition",0);
    keyFieldHelper.setKeyFieldSpec(1,numOfPartitionFields);
  } else {
    String option = conf.get(PARTITIONER_OPTIONS);
    keyFieldHelper.parseOption(option);
  }
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: KeyFieldBasedPartitioner.java
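
A sketch of wiring this partitioner up directly; the separator and option string are example values:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.lib.partition.KeyFieldBasedPartitioner;

public static KeyFieldBasedPartitioner<Text, Text> newPartitioner() {
  Configuration conf = new Configuration();
  // Split keys on ':' instead of the default tab
  conf.set(MRJobConfig.MAP_OUTPUT_KEY_FIELD_SEPERATOR, ":");
  // Partition on the second key field (the non-deprecated option syntax)
  conf.set("mapreduce.partition.keypartitioner.options", "-k2,2");
  KeyFieldBasedPartitioner<Text, Text> part = new KeyFieldBasedPartitioner<Text, Text>();
  part.setConf(conf);
  return part;
}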

Example 8: testTotalOrderBinarySearch

import org.apache.hadoop.mapreduce.MRJobConfig; // import the package/class this example depends on
public void testTotalOrderBinarySearch() throws Exception {
  TotalOrderPartitioner<Text,NullWritable> partitioner =
    new TotalOrderPartitioner<Text,NullWritable>();
  Configuration conf = new Configuration();
  Path p = TestTotalOrderPartitioner.<Text>writePartitionFile(
      "totalorderbinarysearch", conf, splitStrings);
  conf.setBoolean(TotalOrderPartitioner.NATURAL_ORDER, false);
  conf.setClass(MRJobConfig.MAP_OUTPUT_KEY_CLASS, Text.class, Object.class);
  try {
    partitioner.setConf(conf);
    NullWritable nw = NullWritable.get();
    for (Check<Text> chk : testStrings) {
      assertEquals(chk.data.toString(), chk.part,
          partitioner.getPartition(chk.data, nw, splitStrings.length + 1));
    }
  } finally {
    p.getFileSystem(conf).delete(p, true);
  }
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: TestTotalOrderPartitioner.java

Example 9: testTotalOrderMemCmp

import org.apache.hadoop.mapreduce.MRJobConfig; // import the package/class this example depends on
public void testTotalOrderMemCmp() throws Exception {
  TotalOrderPartitioner<Text,NullWritable> partitioner =
    new TotalOrderPartitioner<Text,NullWritable>();
  Configuration conf = new Configuration();
  Path p = TestTotalOrderPartitioner.<Text>writePartitionFile(
      "totalordermemcmp", conf, splitStrings);
  conf.setClass(MRJobConfig.MAP_OUTPUT_KEY_CLASS, Text.class, Object.class);
  try {
    partitioner.setConf(conf);
    NullWritable nw = NullWritable.get();
    for (Check<Text> chk : testStrings) {
      assertEquals(chk.data.toString(), chk.part,
          partitioner.getPartition(chk.data, nw, splitStrings.length + 1));
    }
  } finally {
    p.getFileSystem(conf).delete(p, true);
  }
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: TestTotalOrderPartitioner.java
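
In a real job, MAP_OUTPUT_KEY_CLASS is usually set indirectly through the Job API when wiring up TotalOrderPartitioner, as in both tests above; a rough sketch with a placeholder partition-file path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;

public static Job totalOrderJob() throws Exception {
  Job job = Job.getInstance(new Configuration(), "total-order-demo");
  job.setMapOutputKeyClass(Text.class);  // backs MRJobConfig.MAP_OUTPUT_KEY_CLASS
  job.setPartitionerClass(TotalOrderPartitioner.class);
  TotalOrderPartitioner.setPartitionFile(job.getConfiguration(),
      new Path("/tmp/_partitions"));     // placeholder path
  return job;
}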

Example 10: configureHighRamProperties

import org.apache.hadoop.mapreduce.MRJobConfig; // import the package/class this example depends on
/**
 * Sets the high ram job properties in the simulated job's configuration.
 */
@SuppressWarnings("deprecation")
static void configureHighRamProperties(Configuration sourceConf, 
                                       Configuration destConf) {
  // set the memory per map task
  scaleConfigParameter(sourceConf, destConf, 
                       MRConfig.MAPMEMORY_MB, MRJobConfig.MAP_MEMORY_MB, 
                       MRJobConfig.DEFAULT_MAP_MEMORY_MB);
  
  // validate and fail early
  validateTaskMemoryLimits(destConf, MRJobConfig.MAP_MEMORY_MB, 
                           JTConfig.JT_MAX_MAPMEMORY_MB);
  
  // set the memory per reduce task
  scaleConfigParameter(sourceConf, destConf, 
                       MRConfig.REDUCEMEMORY_MB, MRJobConfig.REDUCE_MEMORY_MB,
                       MRJobConfig.DEFAULT_REDUCE_MEMORY_MB);
  // validate and fail early
  validateTaskMemoryLimits(destConf, MRJobConfig.REDUCE_MEMORY_MB, 
                           JTConfig.JT_MAX_REDUCEMEMORY_MB);
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: GridmixJob.java
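
On the user side, the two memory properties scaled above map to straightforward configuration calls; the sizes below are example values only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;

public static Configuration highRamConf() {
  Configuration conf = new Configuration();
  // Example sizing: 2 GB per map task, 4 GB per reduce task
  conf.setInt(MRJobConfig.MAP_MEMORY_MB, 2048);
  conf.setInt(MRJobConfig.REDUCE_MEMORY_MB, 4096);
  return conf;
}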

Example 11: testBinaryTokenFile

import org.apache.hadoop.mapreduce.MRJobConfig; // import the package/class this example depends on
/**
 * run a distributed job and verify that TokenCache is available
 * @throws IOException
 */
@Test
public void testBinaryTokenFile() throws IOException {
  Configuration conf = mrCluster.getConfig();
  
  // provide namenodes names for the job to get the delegation tokens for
  final String nnUri = dfsCluster.getURI(0).toString();
  conf.set(MRJobConfig.JOB_NAMENODES, nnUri + "," + nnUri);
  
  // using argument to pass the file name
  final String[] args = { 
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
      };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new MySleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestBinaryTokenFile.java
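
The test passes the same URI twice; in practice JOB_NAMENODES lists genuinely distinct clusters. A short sketch with placeholder addresses:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;

public static Configuration withExtraNamenodes() {
  Configuration conf = new Configuration();
  // Extra namenodes the client should fetch delegation tokens for
  conf.set(MRJobConfig.JOB_NAMENODES, "hdfs://nn1:8020,hdfs://nn2:8020");
  return conf;
}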

Example 12: getRecordWriter

import org.apache.hadoop.mapreduce.MRJobConfig; // import the package/class this example depends on
/** {@inheritDoc} */
public RecordWriter<K, V> getRecordWriter(FileSystem filesystem,
    JobConf job, String name, Progressable progress) throws IOException {
  org.apache.hadoop.mapreduce.RecordWriter<K, V> w = super.getRecordWriter(
    new TaskAttemptContextImpl(job, 
          TaskAttemptID.forName(job.get(MRJobConfig.TASK_ATTEMPT_ID))));
  org.apache.hadoop.mapreduce.lib.db.DBOutputFormat.DBRecordWriter writer = 
   (org.apache.hadoop.mapreduce.lib.db.DBOutputFormat.DBRecordWriter) w;
  try {
    return new DBRecordWriter(writer.getConnection(), writer.getStatement());
  } catch(SQLException se) {
    throw new IOException(se);
  }
}
 
Developer: naver, Project: hadoop, Lines: 15, Source: DBOutputFormat.java
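
For reference, a sketch of how this old-API DBOutputFormat is usually configured before the record writer is requested; the driver class, URL, table, and column names are all placeholders:

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.db.DBConfiguration;
import org.apache.hadoop.mapred.lib.db.DBOutputFormat;

public static JobConf dbOutputConf() {
  JobConf job = new JobConf();
  // Placeholder JDBC driver and connection URL
  DBConfiguration.configureDB(job, "com.mysql.jdbc.Driver",
      "jdbc:mysql://db.example.com/mydb");
  // Write to table "events" with columns "id" and "name" (examples)
  DBOutputFormat.setOutput(job, "events", "id", "name");
  return job;
}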

Example 13: createJobClassLoader

import org.apache.hadoop.mapreduce.MRJobConfig; // import the package/class this example depends on
/**
 * Creates a {@link ApplicationClassLoader} if
 * {@link MRJobConfig#MAPREDUCE_JOB_CLASSLOADER} is set to true, and
 * the APP_CLASSPATH environment variable is set.
 * @param conf
 * @return the created job classloader, or null if the job classloader is not
 * enabled or the APP_CLASSPATH environment variable is not set
 * @throws IOException
 */
public static ClassLoader createJobClassLoader(Configuration conf)
    throws IOException {
  ClassLoader jobClassLoader = null;
  if (conf.getBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER, false)) {
    String appClasspath = System.getenv(Environment.APP_CLASSPATH.key());
    if (appClasspath == null) {
      LOG.warn("Not creating job classloader since APP_CLASSPATH is not set.");
    } else {
      LOG.info("Creating job classloader");
      if (LOG.isDebugEnabled()) {
        LOG.debug("APP_CLASSPATH=" + appClasspath);
      }
      String[] systemClasses = getSystemClasses(conf);
      jobClassLoader = createJobClassLoader(appClasspath,
          systemClasses);
    }
  }
  return jobClassLoader;
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: MRApps.java
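
Enabling the job classloader from a client configuration is a two-property affair; the system-classes value below is illustrative:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.util.MRApps;

public static ClassLoader newJobClassLoader() throws IOException {
  Configuration conf = new Configuration();
  // Run user classes in an isolated job classloader
  conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER, true);
  // Classes that must still resolve from the system classloader (example value)
  conf.set(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER_SYSTEM_CLASSES,
      "java.,org.apache.hadoop.");
  // Returns null unless the APP_CLASSPATH environment variable is set
  return MRApps.createJobClassLoader(conf);
}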

Example 14: testDeletionofStagingOnKill

import org.apache.hadoop.mapreduce.MRJobConfig; // import the package/class this example depends on
@Test (timeout = 30000)
public void testDeletionofStagingOnKill() throws IOException {
  conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
  fs = mock(FileSystem.class);
  when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
  //Staging Dir exists
  String user = UserGroupInformation.getCurrentUser().getShortUserName();
  Path stagingDir = MRApps.getStagingAreaDir(conf, user);
  when(fs.exists(stagingDir)).thenReturn(true);
  ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(),
      0);
  ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 0);
  JobId jobid = recordFactory.newRecordInstance(JobId.class);
  jobid.setAppId(appId);
  ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
  MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc);
  appMaster.init(conf);
  //simulate the process being killed
  MRAppMaster.MRAppMasterShutdownHook hook = 
    new MRAppMaster.MRAppMasterShutdownHook(appMaster);
  hook.run();
  verify(fs, times(0)).delete(stagingJobPath, true);
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: TestStagingCleanup.java

Example 15: genArgs

import org.apache.hadoop.mapreduce.MRJobConfig; // import the package/class this example depends on
@Override
protected String[] genArgs() {
  // set the testcase-specific config properties first and the remaining
  // arguments are set in TestStreaming.genArgs().
  args.add("-jobconf");
  args.add(MRJobConfig.MAP_OUTPUT_KEY_CLASS +
      "=org.apache.hadoop.io.LongWritable");
  args.add("-jobconf");
  args.add(MRJobConfig.OUTPUT_KEY_CLASS +
      "=org.apache.hadoop.io.LongWritable");

  // Using SequenceFileOutputFormat here because with TextOutputFormat, the
  // mapred.output.key.class set in JobConf (which we want to test here) is
  // not read/used at all.
  args.add("-outputformat");
  args.add("org.apache.hadoop.mapred.SequenceFileOutputFormat");

  return super.genArgs();
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: TestStreamingOutputKeyValueTypes.java
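
The same typing can be expressed through the Java API instead of -jobconf arguments; a rough non-streaming equivalent:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;

public static Job typedJob() throws Exception {
  Job job = Job.getInstance(new Configuration(), "output-types-demo");
  job.setMapOutputKeyClass(LongWritable.class);  // MRJobConfig.MAP_OUTPUT_KEY_CLASS
  job.setOutputKeyClass(LongWritable.class);     // MRJobConfig.OUTPUT_KEY_CLASS
  job.setOutputFormatClass(SequenceFileOutputFormat.class);
  return job;
}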


Note: The org.apache.hadoop.mapreduce.MRJobConfig class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Refer to the corresponding project's License before redistributing or using the code; do not republish without permission.