

Java JobContextImpl Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapred.JobContextImpl. If you are wondering what the JobContextImpl class is for, how to use it, or where to find usage examples, the curated examples below should help.


The JobContextImpl class belongs to the org.apache.hadoop.mapred package. A total of 13 code examples of the class are shown below, sorted by popularity by default.
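
As a quick orientation before the examples: a JobContextImpl wraps a JobConf together with a JobID, and the resulting context is what an OutputCommitter's job-level methods consume. The minimal sketch below illustrates that pattern under stated assumptions (the output path and the standalone main wrapper are illustrative, not taken from any example on this page):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileOutputCommitter;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobContext;
import org.apache.hadoop.mapred.JobContextImpl;
import org.apache.hadoop.mapreduce.JobID;

public class JobContextImplSketch {
    public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf();
        // Illustrative output path; FileOutputCommitter needs one for job setup.
        FileOutputFormat.setOutputPath(conf, new Path("/tmp/jobcontext-demo"));

        // Wrap the configuration and a job ID into a job-level context,
        // exactly as the examples below do.
        JobContext jobContext = new JobContextImpl(conf, new JobID());

        // Committers take the context for job-level setup/commit/abort.
        FileOutputCommitter committer = new FileOutputCommitter();
        committer.setupJob(jobContext);
    }
}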

Example 1: testAbort

import org.apache.hadoop.mapred.JobContextImpl; // import the required package/class
public void testAbort() throws IOException {
  JobConf job = new JobConf();
  setConfForFileOutputCommitter(job);
  JobContext jContext = new JobContextImpl(job, taskID.getJobID());
  TaskAttemptContext tContext = new TaskAttemptContextImpl(job, taskID);
  FileOutputCommitter committer = new FileOutputCommitter();
  FileOutputFormat.setWorkOutputPath(job, committer
      .getTaskAttemptPath(tContext));

  // do setup
  committer.setupJob(jContext);
  committer.setupTask(tContext);
  String file = "test.txt";

  // A reporter that does nothing
  Reporter reporter = Reporter.NULL;
  // write output
  FileSystem localFs = FileSystem.getLocal(job);
  TextOutputFormat theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(localFs,
      job, file, reporter);
  writeOutput(theRecordWriter, reporter);

  // do abort
  committer.abortTask(tContext);
  File expectedFile = new File(new Path(committer
      .getTaskAttemptPath(tContext), file).toString());
  assertFalse("task temp dir still exists", expectedFile.exists());

  committer.abortJob(jContext, JobStatus.State.FAILED);
  expectedFile = new File(new Path(outDir, FileOutputCommitter.TEMP_DIR_NAME)
      .toString());
  assertFalse("job temp dir "+expectedFile+" still exists", expectedFile.exists());
  assertEquals("Output directory not empty", 0, new File(outDir.toString())
      .listFiles().length);
  FileUtil.fullyDelete(new File(outDir.toString()));
}
 
Developer: naver, Project: hadoop, Lines: 38, Source: TestMRCJCFileOutputCommitter.java
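
The writeOutput helper called above is not part of this snippet. Judging from the inline writes in Example 9 and the expected output asserted in Examples 8 and 12, it presumably looks like the following reconstruction (key1, key2, val1 and val2 are Text fields of the test class, as in Example 9; this is an inference, not the original test source):

// Hypothetical reconstruction of the writeOutput helper, inferred from
// the write sequence in Example 9 and the output asserted in Example 8.
private void writeOutput(RecordWriter theRecordWriter, Reporter reporter)
    throws IOException {
  NullWritable nullWritable = NullWritable.get();
  try {
    theRecordWriter.write(key1, val1);
    theRecordWriter.write(null, nullWritable);
    theRecordWriter.write(null, val1);
    theRecordWriter.write(nullWritable, val2);
    theRecordWriter.write(key2, nullWritable);
    theRecordWriter.write(key1, null);
    theRecordWriter.write(null, null);
    theRecordWriter.write(key2, val2);
  } finally {
    theRecordWriter.close(reporter);
  }
}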

Example 2: open

import org.apache.hadoop.mapred.JobContextImpl; // import the required package/class
/**
 * Creates the temporary output file for the Hadoop RecordWriter.
 * @param taskNumber The number of the parallel instance.
 * @param numTasks The number of parallel tasks.
 * @throws java.io.IOException
 */
@Override
public void open(int taskNumber, int numTasks) throws IOException {

	// enforce sequential open() calls
	synchronized (OPEN_MUTEX) {
		if (Integer.toString(taskNumber + 1).length() > 6) {
			throw new IOException("Task id too large.");
		}

		TaskAttemptID taskAttemptID = TaskAttemptID.forName("attempt__0000_r_"
				+ String.format("%" + (6 - Integer.toString(taskNumber + 1).length()) + "s", " ").replace(" ", "0")
				+ Integer.toString(taskNumber + 1)
				+ "_0");

		this.jobConf.set("mapred.task.id", taskAttemptID.toString());
		this.jobConf.setInt("mapred.task.partition", taskNumber + 1);
		// for hadoop 2.2
		this.jobConf.set("mapreduce.task.attempt.id", taskAttemptID.toString());
		this.jobConf.setInt("mapreduce.task.partition", taskNumber + 1);

		this.context = new TaskAttemptContextImpl(this.jobConf, taskAttemptID);

		this.outputCommitter = this.jobConf.getOutputCommitter();

		JobContext jobContext = new JobContextImpl(this.jobConf, new JobID());

		this.outputCommitter.setupJob(jobContext);

		this.recordWriter = this.mapredOutputFormat.getRecordWriter(null, this.jobConf, Integer.toString(taskNumber + 1), new HadoopDummyProgressable());
	}
}
 
Developer: axbaretto, Project: flink, Lines: 38, Source: HadoopOutputFormatBase.java
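
The padding expression in open() above (format a run of spaces of the remaining width, then replace them with zeros) simply builds a six-digit, zero-padded task number. A more direct equivalent, shown here as a sketch rather than as flink's actual code, is String.format with a %06d width specifier:

import org.apache.hadoop.mapred.TaskAttemptID;

public class AttemptIdSketch {
    public static void main(String[] args) {
        int taskNumber = 41; // illustrative parallel-instance index

        // "%06d" zero-pads to six digits, matching the format-then-replace
        // construction used in the example above.
        TaskAttemptID taskAttemptID = TaskAttemptID.forName(
                String.format("attempt__0000_r_%06d_0", taskNumber + 1));

        System.out.println(taskAttemptID); // attempt__0000_r_000042_0
    }
}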

Example 3: finalizeGlobal

import org.apache.hadoop.mapred.JobContextImpl; // import the required package/class
@Override
public void finalizeGlobal(int parallelism) throws IOException {

	try {
		JobContext jobContext = new JobContextImpl(this.jobConf, new JobID());
		OutputCommitter outputCommitter = this.jobConf.getOutputCommitter();

		// finalize HDFS output format
		outputCommitter.commitJob(jobContext);
	} catch (Exception e) {
		throw new RuntimeException(e);
	}
}
 
Developer: axbaretto, Project: flink, Lines: 14, Source: HadoopOutputFormatBase.java

Example 4: HadoopV2TaskContext

import org.apache.hadoop.mapred.JobContextImpl; // import the required package/class
/**
 * @param taskInfo Task info.
 * @param job Job.
 * @param jobId Job ID.
 * @param locNodeId Local node ID.
 * @param jobConfDataInput DataInput from which to read the JobConf.
 */
public HadoopV2TaskContext(HadoopTaskInfo taskInfo, HadoopJobEx job, HadoopJobId jobId,
    @Nullable UUID locNodeId, DataInput jobConfDataInput) throws IgniteCheckedException {
    super(taskInfo, job);
    this.locNodeId = locNodeId;

    // Before creating the JobConf instance we should set the new context class loader.
    ClassLoader oldLdr = HadoopCommonUtils.setContextClassLoader(getClass().getClassLoader());

    try {
        JobConf jobConf = new JobConf();

        try {
            jobConf.readFields(jobConfDataInput);
        }
        catch (IOException e) {
            throw new IgniteCheckedException(e);
        }

        // For map-reduce jobs prefer local writes.
        jobConf.setBooleanIfUnset(PARAM_IGFS_PREFER_LOCAL_WRITES, true);

        initializePartiallyRawComparator(jobConf);

        jobCtx = new JobContextImpl(jobConf, new JobID(jobId.globalId().toString(), jobId.localId()));

        useNewMapper = jobConf.getUseNewMapper();
        useNewReducer = jobConf.getUseNewReducer();
        useNewCombiner = jobConf.getCombinerClass() == null;
    }
    finally {
        HadoopCommonUtils.restoreContextClassLoader(oldLdr);
    }
}
 
Developer: apache, Project: ignite, Lines: 41, Source: HadoopV2TaskContext.java
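
The constructor above reads a serialized JobConf from a DataInput. Because JobConf inherits Writable semantics from Configuration, the producing side can serialize it symmetrically with write(DataOutput). A minimal round-trip sketch, assuming an in-memory byte stream rather than Ignite's actual transport:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutputStream;

import org.apache.hadoop.mapred.JobConf;

public class JobConfRoundTrip {
    public static void main(String[] args) throws Exception {
        JobConf original = new JobConf();
        original.set("example.key", "example.value"); // illustrative entry

        // Configuration implements Writable, so a JobConf can be written
        // to any DataOutput ...
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));

        // ... and read back from the matching DataInput, which is the form
        // the HadoopV2TaskContext constructor above consumes.
        DataInput in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        JobConf copy = new JobConf();
        copy.readFields(in);

        System.out.println(copy.get("example.key")); // example.value
    }
}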

Example 5: HadoopV2JobResourceManager

import org.apache.hadoop.mapred.JobContextImpl; // import the required package/class
/**
 * Creates a new instance.
 * @param jobId Job ID.
 * @param ctx Hadoop job context.
 * @param log Logger.
 * @param job Hadoop job.
 */
public HadoopV2JobResourceManager(HadoopJobId jobId, JobContextImpl ctx, IgniteLogger log, HadoopV2Job job) {
    this.jobId = jobId;
    this.ctx = ctx;
    this.log = log.getLogger(HadoopV2JobResourceManager.class);
    this.job = job;
}
 
Developer: apache, Project: ignite, Lines: 13, Source: HadoopV2JobResourceManager.java

Example 6: testAbort

import org.apache.hadoop.mapred.JobContextImpl; // import the required package/class
public void testAbort() throws IOException {
  JobConf job = new JobConf();
  setConfForFileOutputCommitter(job);
  JobContext jContext = new JobContextImpl(job, taskID.getJobID());
  TaskAttemptContext tContext = new TaskAttemptContextImpl(job, taskID);
  FileOutputCommitter committer = new FileOutputCommitter();
  FileOutputFormat.setWorkOutputPath(job, committer
      .getTempTaskOutputPath(tContext));

  // do setup
  committer.setupJob(jContext);
  committer.setupTask(tContext);
  String file = "test.txt";

  // A reporter that does nothing
  Reporter reporter = Reporter.NULL;
  // write output
  FileSystem localFs = FileSystem.getLocal(job);
  TextOutputFormat theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(localFs,
      job, file, reporter);
  writeOutput(theRecordWriter, reporter);

  // do abort
  committer.abortTask(tContext);
  File expectedFile = new File(new Path(committer
      .getTempTaskOutputPath(tContext), file).toString());
  assertFalse("task temp dir still exists", expectedFile.exists());

  committer.abortJob(jContext, JobStatus.State.FAILED);
  expectedFile = new File(new Path(outDir, FileOutputCommitter.TEMP_DIR_NAME)
      .toString());
  assertFalse("job temp dir still exists", expectedFile.exists());
  assertEquals("Output directory not empty", 0, new File(outDir.toString())
      .listFiles().length);
  FileUtil.fullyDelete(new File(outDir.toString()));
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 38, Source: TestFileOutputCommitter.java

Example 7: shouldReturnListsAsRecords

import org.apache.hadoop.mapred.JobContextImpl; // import the required package/class
@Test public void shouldReturnListsAsRecords() throws Exception {
    JobConf conf = createConfig();
    TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());

    CSVNLineInputFormat inputFormat = new CSVNLineInputFormat();
    List<InputSplit> actualSplits =
            inputFormat.getSplits(new JobContextImpl(conf, new JobID()) { });
    RecordReader<LongWritable, List<Text>> recordReader = inputFormat.createRecordReader(actualSplits.get(0), context);

    recordReader.initialize(actualSplits.get(0), context);

    recordReader.nextKeyValue();
    List<Text> firstLineValue = recordReader.getCurrentValue();

    assertEquals("Joe Demo", firstLineValue.get(0).toString());
    assertEquals("2 Demo Street,\nDemoville,\nAustralia. 2615", firstLineValue.get(1).toString());
    assertEquals("[email protected]", firstLineValue.get(2).toString());

    recordReader.nextKeyValue();
    List<Text> secondLineValue = recordReader.getCurrentValue();

    assertEquals("Jim Sample", secondLineValue.get(0).toString());
    assertEquals("", secondLineValue.get(1).toString());
    assertEquals("[email protected]", secondLineValue.get(2).toString());

    recordReader.nextKeyValue();
    List<Text> thirdLineValue = recordReader.getCurrentValue();

    assertEquals("Jack Example", thirdLineValue.get(0).toString());
    assertEquals("1 Example Street, Exampleville, Australia.\n2615", thirdLineValue.get(1).toString());
    assertEquals("[email protected]", thirdLineValue.get(2).toString());
}
 
Developer: mvallebr, Project: CSVInputFormat, Lines: 34, Source: CSVNLineInputFormatTest.java

Example 8: testCommitter

import org.apache.hadoop.mapred.JobContextImpl; // import the required package/class
@SuppressWarnings("unchecked")
public void testCommitter() throws Exception {
  JobConf job = new JobConf();
  setConfForFileOutputCommitter(job);
  JobContext jContext = new JobContextImpl(job, taskID.getJobID());
  TaskAttemptContext tContext = new TaskAttemptContextImpl(job, taskID);
  FileOutputCommitter committer = new FileOutputCommitter();
  FileOutputFormat.setWorkOutputPath(job, 
    committer.getTaskAttemptPath(tContext));

  committer.setupJob(jContext);
  committer.setupTask(tContext);
  String file = "test.txt";

  // A reporter that does nothing
  Reporter reporter = Reporter.NULL;
  // write output
  FileSystem localFs = FileSystem.getLocal(job);
  TextOutputFormat theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter =
    theOutputFormat.getRecordWriter(localFs, job, file, reporter);
  writeOutput(theRecordWriter, reporter);

  // do commit
  committer.commitTask(tContext);
  committer.commitJob(jContext);
  
  // validate output
  File expectedFile = new File(new Path(outDir, file).toString());
  StringBuffer expectedOutput = new StringBuffer();
  expectedOutput.append(key1).append('\t').append(val1).append("\n");
  expectedOutput.append(val1).append("\n");
  expectedOutput.append(val2).append("\n");
  expectedOutput.append(key2).append("\n");
  expectedOutput.append(key1).append("\n");
  expectedOutput.append(key2).append('\t').append(val2).append("\n");
  String output = UtilsForTests.slurp(expectedFile);
  assertEquals(output, expectedOutput.toString());

  FileUtil.fullyDelete(new File(outDir.toString()));
}
 
Developer: naver, Project: hadoop, Lines: 42, Source: TestMRCJCFileOutputCommitter.java

Example 9: testCommitter

import org.apache.hadoop.mapred.JobContextImpl; // import the required package/class
@SuppressWarnings("unchecked")
public void testCommitter() throws Exception {
  JobConf job = new JobConf();
  job.set("mapred.task.id", attempt);
  job.setOutputCommitter(FileOutputCommitter.class);
  FileOutputFormat.setOutputPath(job, outDir);
  JobContext jContext = new JobContextImpl(job, taskID.getJobID());
  TaskAttemptContext tContext = new TaskAttemptContextImpl(job, taskID);
  FileOutputCommitter committer = new FileOutputCommitter();
  FileOutputFormat.setWorkOutputPath(job, 
    committer.getTempTaskOutputPath(tContext));

  committer.setupJob(jContext);
  committer.setupTask(tContext);
  String file = "test.txt";

  // A reporter that does nothing
  Reporter reporter = Reporter.NULL;
  FileSystem localFs = FileSystem.getLocal(job);
  TextOutputFormat theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter =
    theOutputFormat.getRecordWriter(localFs, job, file, reporter);
  Text key1 = new Text("key1");
  Text key2 = new Text("key2");
  Text val1 = new Text("val1");
  Text val2 = new Text("val2");
  NullWritable nullWritable = NullWritable.get();

  try {
    theRecordWriter.write(key1, val1);
    theRecordWriter.write(null, nullWritable);
    theRecordWriter.write(null, val1);
    theRecordWriter.write(nullWritable, val2);
    theRecordWriter.write(key2, nullWritable);
    theRecordWriter.write(key1, null);
    theRecordWriter.write(null, null);
    theRecordWriter.write(key2, val2);
  } finally {
    theRecordWriter.close(reporter);
  }
  committer.commitTask(tContext);
  committer.commitJob(jContext);
  
  File expectedFile = new File(new Path(outDir, file).toString());
  StringBuffer expectedOutput = new StringBuffer();
  expectedOutput.append(key1).append('\t').append(val1).append("\n");
  expectedOutput.append(val1).append("\n");
  expectedOutput.append(val2).append("\n");
  expectedOutput.append(key2).append("\n");
  expectedOutput.append(key1).append("\n");
  expectedOutput.append(key2).append('\t').append(val2).append("\n");
  String output = UtilsForTests.slurp(expectedFile);
  assertEquals(output, expectedOutput.toString());
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 55, Source: TestFileOutputCommitter.java
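
A note on the write sequence above: TextOutputFormat's line writer treats a null or NullWritable key (or value) as absent, writes only the present side, and inserts the tab separator only when both sides are present; a call where both key and value are null or NullWritable emits nothing at all. That is why the eight write calls produce exactly the six lines in expectedOutput, with a tab-separated pair only on the first and last lines.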

Example 10: testTaskCleanupDoesNotCommit

import org.apache.hadoop.mapred.JobContextImpl; // import the required package/class
/**
 * A test that mimics a failed task to ensure that it does
 * not get into the COMMIT_PENDING state, by using a fake
 * UmbilicalProtocol implementation that fails if the commit
 * protocol is played.
 * 
 * The test mocks the various steps in a failed task's 
 * life-cycle using a special OutputCommitter and UmbilicalProtocol
 * implementation.
 * 
 * @throws Exception
 */
public void testTaskCleanupDoesNotCommit() throws Exception {
  // Mimic a job with a special committer that does not cleanup
  // files when a task fails.
  JobConf job = new JobConf();
  job.setOutputCommitter(CommitterWithoutCleanup.class);
  Path outDir = new Path(rootDir, "output"); 
  FileOutputFormat.setOutputPath(job, outDir);

  // Mimic job setup
  String dummyAttemptID = "attempt_200707121733_0001_m_000000_0";
  TaskAttemptID attemptID = TaskAttemptID.forName(dummyAttemptID);
  OutputCommitter committer = new CommitterWithoutCleanup();
  JobContext jContext = new JobContextImpl(job, attemptID.getJobID());
  committer.setupJob(jContext);
  

  // Mimic a map task
  dummyAttemptID = "attempt_200707121733_0001_m_000001_0";
  attemptID = TaskAttemptID.forName(dummyAttemptID);
  Task task = new MapTask(new Path(rootDir, "job.xml").toString(), attemptID,
      0, null, 1);
  task.setConf(job);
  task.localizeConfiguration(job);
  task.initialize(job, attemptID.getJobID(), Reporter.NULL, false);
  
  // Mimic the map task writing some output.
  String file = "test.txt";
  FileSystem localFs = FileSystem.getLocal(job);
  TextOutputFormat<Text, Text> theOutputFormat 
    = new TextOutputFormat<Text, Text>();
  RecordWriter<Text, Text> theRecordWriter = 
    theOutputFormat.getRecordWriter(localFs,
      job, file, Reporter.NULL);
  theRecordWriter.write(new Text("key"), new Text("value"));
  theRecordWriter.close(Reporter.NULL);

  // Mimic a task failure; setting up the task for cleanup simulates
  // the abort protocol to be played.
  // Without checks in the framework, this will fail
  // as the committer will cause a COMMIT to happen for
  // the cleanup task.
  task.setTaskCleanupTask();
  MyUmbilical umbilical = new MyUmbilical();
  task.run(job, umbilical);
  assertTrue("Task did not succeed", umbilical.taskDone);
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 59, Source: TestTaskCommit.java

Example 11: init

import org.apache.hadoop.mapred.JobContextImpl; // import the required package/class
@Override
public void init(@Nonnull Context context) {
    outputCommitter = jobConf.getOutputCommitter();
    jobContext = new JobContextImpl(jobConf, new JobID());
    uncheckRun(() -> outputCommitter.setupJob(jobContext));
}
 
Developer: hazelcast, Project: hazelcast-jet, Lines: 7, Source: WriteHdfsP.java
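
Note that init() above covers only the setup half of the committer lifecycle; the completion side, once all writers have finished, mirrors Example 3's finalizeGlobal: build an equivalent JobContextImpl and invoke commitJob on the same committer.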

Example 12: testCommitter

import org.apache.hadoop.mapred.JobContextImpl; // import the required package/class
@SuppressWarnings("unchecked")
public void testCommitter() throws Exception {
  JobConf job = new JobConf();
  setConfForFileOutputCommitter(job);
  JobContext jContext = new JobContextImpl(job, taskID.getJobID());
  TaskAttemptContext tContext = new TaskAttemptContextImpl(job, taskID);
  FileOutputCommitter committer = new FileOutputCommitter();
  FileOutputFormat.setWorkOutputPath(job, 
    committer.getTempTaskOutputPath(tContext));

  committer.setupJob(jContext);
  committer.setupTask(tContext);
  String file = "test.txt";

  // A reporter that does nothing
  Reporter reporter = Reporter.NULL;
  // write output
  FileSystem localFs = FileSystem.getLocal(job);
  TextOutputFormat theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter =
    theOutputFormat.getRecordWriter(localFs, job, file, reporter);
  writeOutput(theRecordWriter, reporter);

  // do commit
  committer.commitTask(tContext);
  committer.commitJob(jContext);
  
  // validate output
  File expectedFile = new File(new Path(outDir, file).toString());
  StringBuffer expectedOutput = new StringBuffer();
  expectedOutput.append(key1).append('\t').append(val1).append("\n");
  expectedOutput.append(val1).append("\n");
  expectedOutput.append(val2).append("\n");
  expectedOutput.append(key2).append("\n");
  expectedOutput.append(key1).append("\n");
  expectedOutput.append(key2).append('\t').append(val2).append("\n");
  String output = UtilsForTests.slurp(expectedFile);
  assertEquals(output, expectedOutput.toString());

  FileUtil.fullyDelete(new File(outDir.toString()));
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 42, Source: TestFileOutputCommitter.java

Example 13: jobContext

import org.apache.hadoop.mapred.JobContextImpl; // import the required package/class
/**
 * Gets the job context of the task.
 *
 * @return Job context.
 */
public JobContextImpl jobContext() {
    return jobCtx;
}
 
Developer: apache, Project: ignite, Lines: 9, Source: HadoopV2TaskContext.java


Note: The org.apache.hadoop.mapred.JobContextImpl examples in this article were compiled by 纯净天空 from open-source code hosted on platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, so consult each project's License before using or redistributing the code. Do not reproduce this article without permission.