Java TaskAttemptContextImpl Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapred.TaskAttemptContextImpl. If you are unsure what TaskAttemptContextImpl is for, or how to use it in practice, the examples selected below should help.


The TaskAttemptContextImpl class belongs to the org.apache.hadoop.mapred package. Thirteen code examples of the class are shown below, ordered by popularity.
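Before diving into the examples, here is a minimal, self-contained sketch of the pattern they all share: wrap a JobConf and a TaskAttemptID in a TaskAttemptContextImpl, then hand the context to the job's OutputCommitter. The attempt-id string below is a placeholder in Hadoop's canonical format, and the default FileOutputCommitter is assumed; neither is taken from a specific example.

import java.io.IOException;

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputCommitter;
import org.apache.hadoop.mapred.TaskAttemptContextImpl;
import org.apache.hadoop.mapred.TaskAttemptID;

public class TaskAttemptContextSketch {
    public static void main(String[] args) throws IOException {
        JobConf jobConf = new JobConf();
        // Placeholder attempt id in the canonical "attempt_<jt>_<job>_<m|r>_<task>_<attempt>" format
        TaskAttemptID attempt = TaskAttemptID.forName("attempt_200707121733_0001_m_000000_0");
        jobConf.set("mapred.task.id", attempt.toString());

        // The context ties the job configuration to one task attempt; committers,
        // record writers, and the MR framework all consume it.
        TaskAttemptContextImpl taskContext = new TaskAttemptContextImpl(jobConf, attempt);

        // JobConf defaults to FileOutputCommitter, whose mapred setupTask is effectively a no-op
        OutputCommitter committer = jobConf.getOutputCommitter();
        committer.setupTask(taskContext);
    }
}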

Example 1: get

import org.apache.hadoop.mapred.TaskAttemptContextImpl; // import the required package/class
@Override @Nonnull
public List<Processor> get(int count) {
    return processorList = range(0, count).mapToObj(i -> {
        try {
            String uuid = context.jetInstance().getCluster().getLocalMember().getUuid();
            // Build a unique task attempt id from the member uuid and the local processor index
            TaskAttemptID taskAttemptID = new TaskAttemptID("jet-node-" + uuid, jobContext.getJobID().getId(),
                    JOB_SETUP, i, 0);
            jobConf.set("mapred.task.id", taskAttemptID.toString());
            jobConf.setInt("mapred.task.partition", i);

            TaskAttemptContextImpl taskAttemptContext = new TaskAttemptContextImpl(jobConf, taskAttemptID);
            @SuppressWarnings("unchecked")
            OutputFormat<K, V> outFormat = jobConf.getOutputFormat();
            // One record writer per processor; the output name encodes the uuid and index
            RecordWriter<K, V> recordWriter = outFormat.getRecordWriter(
                    null, jobConf, uuid + '-' + valueOf(i), Reporter.NULL);
            return new WriteHdfsP<>(
                    recordWriter, taskAttemptContext, outputCommitter, extractKeyFn, extractValueFn);
        } catch (IOException e) {
            throw new JetException(e);
        }

    }).collect(toList());
}
 
Developer ID: hazelcast, Project: hazelcast-jet, Lines: 24, Source: WriteHdfsP.java

Example 2: transition

import org.apache.hadoop.mapred.TaskAttemptContextImpl; // import the required package/class
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt, 
    TaskAttemptEvent event) {
  TaskAttemptContext taskContext =
    new TaskAttemptContextImpl(taskAttempt.conf,
        TypeConverter.fromYarn(taskAttempt.attemptId));
  taskAttempt.eventHandler.handle(new CommitterTaskAbortEvent(
      taskAttempt.attemptId, taskContext));
}
 
Developer ID: naver, Project: hadoop, Lines: 11, Source: TaskAttemptImpl.java

Example 3: testAbort

import org.apache.hadoop.mapred.TaskAttemptContextImpl; // import the required package/class
public void testAbort() throws IOException {
  JobConf job = new JobConf();
  setConfForFileOutputCommitter(job);
  JobContext jContext = new JobContextImpl(job, taskID.getJobID());
  TaskAttemptContext tContext = new TaskAttemptContextImpl(job, taskID);
  FileOutputCommitter committer = new FileOutputCommitter();
  FileOutputFormat.setWorkOutputPath(job, committer
      .getTaskAttemptPath(tContext));

  // do setup
  committer.setupJob(jContext);
  committer.setupTask(tContext);
  String file = "test.txt";

  // A reporter that does nothing
  Reporter reporter = Reporter.NULL;
  // write output
  FileSystem localFs = FileSystem.getLocal(job);
  TextOutputFormat theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(localFs,
      job, file, reporter);
  writeOutput(theRecordWriter, reporter);

  // do abort
  committer.abortTask(tContext);
  File expectedFile = new File(new Path(committer
      .getTaskAttemptPath(tContext), file).toString());
  assertFalse("task temp dir still exists", expectedFile.exists());

  committer.abortJob(jContext, JobStatus.State.FAILED);
  expectedFile = new File(new Path(outDir, FileOutputCommitter.TEMP_DIR_NAME)
      .toString());
  assertFalse("job temp dir "+expectedFile+" still exists", expectedFile.exists());
  assertEquals("Output directory not empty", 0, new File(outDir.toString())
      .listFiles().length);
  FileUtil.fullyDelete(new File(outDir.toString()));
}
 
Developer ID: naver, Project: hadoop, Lines: 38, Source: TestMRCJCFileOutputCommitter.java

Example 4: open

import org.apache.hadoop.mapred.TaskAttemptContextImpl; // import the required package/class
/**
 * Creates the temporary output file for the Hadoop RecordWriter.
 * @param taskNumber The number of the parallel instance.
 * @param numTasks The number of parallel tasks.
 * @throws java.io.IOException Thrown if the record writer cannot be opened.
 */
@Override
public void open(int taskNumber, int numTasks) throws IOException {

	// enforce sequential open() calls
	synchronized (OPEN_MUTEX) {
		if (Integer.toString(taskNumber + 1).length() > 6) {
			throw new IOException("Task id too large.");
		}

		TaskAttemptID taskAttemptID = TaskAttemptID.forName("attempt__0000_r_"
				+ String.format("%" + (6 - Integer.toString(taskNumber + 1).length()) + "s", " ").replace(" ", "0")
				+ Integer.toString(taskNumber + 1)
				+ "_0");

		this.jobConf.set("mapred.task.id", taskAttemptID.toString());
		this.jobConf.setInt("mapred.task.partition", taskNumber + 1);
		// for hadoop 2.2
		this.jobConf.set("mapreduce.task.attempt.id", taskAttemptID.toString());
		this.jobConf.setInt("mapreduce.task.partition", taskNumber + 1);

		this.context = new TaskAttemptContextImpl(this.jobConf, taskAttemptID);

		this.outputCommitter = this.jobConf.getOutputCommitter();

		JobContext jobContext = new JobContextImpl(this.jobConf, new JobID());

		this.outputCommitter.setupJob(jobContext);

		this.recordWriter = this.mapredOutputFormat.getRecordWriter(null, this.jobConf, Integer.toString(taskNumber + 1), new HadoopDummyProgressable());
	}
}
 
Developer ID: axbaretto, Project: flink, Lines: 38, Source: HadoopOutputFormatBase.java
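A side note on Example 4: the String.format gymnastics zero-pad the one-based task number to six digits. The tiny demo below (a hypothetical helper, not part of HadoopOutputFormatBase) reproduces that padding so you can see the attempt ids open() generates.

public class AttemptIdFormatDemo {
    // Mirrors the id construction in open() above for a given 0-based task number
    static String attemptIdFor(int taskNumber) {
        String suffix = Integer.toString(taskNumber + 1);
        String padding = String.format("%" + (6 - suffix.length()) + "s", " ").replace(" ", "0");
        return "attempt__0000_r_" + padding + suffix + "_0";
    }

    public static void main(String[] args) {
        System.out.println(attemptIdFor(0)); // attempt__0000_r_000001_0
        System.out.println(attemptIdFor(2)); // attempt__0000_r_000003_0
    }
}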

Example 5: WriteHdfsP

import org.apache.hadoop.mapred.TaskAttemptContextImpl; // import the required package/class
private WriteHdfsP(RecordWriter<K, V> recordWriter,
                   TaskAttemptContextImpl taskAttemptContext,
                   OutputCommitter outputCommitter,
                   DistributedFunction<? super T, K> extractKeyFn,
                   DistributedFunction<? super T, V> extractValueFn
) {
    this.recordWriter = recordWriter;
    this.taskAttemptContext = taskAttemptContext;
    this.outputCommitter = outputCommitter;
    this.extractKeyFn = extractKeyFn;
    this.extractValueFn = extractValueFn;
}
 
Developer ID: hazelcast, Project: hazelcast-jet, Lines: 13, Source: WriteHdfsP.java

Example 6: commit

import org.apache.hadoop.mapred.TaskAttemptContextImpl; // import the required package/class
/**
 * Commit task.
 *
 * @throws IOException If failed.
 */
public void commit() throws IOException {
    if (writer != null) {
        OutputCommitter outputCommitter = jobConf.getOutputCommitter();

        TaskAttemptContext taskCtx = new TaskAttemptContextImpl(jobConf, attempt);

        if (outputCommitter.needsTaskCommit(taskCtx))
            outputCommitter.commitTask(taskCtx);
    }
}
 
Developer ID: apache, Project: ignite, Lines: 16, Source: HadoopV1OutputCollector.java

Example 7: abort

import org.apache.hadoop.mapred.TaskAttemptContextImpl; // import the required package/class
/**
 * Abort task.
 */
public void abort() {
    try {
        if (writer != null)
            jobConf.getOutputCommitter().abortTask(new TaskAttemptContextImpl(jobConf, attempt));
    }
    catch (IOException ignore) {
        // No-op.
    }
}
 
Developer ID: apache, Project: ignite, Lines: 13, Source: HadoopV1OutputCollector.java

Example 8: testAbort

import org.apache.hadoop.mapred.TaskAttemptContextImpl; // import the required package/class
public void testAbort() throws IOException {
  JobConf job = new JobConf();
  setConfForFileOutputCommitter(job);
  JobContext jContext = new JobContextImpl(job, taskID.getJobID());
  TaskAttemptContext tContext = new TaskAttemptContextImpl(job, taskID);
  FileOutputCommitter committer = new FileOutputCommitter();
  FileOutputFormat.setWorkOutputPath(job, committer
      .getTempTaskOutputPath(tContext));

  // do setup
  committer.setupJob(jContext);
  committer.setupTask(tContext);
  String file = "test.txt";

  // A reporter that does nothing
  Reporter reporter = Reporter.NULL;
  // write output
  FileSystem localFs = FileSystem.getLocal(job);
  TextOutputFormat theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(localFs,
      job, file, reporter);
  writeOutput(theRecordWriter, reporter);

  // do abort
  committer.abortTask(tContext);
  File expectedFile = new File(new Path(committer
      .getTempTaskOutputPath(tContext), file).toString());
  assertFalse("task temp dir still exists", expectedFile.exists());

  committer.abortJob(jContext, JobStatus.State.FAILED);
  expectedFile = new File(new Path(outDir, FileOutputCommitter.TEMP_DIR_NAME)
      .toString());
  assertFalse("job temp dir still exists", expectedFile.exists());
  assertEquals("Output directory not empty", 0, new File(outDir.toString())
      .listFiles().length);
  FileUtil.fullyDelete(new File(outDir.toString()));
}
 
Developer ID: rekhajoshm, Project: mapreduce-fork, Lines: 38, Source: TestFileOutputCommitter.java

Example 9: shouldReturnListsAsRecords

import org.apache.hadoop.mapred.TaskAttemptContextImpl; // import the required package/class
@Test public void shouldReturnListsAsRecords() throws Exception {
    JobConf conf = createConfig();
    TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());

    CSVNLineInputFormat inputFormat = new CSVNLineInputFormat();
    List<InputSplit> actualSplits = inputFormat.getSplits(new JobContextImpl(conf, new JobID()) { });
    RecordReader<LongWritable, List<Text>> recordReader = inputFormat.createRecordReader(actualSplits.get(0), context);

    recordReader.initialize(actualSplits.get(0), context);

    recordReader.nextKeyValue();
    List<Text> firstLineValue = recordReader.getCurrentValue();

    assertEquals("Joe Demo", firstLineValue.get(0).toString());
    assertEquals("2 Demo Street,\nDemoville,\nAustralia. 2615", firstLineValue.get(1).toString());
    assertEquals("[email protected]", firstLineValue.get(2).toString());

    recordReader.nextKeyValue();
    List<Text> secondLineValue = recordReader.getCurrentValue();

    assertEquals("Jim Sample", secondLineValue.get(0).toString());
    assertEquals("", secondLineValue.get(1).toString());
    assertEquals("[email protected]", secondLineValue.get(2).toString());

    recordReader.nextKeyValue();
    List<Text> thirdLineValue = recordReader.getCurrentValue();

    assertEquals("Jack Example", thirdLineValue.get(0).toString());
    assertEquals("1 Example Street, Exampleville, Australia.\n2615", thirdLineValue.get(1).toString());
    assertEquals("[email protected]", thirdLineValue.get(2).toString());
}
 
Developer ID: mvallebr, Project: CSVInputFormat, Lines: 34, Source: CSVNLineInputFormatTest.java

Example 10: testCommitter

import org.apache.hadoop.mapred.TaskAttemptContextImpl; // import the required package/class
@SuppressWarnings("unchecked")
public void testCommitter() throws Exception {
  JobConf job = new JobConf();
  setConfForFileOutputCommitter(job);
  JobContext jContext = new JobContextImpl(job, taskID.getJobID());
  TaskAttemptContext tContext = new TaskAttemptContextImpl(job, taskID);
  FileOutputCommitter committer = new FileOutputCommitter();
  FileOutputFormat.setWorkOutputPath(job, 
    committer.getTaskAttemptPath(tContext));

  committer.setupJob(jContext);
  committer.setupTask(tContext);
  String file = "test.txt";

  // A reporter that does nothing
  Reporter reporter = Reporter.NULL;
  // write output
  FileSystem localFs = FileSystem.getLocal(job);
  TextOutputFormat theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter =
    theOutputFormat.getRecordWriter(localFs, job, file, reporter);
  writeOutput(theRecordWriter, reporter);

  // do commit
  committer.commitTask(tContext);
  committer.commitJob(jContext);
  
  // validate output
  File expectedFile = new File(new Path(outDir, file).toString());
  StringBuffer expectedOutput = new StringBuffer();
  expectedOutput.append(key1).append('\t').append(val1).append("\n");
  expectedOutput.append(val1).append("\n");
  expectedOutput.append(val2).append("\n");
  expectedOutput.append(key2).append("\n");
  expectedOutput.append(key1).append("\n");
  expectedOutput.append(key2).append('\t').append(val2).append("\n");
  String output = UtilsForTests.slurp(expectedFile);
  assertEquals(output, expectedOutput.toString());

  FileUtil.fullyDelete(new File(outDir.toString()));
}
 
Developer ID: naver, Project: hadoop, Lines: 42, Source: TestMRCJCFileOutputCommitter.java

Example 11: testCommitter

import org.apache.hadoop.mapred.TaskAttemptContextImpl; // import the required package/class
@SuppressWarnings("unchecked")
public void testCommitter() throws Exception {
  JobConf job = new JobConf();
  job.set("mapred.task.id", attempt);
  job.setOutputCommitter(FileOutputCommitter.class);
  FileOutputFormat.setOutputPath(job, outDir);
  JobContext jContext = new JobContextImpl(job, taskID.getJobID());
  TaskAttemptContext tContext = new TaskAttemptContextImpl(job, taskID);
  FileOutputCommitter committer = new FileOutputCommitter();
  FileOutputFormat.setWorkOutputPath(job, 
    committer.getTempTaskOutputPath(tContext));

  committer.setupJob(jContext);
  committer.setupTask(tContext);
  String file = "test.txt";

  // A reporter that does nothing
  Reporter reporter = Reporter.NULL;
  FileSystem localFs = FileSystem.getLocal(job);
  TextOutputFormat theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter =
    theOutputFormat.getRecordWriter(localFs, job, file, reporter);
  Text key1 = new Text("key1");
  Text key2 = new Text("key2");
  Text val1 = new Text("val1");
  Text val2 = new Text("val2");
  NullWritable nullWritable = NullWritable.get();

  try {
    theRecordWriter.write(key1, val1);
    theRecordWriter.write(null, nullWritable);
    theRecordWriter.write(null, val1);
    theRecordWriter.write(nullWritable, val2);
    theRecordWriter.write(key2, nullWritable);
    theRecordWriter.write(key1, null);
    theRecordWriter.write(null, null);
    theRecordWriter.write(key2, val2);
  } finally {
    theRecordWriter.close(reporter);
  }
  committer.commitTask(tContext);
  committer.commitJob(jContext);
  
  File expectedFile = new File(new Path(outDir, file).toString());
  StringBuffer expectedOutput = new StringBuffer();
  expectedOutput.append(key1).append('\t').append(val1).append("\n");
  expectedOutput.append(val1).append("\n");
  expectedOutput.append(val2).append("\n");
  expectedOutput.append(key2).append("\n");
  expectedOutput.append(key1).append("\n");
  expectedOutput.append(key2).append('\t').append(val2).append("\n");
  String output = UtilsForTests.slurp(expectedFile);
  assertEquals(output, expectedOutput.toString());
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 55, Source: TestFileOutputCommitter.java

Example 12: testCommitter

import org.apache.hadoop.mapred.TaskAttemptContextImpl; // import the required package/class
@SuppressWarnings("unchecked")
public void testCommitter() throws Exception {
  JobConf job = new JobConf();
  setConfForFileOutputCommitter(job);
  JobContext jContext = new JobContextImpl(job, taskID.getJobID());
  TaskAttemptContext tContext = new TaskAttemptContextImpl(job, taskID);
  FileOutputCommitter committer = new FileOutputCommitter();
  FileOutputFormat.setWorkOutputPath(job, 
    committer.getTempTaskOutputPath(tContext));

  committer.setupJob(jContext);
  committer.setupTask(tContext);
  String file = "test.txt";

  // A reporter that does nothing
  Reporter reporter = Reporter.NULL;
  // write output
  FileSystem localFs = FileSystem.getLocal(job);
  TextOutputFormat theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter =
    theOutputFormat.getRecordWriter(localFs, job, file, reporter);
  writeOutput(theRecordWriter, reporter);

  // do commit
  committer.commitTask(tContext);
  committer.commitJob(jContext);
  
  // validate output
  File expectedFile = new File(new Path(outDir, file).toString());
  StringBuffer expectedOutput = new StringBuffer();
  expectedOutput.append(key1).append('\t').append(val1).append("\n");
  expectedOutput.append(val1).append("\n");
  expectedOutput.append(val2).append("\n");
  expectedOutput.append(key2).append("\n");
  expectedOutput.append(key1).append("\n");
  expectedOutput.append(key2).append('\t').append(val2).append("\n");
  String output = UtilsForTests.slurp(expectedFile);
  assertEquals(output, expectedOutput.toString());

  FileUtil.fullyDelete(new File(outDir.toString()));
}
 
Developer ID: rekhajoshm, Project: mapreduce-fork, Lines: 42, Source: TestFileOutputCommitter.java

Example 13: setup

import org.apache.hadoop.mapred.TaskAttemptContextImpl; // import the required package/class
/**
 * Setup task.
 *
 * @throws IOException If failed.
 */
public void setup() throws IOException {
    if (writer != null)
        jobConf.getOutputCommitter().setupTask(new TaskAttemptContextImpl(jobConf, attempt));
}
 
Developer ID: apache, Project: ignite, Lines: 10, Source: HadoopV1OutputCollector.java
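Taken together, Examples 6, 7, and 13 cover the full committer lifecycle used by HadoopV1OutputCollector. The sketch below strings them into one flow; the runTask helper and its Runnable parameter are illustrative assumptions, not code from the Ignite project.

import java.io.IOException;

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputCommitter;
import org.apache.hadoop.mapred.TaskAttemptContext;
import org.apache.hadoop.mapred.TaskAttemptContextImpl;
import org.apache.hadoop.mapred.TaskAttemptID;

public class CommitterLifecycleSketch {
    /** Runs one task attempt through setup, write, and commit-or-abort. */
    public static void runTask(JobConf jobConf, TaskAttemptID attempt, Runnable writeOutput)
            throws IOException {
        OutputCommitter committer = jobConf.getOutputCommitter();
        TaskAttemptContext taskCtx = new TaskAttemptContextImpl(jobConf, attempt);

        committer.setupTask(taskCtx);              // Example 13: setup
        try {
            writeOutput.run();                     // produce the task's output
            if (committer.needsTaskCommit(taskCtx))
                committer.commitTask(taskCtx);     // Example 6: commit
        } catch (IOException | RuntimeException e) {
            committer.abortTask(taskCtx);          // Example 7: abort
            throw e;
        }
    }
}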

