

Java TaskAttemptContext Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.TaskAttemptContext. If you are wondering what the TaskAttemptContext class is for, how to use it, or what real code that uses it looks like, the hand-picked examples below should help.


The TaskAttemptContext class belongs to the org.apache.hadoop.mapreduce package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the site recommend better Java code examples.
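As a quick orientation before the examples: TaskAttemptContext is the read-only view of a single task attempt (its Configuration, TaskAttemptID, job ID, and so on) that the framework hands to InputFormat, OutputFormat, RecordReader, RecordWriter and OutputCommitter code. The following is a minimal sketch, not taken from any of the examples below, showing how such a context is typically constructed in tests via the concrete TaskAttemptContextImpl class; the "local" identifier, the IDs, and the property name are arbitrary values chosen for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

public class TaskAttemptContextSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("example.property", "example-value");   // arbitrary property for illustration

    // Identifies attempt #0 of map task #0 of job 1 on the "local" framework.
    TaskAttemptID attemptId = new TaskAttemptID("local", 1, TaskType.MAP, 0, 0);

    // TaskAttemptContextImpl is the standard concrete implementation used
    // throughout the Hadoop code base (and in the examples below).
    TaskAttemptContext context = new TaskAttemptContextImpl(conf, attemptId);

    System.out.println(context.getTaskAttemptID());   // e.g. attempt_local_0001_m_000000_0
    System.out.println(context.getJobID());           // e.g. job_local_0001
    System.out.println(context.getConfiguration().get("example.property"));
  }
}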

Example 1: writeOutput

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
private void writeOutput(RecordWriter theRecordWriter,
    TaskAttemptContext context) throws IOException, InterruptedException {
  NullWritable nullWritable = NullWritable.get();

  try {
    theRecordWriter.write(key1, val1);
    theRecordWriter.write(null, nullWritable);
    theRecordWriter.write(null, val1);
    theRecordWriter.write(nullWritable, val2);
    theRecordWriter.write(key2, nullWritable);
    theRecordWriter.write(key1, null);
    theRecordWriter.write(null, null);
    theRecordWriter.write(key2, val2);
  } finally {
    theRecordWriter.close(context);
  }
}
 
Author: naver, Project: hadoop, Lines: 18, Source: TestFileOutputCommitter.java
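For context on how a helper like writeOutput is driven, the sketch below (not part of the original test) builds a TaskAttemptContext with TaskAttemptContextImpl, obtains a RecordWriter from a stock TextOutputFormat, writes one record, and closes the writer with the same close(context) call used above. The output path /tmp/taskattempt-demo and the attempt IDs are arbitrary illustrative values.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

public class RecordWriterSketch {
  public static void main(String[] args) throws IOException, InterruptedException {
    Job job = Job.getInstance(new Configuration());
    FileOutputFormat.setOutputPath(job, new Path("/tmp/taskattempt-demo")); // illustrative path

    TaskAttemptContext context = new TaskAttemptContextImpl(
        job.getConfiguration(), new TaskAttemptID("local", 1, TaskType.MAP, 0, 0));

    // The output format uses the context to locate the task attempt's work file.
    TextOutputFormat<Text, Text> outputFormat = new TextOutputFormat<>();
    RecordWriter<Text, Text> writer = outputFormat.getRecordWriter(context);
    try {
      writer.write(new Text("key1"), new Text("val1"));
    } finally {
      writer.close(context);   // same close(TaskAttemptContext) call as in writeOutput above
    }
  }
}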

Example 2: NewTrackingRecordReader

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
NewTrackingRecordReader(org.apache.hadoop.mapreduce.InputSplit split,
    org.apache.hadoop.mapreduce.InputFormat<K, V> inputFormat,
    TaskReporter reporter,
    org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
    throws InterruptedException, IOException {
  this.reporter = reporter;
  this.inputRecordCounter = reporter
      .getCounter(TaskCounter.MAP_INPUT_RECORDS);
  this.fileInputByteCounter = reporter
      .getCounter(FileInputFormatCounter.BYTES_READ);

  List <Statistics> matchedStats = null;
  if (split instanceof org.apache.hadoop.mapreduce.lib.input.FileSplit) {
    matchedStats = getFsStatistics(((org.apache.hadoop.mapreduce.lib.input.FileSplit) split)
        .getPath(), taskContext.getConfiguration());
  }
  fsStats = matchedStats;

  long bytesInPrev = getInputBytes(fsStats);
  this.real = inputFormat.createRecordReader(split, taskContext);
  long bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
 
Author: naver, Project: hadoop, Lines: 24, Source: MapTask.java

Example 3: writeRandomKeyValues

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
/**
 * Write random values to the writer assuming a table created using
 * {@link #FAMILIES} as column family descriptors
 */
private void writeRandomKeyValues(RecordWriter<ImmutableBytesWritable, KeyValue> writer,
    TaskAttemptContext context, Set<byte[]> families, int numRows)
    throws IOException, InterruptedException {
  byte keyBytes[] = new byte[Bytes.SIZEOF_INT];
  int valLength = 10;
  byte valBytes[] = new byte[valLength];

  int taskId = context.getTaskAttemptID().getTaskID().getId();
  assert taskId < Byte.MAX_VALUE : "Unit tests dont support > 127 tasks!";
  final byte [] qualifier = Bytes.toBytes("data");
  Random random = new Random();
  for (int i = 0; i < numRows; i++) {

    Bytes.putInt(keyBytes, 0, i);
    random.nextBytes(valBytes);
    ImmutableBytesWritable key = new ImmutableBytesWritable(keyBytes);

    for (byte[] family : families) {
      KeyValue kv = new KeyValue(keyBytes, family, qualifier, valBytes);
      writer.write(key, kv);
    }
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 28, Source: TestHFileOutputFormat.java

Example 4: applyMapperJdbcUrl

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
protected void applyMapperJdbcUrl(TaskAttemptContext context, int mapperId) {

    Configuration conf = context.getConfiguration();

    // Retrieve the JDBC URL that should be used by this mapper.
    // We achieve this by modifying the JDBC URL property in the
    // configuration, prior to the OraOopDBRecordWriter's (ancestral)
    // constructor using the configuration to establish a connection
    // to the database - via DBConfiguration.getConnection()...
    String mapperJdbcUrlPropertyName =
        OraOopUtilities.getMapperJdbcUrlPropertyName(mapperId, conf);

    // Get this mapper's JDBC URL
    String mapperJdbcUrl = conf.get(mapperJdbcUrlPropertyName, null);

    LOG.debug(String.format("Mapper %d has a JDBC URL of: %s", mapperId,
        mapperJdbcUrl == null ? "<null>" : mapperJdbcUrl));

    if (mapperJdbcUrl != null) {
      conf.set(DBConfiguration.URL_PROPERTY, mapperJdbcUrl);
    }
  }
 
Author: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 23, Source: OraOopOutputFormatBase.java
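The property-name scheme itself lives in OraOopUtilities and is not shown in this excerpt. As a rough illustration of the same pattern — deriving a per-mapper configuration key and reading it through the TaskAttemptContext — a hedged sketch with a made-up key format might look like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

public class PerMapperConfigSketch {

  // Hypothetical key format for illustration only;
  // OraOopUtilities.getMapperJdbcUrlPropertyName() defines the real one.
  static String mapperJdbcUrlPropertyName(int mapperId) {
    return "example.mapper." + mapperId + ".jdbc.url";
  }

  // Read this mapper's JDBC URL from the job configuration, falling back to a default.
  static String resolveMapperJdbcUrl(TaskAttemptContext context, String defaultUrl) {
    Configuration conf = context.getConfiguration();
    int mapperId = context.getTaskAttemptID().getTaskID().getId();
    return conf.get(mapperJdbcUrlPropertyName(mapperId), defaultUrl);
  }
}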

Example 5: OraOopDBRecordWriterBase

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
public OraOopDBRecordWriterBase(TaskAttemptContext context, int mapperId)
    throws ClassNotFoundException, SQLException {

  super(context);
  this.mapperId = mapperId;
  this.mapperRowNumber = 1;

  Configuration conf = context.getConfiguration();

  // Log any info that might be useful to us...
  logBatchSettings();

  // Connect to Oracle...
  Connection connection = this.getConnection();

  String thisOracleInstanceName =
      OraOopOracleQueries.getCurrentOracleInstanceName(connection);
  LOG.info(String.format(
      "This record writer is connected to Oracle via the JDBC URL: \n"
          + "\t\"%s\"\n" + "\tto the Oracle instance: \"%s\"", connection
          .toString(), thisOracleInstanceName));

  // Initialize the Oracle session...
  OracleConnectionFactory.initializeOracleConnection(connection, conf);
  connection.setAutoCommit(false);
}
 
Author: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 27, Source: OraOopOutputFormatBase.java

Example 6: getExportTableAndColumns

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
protected void getExportTableAndColumns(TaskAttemptContext context)
    throws SQLException {

  Configuration conf = context.getConfiguration();

  String schema =
      context.getConfiguration().get(OraOopConstants.ORAOOP_TABLE_OWNER);
  String localTableName =
      context.getConfiguration().get(OraOopConstants.ORAOOP_TABLE_NAME);

  if (schema == null || schema.isEmpty() || localTableName == null
      || localTableName.isEmpty()) {
    throw new RuntimeException(
        "Unable to recall the schema and name of the Oracle table "
        + "being exported.");
  }

  this.oracleTable = new OracleTable(schema, localTableName);

  setOracleTableColumns(OraOopOracleQueries.getTableColumns(this
      .getConnection(), this.oracleTable, OraOopUtilities
      .omitLobAndLongColumnsDuringImport(conf), OraOopUtilities
      .recallSqoopJobType(conf), true // <- onlyOraOopSupportedTypes
      , false // <- omitOraOopPseudoColumns
      ));
}
 
Author: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 27, Source: OraOopOutputFormatBase.java

Example 7: readSplit

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
private static List<Text> readSplit(InputFormat<LongWritable,Text> format,
  InputSplit split, Job job) throws IOException, InterruptedException {
  List<Text> result = new ArrayList<Text>();
  Configuration conf = job.getConfiguration();
  TaskAttemptContext context = MapReduceTestUtil.
    createDummyMapTaskAttemptContext(conf);
  RecordReader<LongWritable, Text> reader = format.createRecordReader(split,
    MapReduceTestUtil.createDummyMapTaskAttemptContext(conf));
  MapContext<LongWritable,Text,LongWritable,Text> mcontext =
    new MapContextImpl<LongWritable,Text,LongWritable,Text>(conf,
    context.getTaskAttemptID(), reader, null, null,
    MapReduceTestUtil.createDummyReporter(),
    split);
  reader.initialize(split, mcontext);
  while (reader.nextKeyValue()) {
    result.add(new Text(reader.getCurrentValue()));
  }
  return result;
}
 
Author: naver, Project: hadoop, Lines: 20, Source: TestCombineTextInputFormat.java

Example 8: writeRandomKeyValues

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
/**
 * Write random values to the writer assuming a table created using
 * {@link #FAMILIES} as column family descriptors
 */
private void writeRandomKeyValues(RecordWriter<ImmutableBytesWritable, Cell> writer,
    TaskAttemptContext context, Set<byte[]> families, int numRows)
    throws IOException, InterruptedException {
  byte keyBytes[] = new byte[Bytes.SIZEOF_INT];
  int valLength = 10;
  byte valBytes[] = new byte[valLength];

  int taskId = context.getTaskAttemptID().getTaskID().getId();
  assert taskId < Byte.MAX_VALUE : "Unit tests dont support > 127 tasks!";
  final byte [] qualifier = Bytes.toBytes("data");
  Random random = new Random();
  for (int i = 0; i < numRows; i++) {

    Bytes.putInt(keyBytes, 0, i);
    random.nextBytes(valBytes);
    ImmutableBytesWritable key = new ImmutableBytesWritable(keyBytes);

    for (byte[] family : families) {
      Cell kv = new KeyValue(keyBytes, family, qualifier, valBytes);
      writer.write(key, kv);
    }
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 28, Source: TestHFileOutputFormat2.java

Example 9: readSplit

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
private static List<Text> readSplit(KeyValueTextInputFormat format, 
    InputSplit split, Job job) throws IOException, InterruptedException {
  List<Text> result = new ArrayList<Text>();
  Configuration conf = job.getConfiguration();
  TaskAttemptContext context = MapReduceTestUtil.
    createDummyMapTaskAttemptContext(conf);
  RecordReader<Text, Text> reader = format.createRecordReader(split, 
    MapReduceTestUtil.createDummyMapTaskAttemptContext(conf));
  MapContext<Text, Text, Text, Text> mcontext = 
    new MapContextImpl<Text, Text, Text, Text>(conf, 
    context.getTaskAttemptID(), reader, null, null,
    MapReduceTestUtil.createDummyReporter(), 
    split);
  reader.initialize(split, mcontext);
  while (reader.nextKeyValue()) {
    result.add(new Text(reader.getCurrentValue()));
  }
  reader.close();
  return result;
}
 
Author: naver, Project: hadoop, Lines: 21, Source: TestMRKeyValueTextInputFormat.java
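Both readSplit helpers above (Examples 7 and 9) rely on MapReduceTestUtil and MapContextImpl, which are test-only classes from the Hadoop source tree. Outside that test harness, a plain TaskAttemptContextImpl can be passed to createRecordReader and initialize directly; the sketch below, with an arbitrary local input path, shows that simpler variant:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

public class ReadSplitsSketch {
  public static void main(String[] args) throws IOException, InterruptedException {
    Job job = Job.getInstance(new Configuration());
    FileInputFormat.addInputPath(job, new Path("/tmp/input.txt")); // illustrative path

    TextInputFormat format = new TextInputFormat();
    List<InputSplit> splits = format.getSplits(job);

    // One dummy attempt context is enough for reading splits sequentially.
    TaskAttemptContext context = new TaskAttemptContextImpl(
        job.getConfiguration(), new TaskAttemptID("local", 1, TaskType.MAP, 0, 0));

    for (InputSplit split : splits) {
      RecordReader<LongWritable, Text> reader = format.createRecordReader(split, context);
      reader.initialize(split, context);
      while (reader.nextKeyValue()) {
        System.out.println(reader.getCurrentValue());
      }
      reader.close();
    }
  }
}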

Example 10: acquire

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
/**
 * Factory method that
 * 1. acquires a chunk for the specified map-task attempt
 * 2. returns a DynamicInputChunk associated with the acquired chunk-file.
 * @param taskAttemptContext The attempt-context for the map task that's
 * trying to acquire a chunk.
 * @return The acquired dynamic-chunk. The chunk-file is renamed to the
 * attempt-id (from the attempt-context.)
 * @throws IOException Exception on failure.
 * @throws InterruptedException Exception on failure.
 */
public static DynamicInputChunk acquire(TaskAttemptContext taskAttemptContext)
                                    throws IOException, InterruptedException {
  if (!areInvariantsInitialized())
      initializeChunkInvariants(taskAttemptContext.getConfiguration());

  String taskId
          = taskAttemptContext.getTaskAttemptID().getTaskID().toString();
  Path acquiredFilePath = new Path(chunkRootPath, taskId);

  if (fs.exists(acquiredFilePath)) {
    LOG.info("Acquiring pre-assigned chunk: " + acquiredFilePath);
    return new DynamicInputChunk(acquiredFilePath, taskAttemptContext);
  }

  for (FileStatus chunkFile : getListOfChunkFiles()) {
    if (fs.rename(chunkFile.getPath(), acquiredFilePath)) {
      LOG.info(taskId + " acquired " + chunkFile.getPath());
      return new DynamicInputChunk(acquiredFilePath, taskAttemptContext);
    }
    else
      LOG.warn(taskId + " could not acquire " + chunkFile.getPath());
  }

  return null;
}
 
Author: naver, Project: hadoop, Lines: 37, Source: DynamicInputChunk.java

Example 11: initialize

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
@Override
public void initialize( final InputSplit inputSplit, final TaskAttemptContext context ) throws IOException, InterruptedException {
  FileSplit fileSplit = (FileSplit)inputSplit;
  Configuration config = context.getConfiguration();
  Path path = fileSplit.getPath();
  FileSystem fs = path.getFileSystem( config );
  long fileLength = fs.getLength( path );
  long start = fileSplit.getStart();
  long length = fileSplit.getLength();
  InputStream in = fs.open( path );
}
 
Author: yahoojapan, Project: multiple-dimension-spread, Lines: 12, Source: MDSSpreadReader.java

Example 12: close

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
@Override
public void close(TaskAttemptContext context) 
throws IOException,InterruptedException {
  reporter.progress();
  if (out != null) {
    long bytesOutPrev = getOutputBytes(fsStats);
    out.close(context);
    long bytesOutCurr = getOutputBytes(fsStats);
    fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
  }
}
 
Author: naver, Project: hadoop, Lines: 12, Source: MapTask.java

Example 13: transition

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt, 
    TaskAttemptEvent event) {
  TaskAttemptContext taskContext =
    new TaskAttemptContextImpl(taskAttempt.conf,
        TypeConverter.fromYarn(taskAttempt.attemptId));
  taskAttempt.eventHandler.handle(new CommitterTaskAbortEvent(
      taskAttempt.attemptId, taskContext));
}
 
Author: naver, Project: hadoop, Lines: 11, Source: TaskAttemptImpl.java

Example 14: initialize

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
public void initialize(InputSplit genericSplit, TaskAttemptContext context)  {
    try {
        FileSplit split = (FileSplit)genericSplit;
        Configuration job = context.getConfiguration();
        this.maxLineLength = job.getInt("mapreduce.input.linerecordreader.line.maxlength", 2147483647);
        this.start = split.getStart();
        this.end = this.start + split.getLength();
        Path file = split.getPath();
        FileSystem fs = file.getFileSystem(job);
        this.fileIn = fs.open(file);
        CompressionCodec codec = (new CompressionCodecFactory(job)).getCodec(file);
        if(null != codec) {
            this.isCompressedInput = true;
            this.decompressor = CodecPool.getDecompressor(codec);
            if(codec instanceof SplittableCompressionCodec) {
                SplitCompressionInputStream cIn = ((SplittableCompressionCodec)codec).createInputStream(this.fileIn, this.decompressor, this.start, this.end, SplittableCompressionCodec.READ_MODE.BYBLOCK);
                this.in = new CompressedSplitLineReader(cIn, job, this.recordDelimiterBytes);
                this.start = cIn.getAdjustedStart();
                this.end = cIn.getAdjustedEnd();
                this.filePosition = cIn;
            } else {
                this.in = new SplitLineReader(codec.createInputStream(this.fileIn, this.decompressor), job, this.recordDelimiterBytes);
                this.filePosition = this.fileIn;
            }
        } else {
            this.fileIn.seek(this.start);
            this.in = new SplitLineReader(this.fileIn, job, this.recordDelimiterBytes);
            this.filePosition = this.fileIn;
        }

        if(this.start != 0L) {
            this.start += (long)this.in.readLine(new Text(), 0, this.maxBytesToConsume(this.start));
        }

        this.pos = this.start;
    }catch(Exception ex){
        LOG.warn("Exception occurred during initialization {}", ex, ex);
    }

}
 
Author: Comcast, Project: spark-util, Lines: 41, Source: ErrorHandlingLineRecordReader.java

Example 15: getRecordWriter

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
@Override
public RecordWriter<NullWritable,BytesWritable> getRecordWriter(
    TaskAttemptContext job) throws IOException {

  return new ChunkWriter(getDefaultWorkFile(job, ""),
      job.getConfiguration());
}
 
Author: naver, Project: hadoop, Lines: 8, Source: GenerateData.java


Note: The org.apache.hadoop.mapreduce.TaskAttemptContext examples in this article were compiled by 纯净天空 (VimSky) from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are excerpted from open-source projects contributed by their respective authors, and copyright remains with the original authors; consult each project's License before redistributing or reusing the code. Do not reproduce this article without permission.