

Java TaskAttemptContext Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.TaskAttemptContext. If you are wondering how the TaskAttemptContext class is used in practice, or are looking for concrete examples of it, the curated snippets below may help.


The TaskAttemptContext class belongs to the org.apache.hadoop.mapreduce package. A total of 15 code examples of the class are shown below, sorted by popularity by default.
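Before digging into the examples, here is a minimal, hypothetical sketch (not taken from any of the projects cited below) of the two accessors that most of the snippets rely on: getConfiguration(), which exposes the job's Configuration, and getTaskAttemptID(), which identifies the running attempt. The class and method names in the sketch are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

public class TaskAttemptContextSketch {
  // Describes the attempt this context belongs to, using only TaskAttemptContext accessors.
  static String describe(TaskAttemptContext context) {
    Configuration conf = context.getConfiguration();              // per-job settings
    int taskId = context.getTaskAttemptID().getTaskID().getId();  // numeric id of the owning task
    String jobName = conf.get("mapreduce.job.name", "<unnamed>"); // job name, if one was set
    return "task " + taskId + " of job " + jobName;
  }
}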

Example 1: writeOutput

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
private void writeOutput(RecordWriter theRecordWriter,
    TaskAttemptContext context) throws IOException, InterruptedException {
  NullWritable nullWritable = NullWritable.get();

  try {
    theRecordWriter.write(key1, val1);
    theRecordWriter.write(null, nullWritable);
    theRecordWriter.write(null, val1);
    theRecordWriter.write(nullWritable, val2);
    theRecordWriter.write(key2, nullWritable);
    theRecordWriter.write(key1, null);
    theRecordWriter.write(null, null);
    theRecordWriter.write(key2, val2);
  } finally {
    theRecordWriter.close(context);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 18, Source: TestFileOutputCommitter.java

Example 2: NewTrackingRecordReader

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
NewTrackingRecordReader(org.apache.hadoop.mapreduce.InputSplit split,
    org.apache.hadoop.mapreduce.InputFormat<K, V> inputFormat,
    TaskReporter reporter,
    org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
    throws InterruptedException, IOException {
  this.reporter = reporter;
  this.inputRecordCounter = reporter
      .getCounter(TaskCounter.MAP_INPUT_RECORDS);
  this.fileInputByteCounter = reporter
      .getCounter(FileInputFormatCounter.BYTES_READ);

  List <Statistics> matchedStats = null;
  if (split instanceof org.apache.hadoop.mapreduce.lib.input.FileSplit) {
    matchedStats = getFsStatistics(((org.apache.hadoop.mapreduce.lib.input.FileSplit) split)
        .getPath(), taskContext.getConfiguration());
  }
  fsStats = matchedStats;

  long bytesInPrev = getInputBytes(fsStats);
  this.real = inputFormat.createRecordReader(split, taskContext);
  long bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
 
Developer ID: naver, Project: hadoop, Lines: 24, Source: MapTask.java

Example 3: writeRandomKeyValues

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
/**
 * Write random values to the writer assuming a table created using
 * {@link #FAMILIES} as column family descriptors
 */
private void writeRandomKeyValues(RecordWriter<ImmutableBytesWritable, KeyValue> writer,
    TaskAttemptContext context, Set<byte[]> families, int numRows)
    throws IOException, InterruptedException {
  byte keyBytes[] = new byte[Bytes.SIZEOF_INT];
  int valLength = 10;
  byte valBytes[] = new byte[valLength];

  int taskId = context.getTaskAttemptID().getTaskID().getId();
  assert taskId < Byte.MAX_VALUE : "Unit tests dont support > 127 tasks!";
  final byte [] qualifier = Bytes.toBytes("data");
  Random random = new Random();
  for (int i = 0; i < numRows; i++) {

    Bytes.putInt(keyBytes, 0, i);
    random.nextBytes(valBytes);
    ImmutableBytesWritable key = new ImmutableBytesWritable(keyBytes);

    for (byte[] family : families) {
      KeyValue kv = new KeyValue(keyBytes, family, qualifier, valBytes);
      writer.write(key, kv);
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 28, Source: TestHFileOutputFormat.java

Example 4: applyMapperJdbcUrl

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
protected void applyMapperJdbcUrl(TaskAttemptContext context, int mapperId) {

    Configuration conf = context.getConfiguration();

    // Retrieve the JDBC URL that should be used by this mapper.
    // We achieve this by modifying the JDBC URL property in the
    // configuration, prior to the OraOopDBRecordWriter's (ancestral)
    // constructor using the configuration to establish a connection
    // to the database - via DBConfiguration.getConnection()...
    String mapperJdbcUrlPropertyName =
        OraOopUtilities.getMapperJdbcUrlPropertyName(mapperId, conf);

    // Get this mapper's JDBC URL
    String mapperJdbcUrl = conf.get(mapperJdbcUrlPropertyName, null);

    LOG.debug(String.format("Mapper %d has a JDBC URL of: %s", mapperId,
        mapperJdbcUrl == null ? "<null>" : mapperJdbcUrl));

    if (mapperJdbcUrl != null) {
      conf.set(DBConfiguration.URL_PROPERTY, mapperJdbcUrl);
    }
  }
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 23, Source: OraOopOutputFormatBase.java

Example 5: OraOopDBRecordWriterBase

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
public OraOopDBRecordWriterBase(TaskAttemptContext context, int mapperId)
    throws ClassNotFoundException, SQLException {

  super(context);
  this.mapperId = mapperId;
  this.mapperRowNumber = 1;

  Configuration conf = context.getConfiguration();

  // Log any info that might be useful to us...
  logBatchSettings();

  // Connect to Oracle...
  Connection connection = this.getConnection();

  String thisOracleInstanceName =
      OraOopOracleQueries.getCurrentOracleInstanceName(connection);
  LOG.info(String.format(
      "This record writer is connected to Oracle via the JDBC URL: \n"
          + "\t\"%s\"\n" + "\tto the Oracle instance: \"%s\"", connection
          .toString(), thisOracleInstanceName));

  // Initialize the Oracle session...
  OracleConnectionFactory.initializeOracleConnection(connection, conf);
  connection.setAutoCommit(false);
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 27, Source: OraOopOutputFormatBase.java

Example 6: getExportTableAndColumns

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
protected void getExportTableAndColumns(TaskAttemptContext context)
    throws SQLException {

  Configuration conf = context.getConfiguration();

  String schema =
      context.getConfiguration().get(OraOopConstants.ORAOOP_TABLE_OWNER);
  String localTableName =
      context.getConfiguration().get(OraOopConstants.ORAOOP_TABLE_NAME);

  if (schema == null || schema.isEmpty() || localTableName == null
      || localTableName.isEmpty()) {
    throw new RuntimeException(
        "Unable to recall the schema and name of the Oracle table "
        + "being exported.");
  }

  this.oracleTable = new OracleTable(schema, localTableName);

  setOracleTableColumns(OraOopOracleQueries.getTableColumns(this
      .getConnection(), this.oracleTable, OraOopUtilities
      .omitLobAndLongColumnsDuringImport(conf), OraOopUtilities
      .recallSqoopJobType(conf), true // <- onlyOraOopSupportedTypes
      , false // <- omitOraOopPseudoColumns
      ));
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 27, Source: OraOopOutputFormatBase.java

Example 7: readSplit

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
private static List<Text> readSplit(InputFormat<LongWritable,Text> format,
  InputSplit split, Job job) throws IOException, InterruptedException {
  List<Text> result = new ArrayList<Text>();
  Configuration conf = job.getConfiguration();
  TaskAttemptContext context = MapReduceTestUtil.
    createDummyMapTaskAttemptContext(conf);
  RecordReader<LongWritable, Text> reader = format.createRecordReader(split,
    MapReduceTestUtil.createDummyMapTaskAttemptContext(conf));
  MapContext<LongWritable,Text,LongWritable,Text> mcontext =
    new MapContextImpl<LongWritable,Text,LongWritable,Text>(conf,
    context.getTaskAttemptID(), reader, null, null,
    MapReduceTestUtil.createDummyReporter(),
    split);
  reader.initialize(split, mcontext);
  while (reader.nextKeyValue()) {
    result.add(new Text(reader.getCurrentValue()));
  }
  return result;
}
 
Developer ID: naver, Project: hadoop, Lines: 20, Source: TestCombineTextInputFormat.java

Example 8: writeRandomKeyValues

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
/**
 * Write random values to the writer assuming a table created using
 * {@link #FAMILIES} as column family descriptors
 */
private void writeRandomKeyValues(RecordWriter<ImmutableBytesWritable, Cell> writer,
    TaskAttemptContext context, Set<byte[]> families, int numRows)
    throws IOException, InterruptedException {
  byte keyBytes[] = new byte[Bytes.SIZEOF_INT];
  int valLength = 10;
  byte valBytes[] = new byte[valLength];

  int taskId = context.getTaskAttemptID().getTaskID().getId();
  assert taskId < Byte.MAX_VALUE : "Unit tests dont support > 127 tasks!";
  final byte [] qualifier = Bytes.toBytes("data");
  Random random = new Random();
  for (int i = 0; i < numRows; i++) {

    Bytes.putInt(keyBytes, 0, i);
    random.nextBytes(valBytes);
    ImmutableBytesWritable key = new ImmutableBytesWritable(keyBytes);

    for (byte[] family : families) {
      Cell kv = new KeyValue(keyBytes, family, qualifier, valBytes);
      writer.write(key, kv);
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 28, Source: TestHFileOutputFormat2.java

Example 9: readSplit

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
private static List<Text> readSplit(KeyValueTextInputFormat format, 
    InputSplit split, Job job) throws IOException, InterruptedException {
  List<Text> result = new ArrayList<Text>();
  Configuration conf = job.getConfiguration();
  TaskAttemptContext context = MapReduceTestUtil.
    createDummyMapTaskAttemptContext(conf);
  RecordReader<Text, Text> reader = format.createRecordReader(split, 
    MapReduceTestUtil.createDummyMapTaskAttemptContext(conf));
  MapContext<Text, Text, Text, Text> mcontext = 
    new MapContextImpl<Text, Text, Text, Text>(conf, 
    context.getTaskAttemptID(), reader, null, null,
    MapReduceTestUtil.createDummyReporter(), 
    split);
  reader.initialize(split, mcontext);
  while (reader.nextKeyValue()) {
    result.add(new Text(reader.getCurrentValue()));
  }
  reader.close();
  return result;
}
 
Developer ID: naver, Project: hadoop, Lines: 21, Source: TestMRKeyValueTextInputFormat.java

Example 10: acquire

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
/**
 * Factory method that
 * 1. acquires a chunk for the specified map-task attempt
 * 2. returns a DynamicInputChunk associated with the acquired chunk-file.
 * @param taskAttemptContext The attempt-context for the map task that's
 * trying to acquire a chunk.
 * @return The acquired dynamic-chunk. The chunk-file is renamed to the
 * attempt-id (from the attempt-context.)
 * @throws IOException Exception on failure.
 * @throws InterruptedException Exception on failure.
 */
public static DynamicInputChunk acquire(TaskAttemptContext taskAttemptContext)
                                    throws IOException, InterruptedException {
  if (!areInvariantsInitialized())
      initializeChunkInvariants(taskAttemptContext.getConfiguration());

  String taskId
          = taskAttemptContext.getTaskAttemptID().getTaskID().toString();
  Path acquiredFilePath = new Path(chunkRootPath, taskId);

  if (fs.exists(acquiredFilePath)) {
    LOG.info("Acquiring pre-assigned chunk: " + acquiredFilePath);
    return new DynamicInputChunk(acquiredFilePath, taskAttemptContext);
  }

  for (FileStatus chunkFile : getListOfChunkFiles()) {
    if (fs.rename(chunkFile.getPath(), acquiredFilePath)) {
      LOG.info(taskId + " acquired " + chunkFile.getPath());
      return new DynamicInputChunk(acquiredFilePath, taskAttemptContext);
    }
    else
      LOG.warn(taskId + " could not acquire " + chunkFile.getPath());
  }

  return null;
}
 
Developer ID: naver, Project: hadoop, Lines: 37, Source: DynamicInputChunk.java

Example 11: initialize

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
@Override
public void initialize( final InputSplit inputSplit, final TaskAttemptContext context ) throws IOException, InterruptedException {
  FileSplit fileSplit = (FileSplit)inputSplit;
  Configuration config = context.getConfiguration();
  Path path = fileSplit.getPath();
  FileSystem fs = path.getFileSystem( config );
  long fileLength = fs.getLength( path );
  long start = fileSplit.getStart();
  long length = fileSplit.getLength();
  InputStream in = fs.open( path );
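  // (Excerpt) In the full MDSSpreadReader class the opened stream and the split's
  // start/length are presumably handed to the reader's setup; they are unused here.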
}
 
Developer ID: yahoojapan, Project: multiple-dimension-spread, Lines: 12, Source: MDSSpreadReader.java

Example 12: close

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
@Override
public void close(TaskAttemptContext context) 
throws IOException,InterruptedException {
  reporter.progress();
  if (out != null) {
    long bytesOutPrev = getOutputBytes(fsStats);
    out.close(context);
    long bytesOutCurr = getOutputBytes(fsStats);
    fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 12, Source: MapTask.java

Example 13: transition

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt, 
    TaskAttemptEvent event) {
  TaskAttemptContext taskContext =
    new TaskAttemptContextImpl(taskAttempt.conf,
        TypeConverter.fromYarn(taskAttempt.attemptId));
  taskAttempt.eventHandler.handle(new CommitterTaskAbortEvent(
      taskAttempt.attemptId, taskContext));
}
 
Developer ID: naver, Project: hadoop, Lines: 11, Source: TaskAttemptImpl.java
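A brief aside that is not part of the quoted project code: Example 13 above builds a TaskAttemptContextImpl directly from a Configuration and a TaskAttemptID, while Examples 7 and 9 obtain a dummy context from MapReduceTestUtil. When neither helper is at hand, a context for tests can be assembled the same way; the factory below is a hypothetical sketch along those lines.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

public class TaskAttemptContextFactory {
  // Builds a TaskAttemptContext from a Configuration and an attempt-id string,
  // e.g. for driving a RecordReader or RecordWriter outside a running job.
  static TaskAttemptContext contextFor(Configuration conf, String attemptIdString) {
    TaskAttemptID attemptId = TaskAttemptID.forName(attemptIdString);
    return new TaskAttemptContextImpl(conf, attemptId);
  }
}

A call such as contextFor(new Configuration(), "attempt_200707121733_0003_m_000005_0") yields a context that can be passed wherever the examples here take a TaskAttemptContext parameter.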

Example 14: initialize

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
public void initialize(InputSplit genericSplit, TaskAttemptContext context)  {
    try {
        FileSplit split = (FileSplit)genericSplit;
        Configuration job = context.getConfiguration();
        this.maxLineLength = job.getInt("mapreduce.input.linerecordreader.line.maxlength", 2147483647);
        this.start = split.getStart();
        this.end = this.start + split.getLength();
        Path file = split.getPath();
        FileSystem fs = file.getFileSystem(job);
        this.fileIn = fs.open(file);
        CompressionCodec codec = (new CompressionCodecFactory(job)).getCodec(file);
        if(null != codec) {
            this.isCompressedInput = true;
            this.decompressor = CodecPool.getDecompressor(codec);
            if(codec instanceof SplittableCompressionCodec) {
                SplitCompressionInputStream cIn = ((SplittableCompressionCodec)codec).createInputStream(this.fileIn, this.decompressor, this.start, this.end, SplittableCompressionCodec.READ_MODE.BYBLOCK);
                this.in = new CompressedSplitLineReader(cIn, job, this.recordDelimiterBytes);
                this.start = cIn.getAdjustedStart();
                this.end = cIn.getAdjustedEnd();
                this.filePosition = cIn;
            } else {
                this.in = new SplitLineReader(codec.createInputStream(this.fileIn, this.decompressor), job, this.recordDelimiterBytes);
                this.filePosition = this.fileIn;
            }
        } else {
            this.fileIn.seek(this.start);
            this.in = new SplitLineReader(this.fileIn, job, this.recordDelimiterBytes);
            this.filePosition = this.fileIn;
        }

        if(this.start != 0L) {
            this.start += (long)this.in.readLine(new Text(), 0, this.maxBytesToConsume(this.start));
        }

        this.pos = this.start;
    }catch(Exception ex){
        LOG.warn("Exception occurred during initialization {}", ex, ex);
    }

}
 
Developer ID: Comcast, Project: spark-util, Lines: 41, Source: ErrorHandlingLineRecordReader.java

Example 15: getRecordWriter

import org.apache.hadoop.mapreduce.TaskAttemptContext; // import the required package/class
@Override
public RecordWriter<NullWritable,BytesWritable> getRecordWriter(
    TaskAttemptContext job) throws IOException {

  return new ChunkWriter(getDefaultWorkFile(job, ""),
      job.getConfiguration());
}
 
Developer ID: naver, Project: hadoop, Lines: 8, Source: GenerateData.java


Note: The org.apache.hadoop.mapreduce.TaskAttemptContext class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code belongs to the original authors. For distribution and use, please follow the license of the corresponding project. Do not reproduce without permission.