

Java Mapper.Context Code Examples

This article compiles typical usage examples of org.apache.hadoop.mapreduce.Mapper.Context (the nested Context class of Mapper), collected from open-source Java projects. If you have been wondering what exactly Mapper.Context is for, how to use it, or where to see it in real code, then you are in luck: the hand-picked code examples below may help. You can also go a step further and browse the usage examples of the enclosing class, org.apache.hadoop.mapreduce.Mapper.


The 15 Mapper.Context code examples below are sorted by popularity by default.
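
Before turning to the collected examples, here is a minimal, self-contained sketch of the class in everyday use. It is illustrative only: the mapper class name, the "app.keyword" configuration key, and the counter names are invented for this sketch, while the Context calls themselves (getConfiguration, write, getCounter) are the standard org.apache.hadoop.mapreduce API.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Illustrative only: emits (keyword, 1) for every input line containing
// a keyword read from the job configuration.
public class KeywordLineMapper
    extends Mapper<LongWritable, Text, Text, IntWritable> {

  private static final IntWritable ONE = new IntWritable(1);
  private String keyword;

  @Override
  protected void setup(Context context) {
    // Context here is Mapper.Context; it exposes the job configuration.
    Configuration conf = context.getConfiguration();
    keyword = conf.get("app.keyword", "ERROR"); // "app.keyword" is made up
  }

  @Override
  protected void map(LongWritable key, Text value, Context context)
      throws IOException, InterruptedException {
    if (value.toString().contains(keyword)) {
      context.write(new Text(keyword), ONE);                  // emit a pair
      context.getCounter("app", "matchedLines").increment(1); // custom counter
    }
  }
}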

Example 1: getTmpFile

import org.apache.hadoop.mapreduce.Mapper; // import the dependent package/class
private Path getTmpFile(Path target, Mapper.Context context) {
  Path targetWorkPath = new Path(context.getConfiguration().
      get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH));

  Path root = target.equals(targetWorkPath) ? targetWorkPath.getParent() : targetWorkPath;
  Path tmpFile = new Path(root, ".distcp.tmp." + context.getTaskAttemptID().toString());
  LOG.info("Creating temp file: " + tmpFile);
  return tmpFile;
}
 
Author: naver, Project: hadoop, Lines: 10, Source: RetriableFileCopyCommand.java

Example 2: addMapper

import org.apache.hadoop.mapreduce.Mapper; // import the dependent package/class
/**
 * Add a mapper (the first mapper in the chain) that reads input from the
 * input context and writes to the queue.
 */
@SuppressWarnings("unchecked")
void addMapper(TaskInputOutputContext inputContext,
    ChainBlockingQueue<KeyValuePair<?, ?>> output, int index)
    throws IOException, InterruptedException {
  Configuration conf = getConf(index);
  Class<?> keyOutClass = conf.getClass(MAPPER_OUTPUT_KEY_CLASS, Object.class);
  Class<?> valueOutClass = conf.getClass(MAPPER_OUTPUT_VALUE_CLASS,
      Object.class);

  RecordReader rr = new ChainRecordReader(inputContext);
  RecordWriter rw = new ChainRecordWriter(keyOutClass, valueOutClass, output,
      conf);
  Mapper.Context mapperContext = createMapContext(rr, rw,
      (MapContext) inputContext, getConf(index));
  MapRunner runner = new MapRunner(mappers.get(index), mapperContext, rr, rw);
  threads.add(runner);
}
 
Author: naver, Project: hadoop, Lines: 22, Source: Chain.java
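
The addMapper above is internal plumbing in Chain.java; user code reaches it through the public org.apache.hadoop.mapreduce.lib.chain.ChainMapper API. Below is a hedged sketch of that wiring, where AMap and BMap are placeholder Mapper implementations that are not part of the source above:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.chain.ChainMapper;

// Sketch: chain two placeholder mappers inside a single map task. Each
// mapper after the first reads the previous mapper's output from the
// in-memory queue that the internal addMapper above wires up.
static Job buildChainJob() throws IOException {
  Job job = Job.getInstance(new Configuration(), "chained mappers");

  ChainMapper.addMapper(job, AMap.class,
      LongWritable.class, Text.class,   // AMap's input key/value types
      Text.class, Text.class,           // AMap's output key/value types
      new Configuration(false));

  ChainMapper.addMapper(job, BMap.class,
      Text.class, Text.class,           // must match AMap's output types
      Text.class, IntWritable.class,
      new Configuration(false));
  return job;
}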

Example 3: copyToFile

import org.apache.hadoop.mapreduce.Mapper; // import the dependent package/class
private long copyToFile(Path targetPath, FileSystem targetFS,
    FileStatus sourceFileStatus, long sourceOffset, Mapper.Context context,
    EnumSet<FileAttribute> fileAttributes, final FileChecksum sourceChecksum)
    throws IOException {
  FsPermission permission = FsPermission.getFileDefault().applyUMask(
      FsPermission.getUMask(targetFS.getConf()));
  final OutputStream outStream;
  if (action == FileAction.OVERWRITE) {
    final short repl = getReplicationFactor(fileAttributes, sourceFileStatus,
        targetFS, targetPath);
    final long blockSize = getBlockSize(fileAttributes, sourceFileStatus,
        targetFS, targetPath);
    FSDataOutputStream out = targetFS.create(targetPath, permission,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        BUFFER_SIZE, repl, blockSize, context,
        getChecksumOpt(fileAttributes, sourceChecksum));
    outStream = new BufferedOutputStream(out);
  } else {
    outStream = new BufferedOutputStream(targetFS.append(targetPath,
        BUFFER_SIZE));
  }
  return copyBytes(sourceFileStatus, sourceOffset, outStream, BUFFER_SIZE,
      context);
}
 
Author: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 25, Source: RetriableFileCopyCommand.java

Example 4: startAligner

import org.apache.hadoop.mapreduce.Mapper; // import the dependent package/class
@Override
protected void startAligner(Mapper.Context context) throws IOException, InterruptedException {
    File file1 = new File(getFileName(tmpdir, taskId, 1));
    if (!file1.exists()) {
        file1.createNewFile();
    }
    fastqFile1 = new BufferedWriter(new FileWriter(file1.getAbsoluteFile()));
    if(isPaired) {
        File file2 = new File(getFileName(tmpdir, taskId, 2));
        if (!file2.exists()) {
                file2.createNewFile();
        }
        fastqFile2 = new BufferedWriter(new FileWriter(file2.getAbsoluteFile()));
    }
    // make output dir!
    File starOut = new File(starOutDir);
    starOut.mkdirs();
}
 
Author: biointec, Project: halvade, Lines: 19, Source: STARInstance.java

Example 5: enumDirectories

import org.apache.hadoop.mapreduce.Mapper; // import the dependent package/class
private void enumDirectories(FileSystem fs, URI rootUri, Path directory, boolean recursive,
    Mapper.Context context) throws IOException, InterruptedException {
  try {
    for (FileStatus status : fs.listStatus(directory, hiddenFileFilter)) {
      if (status.isDirectory()) {
        if (recursive) {
          if (directoryBlackList == null
              || !status.getPath().getName().matches(directoryBlackList)) {
            enumDirectories(fs, rootUri, status.getPath(), recursive, context);
          }
        }
      } else {
        context.write(new Text(rootUri.relativize(directory.toUri()).getPath()),
                new FileStatus(status));
      }
    }
    context.progress();
  } catch (FileNotFoundException e) {
    // The directory disappeared between listing and traversal; skip it.
    return;
  }
}
 
Author: airbnb, Project: reair, Lines: 22, Source: ReplicationJob.java

Example 6: getDataStream

import org.apache.hadoop.mapreduce.Mapper; // import the dependent package/class
/**
 * Convenience method to access #getDataStream(Configuration, Path)
 * from within a map task that read this LobRef from a file-based
 * InputSplit.
 * @param mapContext the Mapper.Context instance that encapsulates
 * the current map task.
 * @return an object that lazily streams the record to the client.
 * @throws IllegalArgumentException if it cannot find the source
 * path for this LOB based on the MapContext.
 * @throws IOException if it could not read the LOB from external storage.
 */
public ACCESSORTYPE getDataStream(Mapper.Context mapContext)
    throws IOException {
  InputSplit split = mapContext.getInputSplit();
  if (split instanceof FileSplit) {
    Path basePath = ((FileSplit) split).getPath().getParent();
    return getDataStream(mapContext.getConfiguration(),
      basePath);
  } else {
    throw new IllegalArgumentException(
        "Could not ascertain LOB base path from MapContext.");
  }
}
 
Author: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 24, Source: LobRef.java
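
For orientation, here is a hedged sketch of calling getDataStream from inside a map task. BlobRef, a concrete LobRef whose accessor type is an InputStream, stands in for ACCESSORTYPE; the SomeSqoopRecord value class and its getLobField() accessor are hypothetical:

// Hypothetical usage; SomeSqoopRecord and getLobField() are placeholders.
@Override
protected void map(LongWritable key, SomeSqoopRecord value, Context context)
    throws IOException, InterruptedException {
  BlobRef blob = value.getLobField();
  // getDataStream resolves the LOB's storage path from this task's
  // FileSplit and opens it lazily (IllegalArgumentException otherwise).
  try (InputStream in = blob.getDataStream(context)) {
    long bytes = 0;
    byte[] buf = new byte[8192];
    for (int n; (n = in.read(buf)) != -1; ) {
      bytes += n;
    }
    context.getCounter("lob", "bytesRead").increment(bytes);
  }
}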

Example 7: runMapper

import org.apache.hadoop.mapreduce.Mapper; // import the dependent package/class
@SuppressWarnings("unchecked")
void runMapper(TaskInputOutputContext context, int index) throws IOException,
    InterruptedException {
  Mapper mapper = mappers.get(index);
  RecordReader rr = new ChainRecordReader(context);
  RecordWriter rw = new ChainRecordWriter(context);
  Mapper.Context mapperContext = createMapContext(rr, rw, context,
      getConf(index));
  mapper.run(mapperContext);
  rr.close();
  rw.close(context);
}
 
Author: naver, Project: hadoop, Lines: 13, Source: Chain.java

Example 8: copyBytes

import org.apache.hadoop.mapreduce.Mapper; // import the dependent package/class
@VisibleForTesting
long copyBytes(FileStatus sourceFileStatus, long sourceOffset,
    OutputStream outStream, int bufferSize, Mapper.Context context)
    throws IOException {
  Path source = sourceFileStatus.getPath();
  byte[] buf = new byte[bufferSize];
  ThrottledInputStream inStream = null;
  long totalBytesRead = 0;

  try {
    inStream = getInputStream(source, context.getConfiguration());
    int bytesRead = readBytes(inStream, buf, sourceOffset);
    while (bytesRead >= 0) {
      totalBytesRead += bytesRead;
      if (action == FileAction.APPEND) {
        sourceOffset += bytesRead;
      }
      outStream.write(buf, 0, bytesRead);
      updateContextStatus(totalBytesRead, context, sourceFileStatus);
      bytesRead = readBytes(inStream, buf, sourceOffset);
    }
    outStream.close();
    outStream = null;
  } finally {
    IOUtils.cleanup(LOG, outStream, inStream);
  }
  return totalBytesRead;
}
 
Author: naver, Project: hadoop, Lines: 29, Source: RetriableFileCopyCommand.java

Example 9: updateContextStatus

import org.apache.hadoop.mapreduce.Mapper; // import the dependent package/class
private void updateContextStatus(long totalBytesRead, Mapper.Context context,
                                 FileStatus sourceFileStatus) {
  StringBuilder message = new StringBuilder(DistCpUtils.getFormatter()
      .format(totalBytesRead * 100.0f / sourceFileStatus.getLen()));
  message.append("% ")
      .append(description).append(" [")
      .append(DistCpUtils.getStringDescriptionFor(totalBytesRead))
      .append('/')
      .append(DistCpUtils.getStringDescriptionFor(sourceFileStatus.getLen()))
      .append(']');
  context.setStatus(message.toString());
}
 
Author: naver, Project: hadoop, Lines: 13, Source: RetriableFileCopyCommand.java
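
The pattern above generalizes: any long-running loop inside a map task should periodically call setStatus for a human-readable message and progress() so the framework knows the task is still alive. A minimal sketch, with processOne standing in for the real per-item work:

// Generic heartbeat pattern (not taken from the source above).
private void processWithHeartbeat(Mapper.Context context, List<Path> work)
    throws IOException {
  int done = 0;
  for (Path p : work) {
    processOne(p);  // placeholder for the actual work on each item
    done++;
    context.setStatus("processed " + done + "/" + work.size());
    context.progress();  // resets the framework's task-timeout clock
  }
}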

Example 10: getCushaw2Instance

import org.apache.hadoop.mapreduce.Mapper; // import the dependent package/class
public static Cushaw2Instance getCushaw2Instance(Mapper.Context context, String bin, int task) throws IOException, InterruptedException, URISyntaxException {
    if(instance == null) {
        instance = new Cushaw2Instance(context, bin, task);
        instance.startAligner(context);
    }
    Cushaw2Instance.context = context;
    Logger.DEBUG("Started Cushaw2");
    return instance;
}
 
Author: biointec, Project: halvade, Lines: 10, Source: Cushaw2Instance.java

Example 11: getBWAInstance

import org.apache.hadoop.mapreduce.Mapper; // import the dependent package/class
public static BWAAlnInstance getBWAInstance(Mapper.Context context, String bin, int task) throws IOException, InterruptedException, URISyntaxException {
    if(instance == null) {
        instance = new BWAAlnInstance(context, bin, task);
        instance.startAligner(context);
    }
    BWAAlnInstance.context = context;
    Logger.DEBUG("Started BWA");
    return instance;
}
 
Author: biointec, Project: halvade, Lines: 10, Source: BWAAlnInstance.java

Example 12: getBowtie2Instance

import org.apache.hadoop.mapreduce.Mapper; // import the dependent package/class
public static Bowtie2Instance getBowtie2Instance(Mapper.Context context, String bin, int task) throws IOException, InterruptedException, URISyntaxException {
    if(instance == null) {
        instance = new Bowtie2Instance(context, bin, task);
        instance.startAligner(context);
    }
    Bowtie2Instance.context = context;
    Logger.DEBUG("Started Bowtie2");
    return instance;
}
 
Author: biointec, Project: halvade, Lines: 10, Source: Bowtie2Instance.java

Example 13: Cushaw2Instance

import org.apache.hadoop.mapreduce.Mapper; // import the dependent package/class
private Cushaw2Instance(Mapper.Context context, String bin, int task) throws IOException, URISyntaxException {
    super(context, bin, task);  
    taskId = context.getTaskAttemptID().toString();
    taskId = taskId.substring(taskId.indexOf("m_"));
    ref = HalvadeFileUtils.downloadCushaw2Index(context);
    cushaw2CustomArgs = HalvadeConf.getCustomArgs(context.getConfiguration(), "cushaw2", "");
}
 
Author: biointec, Project: halvade, Lines: 8, Source: Cushaw2Instance.java

Example 14: STARInstance

import org.apache.hadoop.mapreduce.Mapper; // import the dependent package/class
private STARInstance(Mapper.Context context, String bin, int starType, int task) throws IOException, URISyntaxException {
    super(context, bin, task);
    this.starType = starType;
    taskId = context.getTaskAttemptID().toString();
    taskId = taskId.substring(taskId.indexOf("m_"));
    ref = HalvadeFileUtils.downloadSTARIndex(context, taskId, starType == PASS2);
    stargtf = HalvadeConf.getStarGtf(context.getConfiguration());
    Logger.DEBUG("ref: " + ref);
    starOutDir = tmpdir + taskId + "-STARout/";
    nReads = 0;
    overhang = 0;
}
 
Author: biointec, Project: halvade, Lines: 13, Source: STARInstance.java

Example 15: BWAAlnInstance

import org.apache.hadoop.mapreduce.Mapper; // import the dependent package/class
private BWAAlnInstance(Mapper.Context context, String bin, int task) throws IOException, URISyntaxException {
    super(context, bin, task);  
    taskId = context.getTaskAttemptID().toString();
    taskId = taskId.substring(taskId.indexOf("m_"));
    ref = HalvadeFileUtils.downloadBWAIndex(context);
    alnCustomArgs = HalvadeConf.getCustomArgs(context.getConfiguration(), "bwa", "aln");
}
 
Author: biointec, Project: halvade, Lines: 8, Source: BWAAlnInstance.java


Note: the org.apache.hadoop.mapreduce.Mapper.Context examples in this article were compiled by 纯净天空 from open-source code hosted on GitHub, MSDocs, and similar platforms. The snippets are drawn from open-source projects; copyright remains with the original authors, and redistribution or use is subject to each project's License. Please do not reproduce without permission.