

Java Reporter.setStatus Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapred.Reporter.setStatus. If you have been wondering what Reporter.setStatus does, how to call it, or where to find concrete examples, the curated snippets below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.mapred.Reporter.


The sections below present 14 code examples of Reporter.setStatus, ordered by popularity by default.
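To ground the examples, here is first a minimal sketch of the typical calling pattern: a mapper written against the old org.apache.hadoop.mapred API that refreshes its task status string periodically so that long-running tasks stay visible in the framework's web UI. The sketch is not taken from any of the snippets below; the class name, key/value types, and the 10,000-record reporting interval are illustrative assumptions.

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class StatusReportingMapper extends MapReduceBase
    implements Mapper<LongWritable, Text, Text, LongWritable> {

  private long processed = 0; // records seen by this task so far (illustrative)

  public void map(LongWritable key, Text value,
      OutputCollector<Text, LongWritable> out, Reporter reporter)
      throws IOException {
    out.collect(value, key); // placeholder per-record work
    if (++processed % 10000 == 0) {
      // setStatus replaces the task's human-readable status string; like
      // progress(), the call also signals that the task is still alive.
      reporter.setStatus("processed " + processed + " records");
    }
  }
}

The same pattern recurs throughout the examples below, whether the task is writing benchmark files (TestDFSIO, NNBench), building archives (HadoopArchives), or copying data (DistCpV1).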

Example 1: doIO

import org.apache.hadoop.mapred.Reporter; // import the class this method depends on
@Override // IOMapperBase
public Long doIO(Reporter reporter, 
                   String name, 
                   long totalSize // in bytes
                 ) throws IOException {
  OutputStream out = (OutputStream)this.stream;
  // write to the file
  long nrRemaining;
  for (nrRemaining = totalSize; nrRemaining > 0; nrRemaining -= bufferSize) {
    int curSize = (bufferSize < nrRemaining) ? bufferSize : (int)nrRemaining;
    out.write(buffer, 0, curSize);
    reporter.setStatus("writing " + name + "@" + 
                       (totalSize - nrRemaining) + "/" + totalSize 
                       + " ::host = " + hostName);
  }
  return Long.valueOf(totalSize);
}
 
Developer: naver | Project: hadoop | Lines: 18 | Source file: TestDFSIO.java

Example 2: reduce

import org.apache.hadoop.mapred.Reporter; // import the class this method depends on
public void reduce(IntWritable key, Iterator<Text> values,
    OutputCollector<Text, Text> out,
    Reporter reporter) throws IOException {
  keyVal = key.get();
  while (values.hasNext()) {
    Text value = values.next();
    String towrite = value.toString() + "\n";
    indexStream.write(towrite.getBytes(Charsets.UTF_8));
    written++;
    if (written > numIndexes - 1) {
      // after every numIndexes entries (1000 by default), report status
      reporter.setStatus("Creating index for archives");
      reporter.progress();
      endIndex = keyVal;
      String masterWrite = startIndex + " " + endIndex + " " + startPos
                          + " " + indexStream.getPos() + " \n";
      outStream.write(masterWrite.getBytes(Charsets.UTF_8));
      startPos = indexStream.getPos();
      startIndex = endIndex;
      written = 0;
    }
  }
}
 
Developer: naver | Project: hadoop | Lines: 24 | Source file: HadoopArchives.java

Example 3: map

import org.apache.hadoop.mapred.Reporter; // import the class this method depends on
/** Run a FileOperation */
public void map(Text key, FileOperation value,
    OutputCollector<WritableComparable<?>, Text> out, Reporter reporter
    ) throws IOException {
  try {
    value.run(jobconf);
    ++succeedcount;
    reporter.incrCounter(Counter.SUCCEED, 1);
  } catch (IOException e) {
    ++failcount;
    reporter.incrCounter(Counter.FAIL, 1);

    String s = "FAIL: " + value + ", " + StringUtils.stringifyException(e);
    out.collect(null, new Text(s));
    LOG.info(s);
  } finally {
    reporter.setStatus(getCountString());
  }
}
 
Developer: naver | Project: hadoop | Lines: 20 | Source file: DistCh.java

Example 4: doOpenReadOp

import org.apache.hadoop.mapred.Reporter; // import the class this method depends on
/**
 * Open operation
 * @param name prefix of the output files to be read
 * @param reporter an instance of {@link Reporter} to be used for
 *   status updates
 */
private void doOpenReadOp(String name,
                          Reporter reporter) {
  FSDataInputStream input;
  byte[] buffer = new byte[bytesToWrite];
  
  for (long l = 0L; l < numberOfFiles; l++) {
    Path filePath = new Path(new Path(baseDir, dataDirName), 
            name + "_" + l);

    boolean successfulOp = false;
    while (!successfulOp && numOfExceptions < MAX_OPERATION_EXCEPTIONS) {
      try {
        // Set up timer for measuring AL
        startTimeAL = System.currentTimeMillis();
        input = filesystem.open(filePath);
        totalTimeAL1 += (System.currentTimeMillis() - startTimeAL);
        
        // If the file needs to be read (specified at command line)
        if (readFile) {
          startTimeAL = System.currentTimeMillis();
          input.readFully(buffer);

          totalTimeAL2 += (System.currentTimeMillis() - startTimeAL);
        }
        input.close();
        successfulOp = true;
        successfulFileOps++;

        reporter.setStatus("Finished " + l + " files");
      } catch (IOException e) {
        LOG.info("Exception recorded in op: OpenRead " + e);
        numOfExceptions++;
      }
    }
  }
}
 
Developer: naver | Project: hadoop | Lines: 43 | Source file: NNBench.java

Example 5: doRenameOp

import org.apache.hadoop.mapred.Reporter; // import the class this method depends on
/**
 * Rename operation
 * @param name prefix of the files to be renamed
 * @param reporter an instance of {@link Reporter} to be used for
 *   status updates
 */
private void doRenameOp(String name,
                        Reporter reporter) {
  for (long l = 0L; l < numberOfFiles; l++) {
    Path filePath = new Path(new Path(baseDir, dataDirName), 
            name + "_" + l);
    Path filePathR = new Path(new Path(baseDir, dataDirName), 
            name + "_r_" + l);

    boolean successfulOp = false;
    while (!successfulOp && numOfExceptions < MAX_OPERATION_EXCEPTIONS) {
      try {
        // Set up timer for measuring AL
        startTimeAL = System.currentTimeMillis();
        filesystem.rename(filePath, filePathR);
        totalTimeAL1 += (System.currentTimeMillis() - startTimeAL);
        
        successfulOp = true;
        successfulFileOps++;

        reporter.setStatus("Finished " + l + " files");
      } catch (IOException e) {
        LOG.info("Exception recorded in op: Rename");

        numOfExceptions++;
      }
    }
  }
}
 
Developer: naver | Project: hadoop | Lines: 35 | Source file: NNBench.java

Example 6: doDeleteOp

import org.apache.hadoop.mapred.Reporter; // import the class this method depends on
/**
 * Delete operation
 * @param name prefix of the files to be deleted
 * @param reporter an instance of {@link Reporter} to be used for
 *   status updates
 */
private void doDeleteOp(String name,
                        Reporter reporter) {
  for (long l = 0L; l < numberOfFiles; l++) {
    Path filePath = new Path(new Path(baseDir, dataDirName), 
            name + "_" + l);
    
    boolean successfulOp = false;
    while (!successfulOp && numOfExceptions < MAX_OPERATION_EXCEPTIONS) {
      try {
        // Set up timer for measuring AL
        startTimeAL = System.currentTimeMillis();
        filesystem.delete(filePath, true);
        totalTimeAL1 += (System.currentTimeMillis() - startTimeAL);
        
        successfulOp = true;
        successfulFileOps++;

        reporter.setStatus("Finished " + l + " files");
      } catch (IOException e) {
        LOG.info("Exception in recorded op: Delete");

        numOfExceptions++;
      }
    }
  }
}
 
Developer: naver | Project: hadoop | Lines: 33 | Source file: NNBench.java

Example 7: getRecordReader

import org.apache.hadoop.mapred.Reporter; // import the class this method depends on
public RecordReader<LongWritable, Text> getRecordReader(
    InputSplit genericSplit, JobConf job, Reporter reporter)
    throws IOException {
  reporter.setStatus(genericSplit.toString());
  return new LineRecordReader(job, (FileSplit) genericSplit);
}
 
Developer: naver | Project: hadoop | Lines: 9 | Source file: NLineInputFormat.java

Example 8: map

import org.apache.hadoop.mapred.Reporter; // import the class this method depends on
public void map(LongWritable key, HarEntry value,
    OutputCollector<IntWritable, Text> out,
    Reporter reporter) throws IOException {
  Path relPath = new Path(value.path);
  int hash = HarFileSystem.getHarHash(relPath);
  String towrite = null;
  Path srcPath = realPath(relPath, rootPath);
  long startPos = partStream.getPos();
  FileSystem srcFs = srcPath.getFileSystem(conf);
  FileStatus srcStatus = srcFs.getFileStatus(srcPath);
  String propStr = encodeProperties(srcStatus);
  if (value.isDir()) { 
    towrite = encodeName(relPath.toString())
              + " dir " + propStr + " 0 0 ";
    StringBuffer sbuff = new StringBuffer();
    sbuff.append(towrite);
    for (String child: value.children) {
      sbuff.append(encodeName(child) + " ");
    }
    towrite = sbuff.toString();
    // reading directories also counts as progress
    reporter.progress();
  }
  else {
    FSDataInputStream input = srcFs.open(srcStatus.getPath());
    reporter.setStatus("Copying file " + srcStatus.getPath() + 
        " to archive.");
    copyData(srcStatus.getPath(), input, partStream, reporter);
    towrite = encodeName(relPath.toString())
              + " file " + partname + " " + startPos
              + " " + srcStatus.getLen() + " " + propStr + " ";
  }
  out.collect(new IntWritable(hash), new Text(towrite));
}
 
Developer: naver | Project: hadoop | Lines: 35 | Source file: HadoopArchives.java

Example 9: doCopyFile

import org.apache.hadoop.mapred.Reporter; // import the class this method depends on
/**
 * Copies a single file to the path specified by tmpfile.
 * @param srcstat  src path and metadata
 * @param tmpfile  temporary file to which the copy is written
 * @param absdst   actual destination path of the copy
 * @param reporter used for status updates
 * @return Number of bytes copied
 */
private long doCopyFile(FileStatus srcstat, Path tmpfile, Path absdst,
                        Reporter reporter) throws IOException {
  long bytesCopied = 0L;
  Path srcPath = srcstat.getPath();
  // open src file
  try (FSDataInputStream in = srcPath.getFileSystem(job).open(srcPath)) {
    reporter.incrCounter(Counter.BYTESEXPECTED, srcstat.getLen());
    // open tmp file
    try (FSDataOutputStream out = create(tmpfile, reporter, srcstat)) {
      LOG.info("Copying file " + srcPath + " of size " +
               srcstat.getLen() + " bytes...");
    
      // copy file
      for(int bytesRead; (bytesRead = in.read(buffer)) >= 0; ) {
        out.write(buffer, 0, bytesRead);
        bytesCopied += bytesRead;
        reporter.setStatus(
            String.format("%.2f ", bytesCopied*100.0/srcstat.getLen())
            + absdst + " [ " +
            TraditionalBinaryPrefix.long2String(bytesCopied, "", 1) + " / "
            + TraditionalBinaryPrefix.long2String(srcstat.getLen(), "", 1)
            + " ]");
      }
    }
  }
  return bytesCopied;
}
 
Developer: naver | Project: hadoop | Lines: 36 | Source file: DistCpV1.java

Example 10: regroup

import org.apache.hadoop.mapred.Reporter; // import the class this method depends on
/**
 * This is the function that re-groups values for a key into sub-groups based
 * on a secondary key (the input tag).
 * 
 * @param key the map output key shared by all values in arg1
 * @param arg1 iterator over the tagged values for this key
 * @param reporter used for status updates
 * @return a map from each input tag to an iterator over that tag's records
 */
private SortedMap<Object, ResetableIterator> regroup(Object key,
                                                     Iterator arg1, Reporter reporter) throws IOException {
  this.numOfValues = 0;
  SortedMap<Object, ResetableIterator> retv = new TreeMap<Object, ResetableIterator>();
  TaggedMapOutput aRecord = null;
  while (arg1.hasNext()) {
    this.numOfValues += 1;
    if (this.numOfValues % 100 == 0) {
      reporter.setStatus("key: " + key.toString() + " numOfValues: "
                         + this.numOfValues);
    }
    if (this.numOfValues > this.maxNumOfValuesPerGroup) {
      continue;
    }
    aRecord = ((TaggedMapOutput) arg1.next()).clone(job);
    Text tag = aRecord.getTag();
    ResetableIterator data = retv.get(tag);
    if (data == null) {
      data = createResetableIterator();
      retv.put(tag, data);
    }
    data.add(aRecord);
  }
  if (this.numOfValues > this.largestNumOfValues) {
    this.largestNumOfValues = numOfValues;
    LOG.info("key: " + key.toString() + " this.largestNumOfValues: "
             + this.largestNumOfValues);
  }
  return retv;
}
 
Developer: naver | Project: hadoop | Lines: 38 | Source file: DataJoinReducerBase.java

Example 11: collect

import org.apache.hadoop.mapred.Reporter; // import the class this method depends on
/**
 * A subclass can override this method to perform additional filtering
 * and/or other processing logic before a value is collected.
 * 
 * @param key the output key
 * @param aRecord the tagged record to collect; skipped if null
 * @param output the output collector
 * @param reporter used for status updates
 * @throws IOException
 */
protected void collect(Object key, TaggedMapOutput aRecord,
                       OutputCollector output, Reporter reporter) throws IOException {
  this.collected += 1;
  addLongValue("collectedCount", 1);
  if (aRecord != null) {
    output.collect(key, aRecord.getData());
    reporter.setStatus("key: " + key.toString() + " collected: " + collected);
    addLongValue("actuallyCollectedCount", 1);
  }
}
 
Developer: naver | Project: hadoop | Lines: 21 | Source file: DataJoinReducerBase.java

Example 12: doCreateWriteOp

import org.apache.hadoop.mapred.Reporter; // import the class this method depends on
/**
 * Create and Write operation.
 * @param name prefix of the output files to be created
 * @param reporter an instance of {@link Reporter} to be used for
 *   status updates
 */
private void doCreateWriteOp(String name,
                             Reporter reporter) {
  FSDataOutputStream out;
  byte[] buffer = new byte[bytesToWrite];
  
  for (long l = 0L; l < numberOfFiles; l++) {
    Path filePath = new Path(new Path(baseDir, dataDirName), 
            name + "_" + l);

    boolean successfulOp = false;
    while (!successfulOp && numOfExceptions < MAX_OPERATION_EXCEPTIONS) {
      try {
        // Set up timer for measuring AL (transaction #1)
        startTimeAL = System.currentTimeMillis();
        // Create the file
        // Use a buffer size of 512
        out = filesystem.create(filePath, 
                true, 
                512, 
                replFactor, 
                blkSize);
        out.write(buffer);
        totalTimeAL1 += (System.currentTimeMillis() - startTimeAL);

        // Close the file / file output stream
        // Set up timers for measuring AL (transaction #2)
        startTimeAL = System.currentTimeMillis();
        out.close();
        
        totalTimeAL2 += (System.currentTimeMillis() - startTimeAL);
        successfulOp = true;
        successfulFileOps++;

        reporter.setStatus("Finished " + l + " files");
      } catch (IOException e) {
        LOG.info("Exception recorded in op: " +
                "Create/Write/Close");
 
        numOfExceptions++;
      }
    }
  }
}
 
Developer: naver | Project: hadoop | Lines: 50 | Source file: NNBench.java

Example 13: updateStatus

import org.apache.hadoop.mapred.Reporter; // import the class this method depends on
private void updateStatus(Reporter reporter) {
  reporter.setStatus(getCountString());
}
 
Developer: naver | Project: hadoop | Lines: 4 | Source file: DistCpV1.java

Example 14: logAndSetStatus

import org.apache.hadoop.mapred.Reporter; // import the class this method depends on
/**
 * Sets the given message as the reporter's status and also logs it to the
 * internal logger at info level.
 * 
 * @param r
 *          the reporter to set status on
 * @param msg
 *          the message to log
 */
private void logAndSetStatus(Reporter r, String msg) {
  r.setStatus(msg);
  LOG.info(msg);
}
 
Developer: naver | Project: hadoop | Lines: 13 | Source file: SliveReducer.java


Note: the org.apache.hadoop.mapred.Reporter.setStatus examples in this article were compiled by 纯净天空 from open-source code hosted on platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. Before distributing or using the code, consult the license of the corresponding project; do not republish without permission.