当前位置: 首页>>代码示例>>Java>>正文


Java PerfCounters类代码示例

本文整理汇总了Java中org.apache.sqoop.util.PerfCounters的典型用法代码示例。如果您正苦于以下问题:Java PerfCounters类的具体用法?Java PerfCounters怎么用?Java PerfCounters使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


PerfCounters类属于org.apache.sqoop.util包,在下文中一共展示了PerfCounters类的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: runJob

import org.apache.sqoop.util.PerfCounters; //导入依赖的package包/类
/**
 * Submits the export job and, once it finishes, logs transfer statistics.
 *
 * <p>Timing covers the full submission; the byte count is taken from the
 * job's {@code HDFS_BYTES_READ} counter (export reads its input from HDFS).
 *
 * @param job the configured MapReduce export job
 * @return true if the job completed successfully
 * @throws ClassNotFoundException if a job class cannot be loaded
 * @throws IOException on submission or counter-retrieval failure
 * @throws InterruptedException if the submitting thread is interrupted
 */
@Override
protected boolean runJob(Job job) throws ClassNotFoundException, IOException,
    InterruptedException {

  final PerfCounters stopwatch = new PerfCounters();
  stopwatch.startClock();
  final boolean succeeded = doSubmitJob(job);
  stopwatch.stopClock();

  final Counters jobCounters = job.getCounters();
  if (jobCounters == null) {
    // A retired job no longer exposes its counters.
    displayRetiredJobNotice(LOG);
  } else {
    long bytesRead = jobCounters.getGroup("FileSystemCounters")
        .findCounter("HDFS_BYTES_READ").getValue();
    stopwatch.addBytes(bytesRead);
    LOG.info("Transferred " + stopwatch.toString());
    long numRecords = ConfigurationHelper.getNumMapInputRecords(job);
    LOG.info("Exported " + numRecords + " records.");
  }

  return succeeded;
}
 
开发者ID:aliyun,项目名称:aliyun-maxcompute-data-collectors,代码行数:25,代码来源:ExportJobBase.java

示例2: runJob

import org.apache.sqoop.util.PerfCounters; //导入依赖的package包/类
/**
 * Runs the job via {@link #doSubmitJob} and logs byte/record totals
 * read from the job's file-system counters when they are still available.
 *
 * @param job the configured MapReduce job
 * @return true if the job completed successfully
 * @throws ClassNotFoundException if a job class cannot be loaded
 * @throws IOException on submission or counter-retrieval failure
 * @throws InterruptedException if the submitting thread is interrupted
 */
@Override
protected boolean runJob(Job job) throws ClassNotFoundException, IOException,
    InterruptedException {

  PerfCounters timer = new PerfCounters();
  timer.startClock();

  boolean ok = doSubmitJob(job);
  timer.stopClock();

  Counters counters = job.getCounters();
  if (counters == null) {
    // Retired jobs drop their counters; just notify and return.
    displayRetiredJobNotice(LOG);
    return ok;
  }

  timer.addBytes(counters.getGroup("FileSystemCounters")
      .findCounter("HDFS_BYTES_READ").getValue());
  LOG.info("Transferred " + timer.toString());
  long records = ConfigurationHelper.getNumMapInputRecords(job);
  LOG.info("Exported " + records + " records.");
  return ok;
}
 
开发者ID:aliyun,项目名称:aliyun-maxcompute-data-collectors,代码行数:25,代码来源:HdfsOdpsImportJob.java

示例3: runJob

import org.apache.sqoop.util.PerfCounters; //导入依赖的package包/类
/**
 * Actually run the MapReduce job, blocking until it completes, then log
 * how many bytes were written and how many records were retrieved.
 *
 * @param job the configured MapReduce import job
 * @return true if the job completed successfully
 * @throws ClassNotFoundException if a job class cannot be loaded
 * @throws IOException on job or counter-retrieval failure
 * @throws InterruptedException if the waiting thread is interrupted
 */
@Override
protected boolean runJob(Job job) throws ClassNotFoundException, IOException,
    InterruptedException {

  PerfCounters perf = new PerfCounters();
  perf.startClock();
  boolean completed = job.waitForCompletion(true);
  perf.stopClock();

  Counters jobCounters = job.getCounters();
  if (jobCounters == null) {
    // Counters become unavailable once the job is retired.
    displayRetiredJobNotice(LOG);
  } else {
    long written = jobCounters.getGroup("FileSystemCounters")
        .findCounter("HDFS_BYTES_WRITTEN").getValue();
    perf.addBytes(written);
    LOG.info("Transferred " + perf.toString());
    LOG.info("Retrieved "
        + ConfigurationHelper.getNumMapOutputRecords(job) + " records.");
  }
  return completed;
}
 
开发者ID:infinidb,项目名称:sqoop,代码行数:27,代码来源:ImportJobBase.java

示例4: runJob

import org.apache.sqoop.util.PerfCounters; //导入依赖的package包/类
/**
 * Waits for the export job to complete and logs bytes read from HDFS
 * plus the number of exported records, when counters are still present.
 *
 * @param job the configured MapReduce export job
 * @return true if the job completed successfully
 * @throws ClassNotFoundException if a job class cannot be loaded
 * @throws IOException on job or counter-retrieval failure
 * @throws InterruptedException if the waiting thread is interrupted
 */
@Override
protected boolean runJob(Job job) throws ClassNotFoundException, IOException,
    InterruptedException {

  final PerfCounters clock = new PerfCounters();
  clock.startClock();
  final boolean done = job.waitForCompletion(true);
  clock.stopClock();

  final Counters stats = job.getCounters();
  if (null == stats) {
    // A retired job reports no counters; notify and skip the stats.
    displayRetiredJobNotice(LOG);
    return done;
  }

  clock.addBytes(stats.getGroup("FileSystemCounters")
      .findCounter("HDFS_BYTES_READ").getValue());
  LOG.info("Transferred " + clock.toString());
  long exported = ConfigurationHelper.getNumMapInputRecords(job);
  LOG.info("Exported " + exported + " records.");
  return done;
}
 
开发者ID:infinidb,项目名称:sqoop,代码行数:25,代码来源:ExportJobBase.java

示例5: runJob

import org.apache.sqoop.util.PerfCounters; //导入依赖的package包/类
/**
 * Actually run the MapReduce job, driving the HCatalog output committer
 * by hand for local-mode HCat jobs, then log transfer statistics.
 *
 * @param job the configured MapReduce import job
 * @return true if the job completed successfully
 * @throws ClassNotFoundException if a job class cannot be loaded
 * @throws IOException on submission or counter-retrieval failure
 * @throws InterruptedException if the submitting thread is interrupted
 */
@Override
protected boolean runJob(Job job) throws ClassNotFoundException, IOException,
    InterruptedException {

  PerfCounters perf = new PerfCounters();
  perf.startClock();

  boolean succeeded = doSubmitJob(job);

  if (isHCatJob) {
    // Local-mode HCat jobs need the output committer invoked explicitly.
    SqoopHCatUtilities.instance().invokeOutputCommitterForLocalMode(job);
  }

  perf.stopClock();

  Counters jobCounters = job.getCounters();
  if (jobCounters == null) {
    // Counters are gone once the job has been retired.
    displayRetiredJobNotice(LOG);
  } else {
    perf.addBytes(jobCounters.getGroup("FileSystemCounters")
        .findCounter("HDFS_BYTES_WRITTEN").getValue());
    LOG.info("Transferred " + perf.toString());
    LOG.info("Retrieved "
        + ConfigurationHelper.getNumMapOutputRecords(job) + " records.");
  }
  return succeeded;
}
 
开发者ID:aliyun,项目名称:aliyun-maxcompute-data-collectors,代码行数:32,代码来源:ImportJobBase.java

示例6: ReparsingStreamThread

import org.apache.sqoop.util.PerfCounters; //导入依赖的package包/类
/**
 * Wires up a stream consumer that reparses mysqldump output.
 *
 * @param is   stream carrying the dump output to reparse
 * @param c    mapper context used to emit records
 * @param conf job configuration
 * @param ctrs counters tracking transferred bytes
 */
ReparsingStreamThread(final InputStream is,
    final MySQLDumpMapper.Context c, Configuration conf,
    final PerfCounters ctrs) {
  this.stream = is;
  this.context = c;
  this.conf = conf;
  this.counters = ctrs;
}
 
开发者ID:aliyun,项目名称:aliyun-maxcompute-data-collectors,代码行数:9,代码来源:MySQLDumpMapper.java

示例7: map

import org.apache.sqoop.util.PerfCounters; //导入依赖的package包/类
/**
 * Streams one Netezza data slice into the job output.
 *
 * <p>Starts the external-table unload for {@code dataSliceId}, then copies
 * lines from {@code recordReader} to the context until the reader is
 * exhausted, re-appending the configured record delimiter to each line.
 * Byte/timing stats are tracked in {@code counter}; failures raised by the
 * external-table thread are rethrown as {@link IOException} after cleanup.
 *
 * @param dataSliceId Netezza data slice this mapper is responsible for
 * @param val         unused placeholder value
 * @param context     MapReduce context records are written to
 * @throws IOException if copying fails or the external-table thread failed
 * @throws InterruptedException if joining the external-table thread is interrupted
 */
public void map(Integer dataSliceId, NullWritable val, Context context)
    throws IOException, InterruptedException {
  conf = context.getConfiguration();
  dbc = new DBConfiguration(conf);
  numMappers = ConfigurationHelper.getConfNumMaps(conf);
  // Record delimiter that was stripped by readLine(); defaults to '\n'.
  char rd = (char) conf.getInt(DelimiterSet.OUTPUT_RECORD_DELIM_KEY, '\n');
  // Kicks off the external-table thread and opens recordReader over the FIFO.
  initNetezzaExternalTableImport(dataSliceId);
  counter = new PerfCounters();
  counter.startClock();
  Text outputRecord = new Text();
  if (extTableThread.isAlive()) {
    try {
      String inputRecord = recordReader.readLine();
      while (inputRecord != null) {
        if (Thread.interrupted()) {
          // Interrupted while the producer is already dead: stop copying.
          if (!extTableThread.isAlive()) {
            break;
          }
        }
        outputRecord.set(inputRecord + rd);
        // May be we should set the output to be String for faster performance
        // There is no real benefit in changing it to Text and then
        // converting it back in our case
        context.write(outputRecord, NullWritable.get());
        // +1 accounts for the delimiter consumed by readLine().
        counter.addBytes(1 + inputRecord.length());
        inputRecord = recordReader.readLine();
      }
    } finally {
      // Always close the reader and wait for the producer before reporting.
      recordReader.close();
      extTableThread.join();
      counter.stopClock();
      LOG.info("Transferred " + counter.toString());
      if (extTableThread.hasExceptions()) {
        extTableThread.printException();
        // NOTE(review): "getExcepton" looks like a typo, but it presumably
        // matches the method name on the runner class in this fork — confirm
        // against NetezzaJDBCStatementRunner before renaming.
        throw new IOException(extTableThread.getExcepton());
      }
    }
  }
}
 
开发者ID:unicredit,项目名称:zSqoop,代码行数:40,代码来源:NetezzaExternalTableImportMapper.java

示例8: map

import org.apache.sqoop.util.PerfCounters; //导入依赖的package包/类
/**
 * Streams one Netezza data slice into the job output.
 *
 * <p>Starts the external-table unload for {@code dataSliceId}, then copies
 * lines from {@code recordReader} via {@code writeRecord} until the reader
 * is exhausted, re-appending the configured record delimiter to each line.
 * Byte/timing stats are tracked in {@code counter}; failures raised by the
 * external-table thread are rethrown as {@link IOException} after cleanup.
 *
 * @param dataSliceId Netezza data slice this mapper is responsible for
 * @param val         unused placeholder value
 * @param context     MapReduce context records are written to
 * @throws IOException if copying fails or the external-table thread failed
 * @throws InterruptedException if joining the external-table thread is interrupted
 */
public void map(Integer dataSliceId, NullWritable val, Context context)
  throws IOException, InterruptedException {
  conf = context.getConfiguration();


  dbc = new DBConfiguration(conf);
  numMappers = ConfigurationHelper.getConfNumMaps(conf);
  // Record delimiter that was stripped by readLine(); defaults to '\n'.
  char rd = (char) conf.getInt(DelimiterSet.OUTPUT_RECORD_DELIM_KEY, '\n');
  // Kicks off the external-table thread and opens recordReader over the FIFO.
  initNetezzaExternalTableImport(dataSliceId);
  counter = new PerfCounters();
  counter.startClock();
  Text outputRecord = new Text();
  if (extTableThread.isAlive()) {
    try {
      String inputRecord = recordReader.readLine();
      while (inputRecord != null) {
        if (Thread.interrupted()) {
          // Interrupted while the producer is already dead: stop copying.
          if (!extTableThread.isAlive()) {
            break;
          }
        }
        outputRecord.set(inputRecord + rd);
        // May be we should set the output to be String for faster performance
        // There is no real benefit in changing it to Text and then
        // converting it back in our case
        writeRecord(outputRecord, context);
        // +1 accounts for the delimiter consumed by readLine().
        counter.addBytes(1 + inputRecord.length());
        inputRecord = recordReader.readLine();
      }
    } finally {
      // Always close the reader and wait for the producer before reporting.
      recordReader.close();
      extTableThread.join();
      counter.stopClock();
      LOG.info("Transferred " + counter.toString());
      if (extTableThread.hasExceptions()) {
        extTableThread.printException();
        throw new IOException(extTableThread.getException());
      }
    }
  }
}
 
开发者ID:aliyun,项目名称:aliyun-maxcompute-data-collectors,代码行数:42,代码来源:NetezzaExternalTableImportMapper.java

示例9: CopyingAsyncSink

import org.apache.sqoop.util.PerfCounters; //导入依赖的package包/类
/**
 * Builds a sink that copies dump output straight to the mapper context.
 *
 * @param context mapper context records are written to
 * @param ctrs    counters tracking transferred bytes
 */
protected CopyingAsyncSink(final MySQLDumpMapper.Context context,
    final PerfCounters ctrs) {
  this.counters = ctrs;
  this.context = context;
}
 
开发者ID:aliyun,项目名称:aliyun-maxcompute-data-collectors,代码行数:6,代码来源:MySQLDumpMapper.java

示例10: CopyingStreamThread

import org.apache.sqoop.util.PerfCounters; //导入依赖的package包/类
/**
 * Wires up a stream consumer that copies dump output verbatim.
 *
 * @param is   stream carrying the dump output to copy
 * @param c    mapper context used to emit records
 * @param ctrs counters tracking transferred bytes
 */
CopyingStreamThread(final InputStream is,
    final Context c, final PerfCounters ctrs) {
  this.counters = ctrs;
  this.stream = is;
  this.context = c;
}
 
开发者ID:aliyun,项目名称:aliyun-maxcompute-data-collectors,代码行数:7,代码来源:MySQLDumpMapper.java

示例11: ReparsingAsyncSink

import org.apache.sqoop.util.PerfCounters; //导入依赖的package包/类
/**
 * Builds a sink that reparses dump output before emitting records.
 *
 * @param c    mapper context records are written to
 * @param conf job configuration
 * @param ctrs counters tracking transferred bytes
 */
protected ReparsingAsyncSink(final MySQLDumpMapper.Context c,
    final Configuration conf, final PerfCounters ctrs) {
  this.counters = ctrs;
  this.conf = conf;
  this.context = c;
}
 
开发者ID:aliyun,项目名称:aliyun-maxcompute-data-collectors,代码行数:7,代码来源:MySQLDumpMapper.java

示例12: initNetezzaExternalTableExport

import org.apache.sqoop.util.PerfCounters; //导入依赖的package包/类
/**
 * Prepares the Netezza external-table export pipeline for this task.
 *
 * <p>Creates a local named FIFO, starts the JDBC statement-runner thread
 * that will read from it, and only then opens {@code recordWriter} over
 * the FIFO and starts the performance clock. The ordering matters: the
 * JDBC side must be running before the FIFO is opened for writing.
 *
 * @param context MapReduce context supplying the task configuration
 * @throws IOException if the FIFO cannot be created or the JDBC
 *                     connection/statement setup fails
 */
private void initNetezzaExternalTableExport(Context context)
    throws IOException {
  this.conf = context.getConfiguration();
  dbc = new DBConfiguration(conf);
  File taskAttemptDir = TaskId.getLocalWorkPath(conf);
  this.outputDelimiters = new DelimiterSet(',', '\n', '\000', '\\', false);
  this.fifoFile = new File(taskAttemptDir, ("nzexttable-export.txt"));
  String filename = fifoFile.toString();
  NamedFifo nf;
  // Create the FIFO itself.
  try {
    nf = new NamedFifo(this.fifoFile);
    nf.create();
  } catch (IOException ioe) {
    // Command failed.
    LOG.error("Could not create FIFO file " + filename);
    this.fifoFile = null;
    // NOTE(review): message says "import" though this is the export mapper —
    // looks like a copy-paste; confirm before changing the runtime string.
    throw new IOException(
        "Could not create FIFO for netezza external table import", ioe);
  }
  String sqlStmt = getSqlStatement();
  boolean cleanup = false;
  try {
    con = dbc.getConnection();
    extTableThread = new NetezzaJDBCStatementRunner(Thread.currentThread(),
        con, sqlStmt);
  } catch (SQLException sqle) {
    // Connection was (possibly) established but setup failed: close it below.
    cleanup = true;
    throw new IOException(sqle);
  } catch (ClassNotFoundException cnfe) {
    throw new IOException(cnfe);
  } finally {
    // Close the connection only on the SQLException path; either way the
    // field is cleared so the thread owns the only live reference on success.
    if (con != null && cleanup) {
      try {
        con.close();
      } catch (Exception e) {
        LOG.debug("Exception closing connection " + e.getMessage());
      }
    }
    con = null;
  }

  counter = new PerfCounters();
  extTableThread.start();
  // We start the JDBC thread first in this case as we want the FIFO reader to
  // be running.
  recordWriter = new BufferedOutputStream(new FileOutputStream(nf.getFile()));
  counter.startClock();
}
 
开发者ID:unicredit,项目名称:zSqoop,代码行数:50,代码来源:NetezzaExternalTableExportMapper.java


注:本文中的org.apache.sqoop.util.PerfCounters类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。