

Java JobContext.getConfiguration Method Code Examples

This article collects typical usage examples of the org.apache.hadoop.mapreduce.JobContext.getConfiguration method in Java. If you are wondering how to use JobContext.getConfiguration, what it does, or where to find real-world examples, the curated code samples below may help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.mapreduce.JobContext.


The following presents 15 code examples of the JobContext.getConfiguration method, sorted by popularity by default.
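
Before the individual project examples, here is a minimal, self-contained sketch (not drawn from any of the projects below) of the common pattern: an InputFormat calls JobContext.getConfiguration() inside getSplits() to read job-level settings. The property name example.max.splits and the class ConfiguredTextInputFormat are hypothetical placeholders used only for illustration.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

public class ConfiguredTextInputFormat extends TextInputFormat {

  @Override
  public List<InputSplit> getSplits(JobContext job) throws IOException {
    // JobContext.getConfiguration() exposes the job's Configuration object.
    Configuration conf = job.getConfiguration();

    // Hypothetical property used only for this sketch.
    int maxSplits = conf.getInt("example.max.splits", Integer.MAX_VALUE);

    // Delegate split computation to FileInputFormat, then cap the result.
    List<InputSplit> splits = super.getSplits(job);
    return splits.size() <= maxSplits
        ? splits
        : new ArrayList<>(splits.subList(0, maxSplits));
  }
}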

Example 1: getSplits

import org.apache.hadoop.mapreduce.JobContext; // import the package/class the method depends on
@Override
public List<InputSplit> getSplits(JobContext job) throws IOException {

  long maxSize = 0;
  Configuration conf = job.getConfiguration();

  maxSize = conf.getLong("mapreduce.input.fileinputformat.split.maxsize", 0);

  // all the files in input set
  List<FileStatus> stats = listStatus(job);
  List<InputSplit> splits = new ArrayList<>();
  if (stats.size() == 0) {
    return splits;
  }

  getMoreSplits(conf, stats, maxSize, 0, 0, splits);

  return splits;

}
 
Developer: Tencent, Project: angel, Lines of code: 21, Source: BalanceInputFormat.java

Example 2: getSplits

import org.apache.hadoop.mapreduce.JobContext; // import the package/class the method depends on
@Override
public List<InputSplit> getSplits(JobContext jobCtxt) throws IOException {
  final JobClient client =
    new JobClient(new JobConf(jobCtxt.getConfiguration()));
  ClusterStatus stat = client.getClusterStatus(true);
  final long toGen =
    jobCtxt.getConfiguration().getLong(GRIDMIX_GEN_BYTES, -1);
  if (toGen < 0) {
    throw new IOException("Invalid/missing generation bytes: " + toGen);
  }
  final int nTrackers = stat.getTaskTrackers();
  final long bytesPerTracker = toGen / nTrackers;
  final ArrayList<InputSplit> splits = new ArrayList<InputSplit>(nTrackers);
  final Pattern trackerPattern = Pattern.compile("tracker_([^:]*):.*");
  final Matcher m = trackerPattern.matcher("");
  for (String tracker : stat.getActiveTrackerNames()) {
    m.reset(tracker);
    if (!m.find()) {
      System.err.println("Skipping node: " + tracker);
      continue;
    }
    final String name = m.group(1);
    splits.add(new GenSplit(bytesPerTracker, new String[] { name }));
  }
  return splits;
}
 
Developer: naver, Project: hadoop, Lines of code: 27, Source: GenerateData.java

Example 3: checkOutputSpecs

import org.apache.hadoop.mapreduce.JobContext; // import the package/class the method depends on
@Override
/** {@inheritDoc} */
public void checkOutputSpecs(JobContext context)
    throws IOException, InterruptedException {
  Configuration conf = context.getConfiguration();
  DBConfiguration dbConf = new DBConfiguration(conf);

  // Sanity check all the configuration values we need.
  if (null == conf.get(DBConfiguration.URL_PROPERTY)) {
    throw new IOException("Database connection URL is not set.");
  } else if (null == dbConf.getOutputTableName()) {
    throw new IOException("Procedure name is not set for export");
  } else if (null == dbConf.getOutputFieldNames()
      && 0 == dbConf.getOutputFieldCount()) {
    throw new IOException(
        "Output field names are null and zero output field count set.");
  }
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines of code: 19, Source: ExportCallOutputFormat.java

Example 4: checkOutputSpecs

import org.apache.hadoop.mapreduce.JobContext; // import the package/class the method depends on
@Override
/** {@inheritDoc} */
public void checkOutputSpecs(JobContext context)
    throws IOException, InterruptedException {
  Configuration conf = context.getConfiguration();
  DBConfiguration dbConf = new DBConfiguration(conf);

  // Sanity check all the configuration values we need.
  if (null == conf.get(DBConfiguration.URL_PROPERTY)) {
    throw new IOException("Database connection URL is not set.");
  } else if (null == dbConf.getOutputTableName()) {
    throw new IOException("Table name is not set for export.");
  } else if (null == dbConf.getOutputFieldNames()) {
    throw new IOException(
        "Output field names are null.");
  } else if (null == conf.get(ExportJobBase.SQOOP_EXPORT_UPDATE_COL_KEY)) {
    throw new IOException("Update key column is not set for export.");
  }
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines of code: 20, Source: SQLServerResilientUpdateOutputFormat.java

Example 5: checkOutputSpecs

import org.apache.hadoop.mapreduce.JobContext; // import the package/class the method depends on
@Override
/** {@inheritDoc} */
public void checkOutputSpecs(JobContext context)
    throws IOException, InterruptedException {
  Configuration conf = context.getConfiguration();
  DBConfiguration dbConf = new DBConfiguration(conf);

  // Sanity check all the configuration values we need.
  if (null == conf.get(DBConfiguration.URL_PROPERTY)) {
    throw new IOException("Database connection URL is not set.");
  } else if (null == dbConf.getOutputTableName()) {
    throw new IOException("Table name is not set for export");
  } else if (null == dbConf.getOutputFieldNames()
      && 0 == dbConf.getOutputFieldCount()) {
    throw new IOException(
        "Output field names are null and zero output field count set.");
  }
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines of code: 19, Source: ExportOutputFormat.java

Example 6: getSplits

import org.apache.hadoop.mapreduce.JobContext; // import the package/class the method depends on
@Override
public List<InputSplit> getSplits(JobContext context) throws IOException, InterruptedException {
  Configuration conf = context.getConfiguration();
  Path snapshotDir = new Path(conf.get(CONF_SNAPSHOT_DIR));
  FileSystem fs = FileSystem.get(snapshotDir.toUri(), conf);

  List<Pair<SnapshotFileInfo, Long>> snapshotFiles = getSnapshotFiles(conf, fs, snapshotDir);
  int mappers = conf.getInt(CONF_NUM_SPLITS, 0);
  if (mappers == 0 && snapshotFiles.size() > 0) {
    mappers = 1 + (snapshotFiles.size() / conf.getInt(CONF_MAP_GROUP, 10));
    mappers = Math.min(mappers, snapshotFiles.size());
    conf.setInt(CONF_NUM_SPLITS, mappers);
    conf.setInt(MR_NUM_MAPS, mappers);
  }

  List<List<Pair<SnapshotFileInfo, Long>>> groups = getBalancedSplits(snapshotFiles, mappers);
  List<InputSplit> splits = new ArrayList<>(groups.size());
  for (List<Pair<SnapshotFileInfo, Long>> files: groups) {
    splits.add(new ExportSnapshotInputSplit(files));
  }
  return splits;
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 23, Source: ExportSnapshot.java

Example 7: getOutputCompressorClass

import org.apache.hadoop.mapreduce.JobContext; // import the package/class the method depends on
/**
 * Get the {@link CompressionCodec} for compressing the job outputs.
 * @param job the {@link Job} to look in
 * @param defaultValue the {@link CompressionCodec} to return if not set
 * @return the {@link CompressionCodec} to be used to compress the 
 *         job outputs
 * @throws IllegalArgumentException if the class was specified, but not found
 */
public static Class<? extends CompressionCodec> 
getOutputCompressorClass(JobContext job, 
                       Class<? extends CompressionCodec> defaultValue) {
  Class<? extends CompressionCodec> codecClass = defaultValue;
  Configuration conf = job.getConfiguration();
  String name = conf.get(FileOutputFormat.COMPRESS_CODEC);
  if (name != null) {
    try {
      codecClass =
          conf.getClassByName(name).asSubclass(CompressionCodec.class);
    } catch (ClassNotFoundException e) {
      throw new IllegalArgumentException("Compression codec " + name + 
                                         " was not found.", e);
    }
  }
  return codecClass;
}
 
Developer: naver, Project: hadoop, Lines of code: 26, Source: FileOutputFormat.java

Example 8: getSplits

import org.apache.hadoop.mapreduce.JobContext; // import the package/class the method depends on
/**
 * implementation shared with deprecated HLogInputFormat
 */
List<InputSplit> getSplits(final JobContext context, final String startKey, final String endKey)
    throws IOException, InterruptedException {
  Configuration conf = context.getConfiguration();
  Path inputDir = new Path(conf.get("mapreduce.input.fileinputformat.inputdir"));

  long startTime = conf.getLong(startKey, Long.MIN_VALUE);
  long endTime = conf.getLong(endKey, Long.MAX_VALUE);

  FileSystem fs = inputDir.getFileSystem(conf);
  List<FileStatus> files = getFiles(fs, inputDir, startTime, endTime);

  List<InputSplit> splits = new ArrayList<InputSplit>(files.size());
  for (FileStatus file : files) {
    splits.add(new WALSplit(file.getPath().toString(), file.getLen(), startTime, endTime));
  }
  return splits;
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 21, Source: WALInputFormat.java

Example 9: commitJob

import org.apache.hadoop.mapreduce.JobContext; // import the package/class the method depends on
/** @inheritDoc */
@Override
public void commitJob(JobContext jobContext) throws IOException {
  Configuration conf = jobContext.getConfiguration();

  super.commitJob(jobContext);

  try {
    taskAttemptContext.setStatus("Commit Successful");
  } finally {
    cleanup(conf);
  }
}
 
Developer: HotelsDotCom, Project: circus-train, Lines of code: 14, Source: CopyCommitter.java

Example 10: getSplits

import org.apache.hadoop.mapreduce.JobContext; // import the package/class the method depends on
/**
 * Implementation of InputFormat::getSplits(). Returns a list of InputSplits, such that the number of bytes to be
 * copied for all the splits are approximately equal.
 *
 * @param context JobContext for the job.
 * @return The list of uniformly-distributed input-splits.
 * @throws IOException: On failure.
 * @throws InterruptedException
 */
@Override
public List<InputSplit> getSplits(JobContext context) throws IOException, InterruptedException {
  Configuration configuration = context.getConfiguration();
  int numSplits = ConfigurationUtil.getInt(configuration, MRJobConfig.NUM_MAPS);

  if (numSplits == 0) {
    return new ArrayList<>();
  }

  return getSplits(configuration, numSplits,
      ConfigurationUtil.getLong(configuration, S3MapReduceCpConstants.CONF_LABEL_TOTAL_BYTES_TO_BE_COPIED));
}
 
Developer: HotelsDotCom, Project: circus-train, Lines of code: 22, Source: UniformSizeInputFormat.java

Example 11: checkOutputSpecs

import org.apache.hadoop.mapreduce.JobContext; // import the package/class the method depends on
@Override
public void checkOutputSpecs(JobContext job
                            ) throws InvalidJobConfException, IOException {
  // Ensure that the output directory is set
  Path outDir = getOutputPath(job);
  if (outDir == null) {
    throw new InvalidJobConfException("Output directory not set in JobConf.");
  }

  final Configuration jobConf = job.getConfiguration();

  // get delegation token for outDir's file system
  TokenCache.obtainTokensForNamenodes(job.getCredentials(),
      new Path[] { outDir }, jobConf);

  final FileSystem fs = outDir.getFileSystem(jobConf);

  if (fs.exists(outDir)) {
    // existing output dir is considered empty iff its only content is the
    // partition file.
    //
    final FileStatus[] outDirKids = fs.listStatus(outDir);
    boolean empty = false;
    if (outDirKids != null && outDirKids.length == 1) {
      final FileStatus st = outDirKids[0];
      final String fname = st.getPath().getName();
      empty =
        !st.isDirectory() && TeraInputFormat.PARTITION_FILENAME.equals(fname);
    }
    if (TeraSort.getUseSimplePartitioner(job) || !empty) {
      throw new FileAlreadyExistsException("Output directory " + outDir
          + " already exists");
    }
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 36, Source: TeraOutputFormat.java

Example 12: checkOutputSpecs

import org.apache.hadoop.mapreduce.JobContext; // import the package/class the method depends on
@Override
public void checkOutputSpecs(JobContext context) throws IOException,
    InterruptedException {

  super.checkOutputSpecs(context);

  Configuration conf = context.getConfiguration();

  // This code is now running on a Datanode in the Hadoop cluster, so we
  // need to enable debug logging in this JVM...
  OraOopUtilities.enableDebugLoggingIfRequired(conf);
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines of code: 13, Source: OraOopOutputFormatBase.java

Example 13: getSplits

import org.apache.hadoop.mapreduce.JobContext; // import the package/class the method depends on
/** @return a list containing a single split of summation */
@Override
public List<InputSplit> getSplits(JobContext context) {
  //read sigma from conf
  final Configuration conf = context.getConfiguration();
  final Summation sigma = SummationWritable.read(DistSum.class, conf); 
  
  //create splits
  final List<InputSplit> splits = new ArrayList<InputSplit>(1);
  splits.add(new SummationSplit(sigma));
  return splits;
}
 
Developer: naver, Project: hadoop, Lines of code: 13, Source: DistSum.java

Example 14: getSplits

import org.apache.hadoop.mapreduce.JobContext; // import the package/class the method depends on
@Override
public List<InputSplit> getSplits(JobContext job) throws IOException {
  List<InputSplit> splits = new ArrayList<InputSplit>();
  Configuration conf = job.getConfiguration();
  String dsName
      = conf.get(MainframeConfiguration.MAINFRAME_INPUT_DATASET_NAME);
  LOG.info("Datasets to transfer from: " + dsName);
  List<String> datasets = retrieveDatasets(dsName, conf);
  if (datasets.isEmpty()) {
    throw new IOException("No sequential datasets retrieved from " + dsName);
  } else {
    int count = datasets.size();
    int chunks = Math.min(count, ConfigurationHelper.getJobNumMaps(job));
    for (int i = 0; i < chunks; i++) {
      splits.add(new MainframeDatasetInputSplit());
    }

    int j = 0;
    while(j < count) {
      for (InputSplit sp : splits) {
        if (j == count) {
          break;
        }
        ((MainframeDatasetInputSplit)sp).addDataset(datasets.get(j));
        j++;
      }
    }
  }
  return splits;
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines of code: 31, Source: MainframeDatasetInputFormat.java

Example 15: getInputPathFilter

import org.apache.hadoop.mapreduce.JobContext; // import the package/class the method depends on
/**
 * Get a PathFilter instance of the filter set for the input paths.
 *
 * @return the PathFilter instance set for the job, NULL if none has been set.
 */
public static PathFilter getInputPathFilter(JobContext context) {
  Configuration conf = context.getConfiguration();
  Class<?> filterClass = conf.getClass(PATHFILTER_CLASS, null,
      PathFilter.class);
  return (filterClass != null) ?
      (PathFilter) ReflectionUtils.newInstance(filterClass, conf) : null;
}
 
Developer: naver, Project: hadoop, Lines of code: 13, Source: FileInputFormat.java


Note: The org.apache.hadoop.mapreduce.JobContext.getConfiguration method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or reusing the code, and do not republish without permission.