本文整理汇总了Java中org.apache.hadoop.mapreduce.Job.getJobID方法的典型用法代码示例。如果您正苦于以下问题:Java Job.getJobID方法的具体用法?Java Job.getJobID怎么用?Java Job.getJobID使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.mapreduce.Job
的用法示例。
在下文中一共展示了Job.getJobID方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: copy
import org.apache.hadoop.mapreduce.Job; //导入方法依赖的package包/类
// Copies the table data via S3MapReduceCp and returns transfer metrics.
@Override
public Metrics copy() throws CircusTrainException {
  LOG.info("Copying table data.");
  LOG.debug("Invoking S3MapReduceCp: {} -> {}", sourceDataBaseLocation, replicaDataLocation);
  S3MapReduceCpOptions options = parseCopierOptions(copierOptions);
  LOG.debug("Invoking S3MapReduceCp with options: {}", options);
  try {
    // Counter used to report bytes copied while the job is in flight.
    Enum<?> bytesCopied = Counter.BYTESCOPIED;
    Job copyJob = executor.exec(conf, options);
    registerRunningJobMetrics(copyJob, bytesCopied);
    boolean succeeded = copyJob.waitForCompletion(true);
    if (!succeeded) {
      throw new IOException(
          "S3MapReduceCp failure: Job " + copyJob.getJobID() + " has failed: " + copyJob.getStatus().getFailureInfo());
    }
    return new JobMetrics(copyJob, bytesCopied);
  } catch (Exception e) {
    // Best-effort removal of partially copied replica data before surfacing the failure.
    cleanUpReplicaDataLocation();
    throw new CircusTrainException("Unable to copy file(s)", e);
  }
}
示例2: generateJobStats
import org.apache.hadoop.mapreduce.Job; //导入方法依赖的package包/类
/**
 * Builds a {@link JobStats} for the given job.
 *
 * @param job the MapReduce job being tracked
 * @param jobdesc the job's trace description; may be {@code null} for jobs
 *     that are not being simulated
 * @return stats holding the described map/reduce counts (or -1/-1 when no
 *     description is available) and the job itself
 * @throws IllegalArgumentException if the job is a simulated one (its
 *     sequence id is non-negative) but no description was supplied
 */
public static JobStats generateJobStats(Job job, JobStory jobdesc) {
  int seqId = GridmixJob.getJobSeqId(job);
  // A job to be simulated (seqId >= 0) must come with a JobStory.
  if (seqId >= 0 && jobdesc == null) {
    throw new IllegalArgumentException("JobStory not available for job "
        + job.getJobID());
  }
  // ZombieJob descriptions always report non-negative task counts.
  int numMaps = (jobdesc == null) ? -1 : jobdesc.getNumberMaps();
  int numReduces = (jobdesc == null) ? -1 : jobdesc.getNumberReduces();
  return new JobStats(numMaps, numReduces, job);
}
示例3: copy
import java.util.Locale;
import org.apache.hadoop.mapreduce.Job; //导入方法依赖的package包/类
/**
 * Copies the table data via DistCp and returns transfer metrics.
 *
 * <p>Runs DistCp non-blocking so metrics can be registered against the running
 * job, then waits for completion. On any failure the (possibly partial)
 * replica data is cleaned up before the exception is propagated.
 *
 * @return metrics of the completed copy, keyed on the bytes-written counter
 *     of the replica location's file system
 * @throws CircusTrainException if the DistCp job fails or cannot be run
 */
@Override
public Metrics copy() throws CircusTrainException {
  LOG.info("Copying table data.");
  LOG.debug("Invoking DistCp: {} -> {}", sourceDataBaseLocation, replicaDataLocation);
  DistCpOptions distCpOptions = parseCopierOptions(copierOptions);
  LOG.debug("Invoking DistCp with options: {}", distCpOptions);
  CircusTrainCopyListing.setAsCopyListingClass(conf);
  CircusTrainCopyListing.setRootPath(conf, sourceDataBaseLocation);
  try {
    loadHComS3AFileSystem();
    // Non-blocking exec: we need the Job handle while it is still running.
    distCpOptions.setBlocking(false);
    Job job = executor.exec(conf, distCpOptions);
    // Counter name, e.g. "S3A_BYTES_WRITTEN". Locale.ROOT makes the
    // upper-casing independent of the JVM default locale (e.g. the Turkish
    // dotless-i would otherwise corrupt schemes containing 'i').
    String counter = String
        .format("%s_BYTES_WRITTEN", replicaDataLocation.toUri().getScheme().toUpperCase(Locale.ROOT));
    registerRunningJobMetrics(job, counter);
    if (!job.waitForCompletion(true)) {
      throw new IOException(
          "DistCp failure: Job " + job.getJobID() + " has failed: " + job.getStatus().getFailureInfo());
    }
    return new JobMetrics(job, FileSystemCounter.class.getName(), counter);
  } catch (Exception e) {
    // Best-effort removal of partially copied replica data before surfacing the failure.
    cleanUpReplicaDataLocation();
    throw new CircusTrainException("Unable to copy file(s)", e);
  }
}