This article collects typical usage examples of the Java method org.apache.hadoop.mapred.jobcontrol.Job.getJobConf. If you are wondering what Job.getJobConf does, how to use it, or where to find examples, the curated code samples below may help. You can also read more about the enclosing class, org.apache.hadoop.mapred.jobcontrol.Job.
The following shows 4 code examples of Job.getJobConf, sorted by popularity by default.
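Before the examples, here is a minimal sketch of the basic pattern (not taken from the examples below): Job.getJobConf() returns the JobConf backing a job-control job, and since JobConf extends Configuration it can be read or tweaked with the usual get/set calls. The property key queried here is only an illustration.
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.jobcontrol.Job;

public class GetJobConfSketch {
    static void inspect(Job job) {
        // getJobConf() exposes the underlying JobConf of the job-control wrapper.
        JobConf conf = job.getJobConf();
        // JobConf extends Configuration, so the usual get/set calls apply.
        String name = conf.getJobName();
        boolean speculativeMaps = conf.getBoolean("mapreduce.map.speculative", true);
        System.out.println(name + ": speculative maps = " + speculativeMaps);
    }
}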
Example 1: getTaskReports
import org.apache.hadoop.mapred.jobcontrol.Job; // import the package/class this method depends on
public static Iterator<TaskReport> getTaskReports(Job job, TaskType type) throws IOException {
    if (job.getJobConf().getBoolean(PigConfiguration.PIG_NO_TASK_REPORT, false)) {
        LOG.info("TaskReports are disabled for job: " + job.getAssignedJobID());
        return null;
    }
    Cluster cluster = new Cluster(job.getJobConf());
    try {
        org.apache.hadoop.mapreduce.Job mrJob = cluster.getJob(job.getAssignedJobID());
        if (mrJob == null) { // In local mode, mrJob will be null
            mrJob = job.getJob();
        }
        org.apache.hadoop.mapreduce.TaskReport[] reports = mrJob.getTaskReports(type);
        return DowngradeHelper.downgradeTaskReports(reports);
    } catch (InterruptedException ir) {
        throw new IOException(ir);
    }
}
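A hedged usage sketch for the helper above, assuming it sits in the same class as Example 1's method (in Apache Pig these helpers live in the Hadoop shim layer): it iterates the map-task reports and prints their progress. Because the helper returns null when task reports are disabled via PigConfiguration.PIG_NO_TASK_REPORT, callers must null-check the result.
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.mapred.TaskReport;
import org.apache.hadoop.mapred.jobcontrol.Job;
import org.apache.hadoop.mapreduce.TaskType;

static void logMapProgress(Job job) throws IOException {
    // Calls the getTaskReports helper from Example 1, assumed to be in scope.
    Iterator<TaskReport> reports = getTaskReports(job, TaskType.MAP);
    if (reports == null) {
        return; // task reports are disabled for this job
    }
    while (reports.hasNext()) {
        TaskReport report = reports.next();
        System.out.println("map task progress: " + report.getProgress() + ", state: " + report.getState());
    }
}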
Example 2: IllustratorContext
import org.apache.hadoop.mapred.jobcontrol.Job; // import the package/class this method depends on
public IllustratorContext(Job job,
        List<Pair<PigNullableWritable, Writable>> input,
        POPackage pkg
        ) throws IOException, InterruptedException {
    super(job.getJobConf(), new TaskAttemptID(), new FakeRawKeyValueIterator(input.iterator().hasNext()),
            null, null, null, null, new IllustrateDummyReporter(), null, PigNullableWritable.class, NullableTuple.class);
    bos = new ByteArrayOutputStream();
    dos = new DataOutputStream(bos);
    org.apache.hadoop.mapreduce.Job nwJob = new org.apache.hadoop.mapreduce.Job(job.getJobConf());
    sortComparator = nwJob.getSortComparator();
    groupingComparator = nwJob.getGroupingComparator();
    Collections.sort(input, new Comparator<Pair<PigNullableWritable, Writable>>() {
            @Override
            public int compare(Pair<PigNullableWritable, Writable> o1,
                               Pair<PigNullableWritable, Writable> o2) {
                try {
                    o1.first.write(dos);
                    int l1 = bos.size();
                    o2.first.write(dos);
                    int l2 = bos.size();
                    byte[] bytes = bos.toByteArray();
                    bos.reset();
                    return sortComparator.compare(bytes, 0, l1, bytes, l1, l2 - l1);
                } catch (IOException e) {
                    throw new RuntimeException("Serialization exception in sort: " + e.getMessage());
                }
            }
        }
    );
    currentValues = new ArrayList<NullableTuple>();
    it = input.iterator();
    if (it.hasNext()) {
        Pair<PigNullableWritable, Writable> entry = it.next();
        nextKey = entry.first;
        nextValue = (NullableTuple) entry.second;
    }
    pack = pkg;
}
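The comparator above never deserializes the keys: each pair of keys is written into a shared ByteArrayOutputStream and the job's raw sort comparator compares the two serialized regions in place, the same way MapReduce orders keys during the shuffle. Below is a standalone sketch of that byte-level comparison pattern, using Hadoop's Text and its registered WritableComparator instead of Pig's PigNullableWritable.
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparator;

public class RawCompareSketch {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(bos);

        // Serialize both keys back to back into one buffer, remembering the split point.
        new Text("apple").write(dos);
        int l1 = bos.size();
        new Text("banana").write(dos);
        int l2 = bos.size();
        byte[] bytes = bos.toByteArray();

        // Compare the two serialized keys without deserializing them.
        WritableComparator cmp = WritableComparator.get(Text.class);
        System.out.println(cmp.compare(bytes, 0, l1, bytes, l1, l2 - l1)); // negative: "apple" sorts before "banana"
    }
}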
Example 3: IllustratorContextImpl
import org.apache.hadoop.mapred.jobcontrol.Job; // import the package/class this method depends on
public IllustratorContextImpl(Job job,
        List<Pair<PigNullableWritable, Writable>> input,
        POPackage pkg
        ) throws IOException, InterruptedException {
    super(job.getJobConf(), new TaskAttemptID(), new FakeRawKeyValueIterator(input.iterator().hasNext()),
            null, null, null, null, new IllustrateDummyReporter(), null, PigNullableWritable.class, NullableTuple.class);
    bos = new ByteArrayOutputStream();
    dos = new DataOutputStream(bos);
    org.apache.hadoop.mapreduce.Job nwJob = new org.apache.hadoop.mapreduce.Job(job.getJobConf());
    sortComparator = nwJob.getSortComparator();
    groupingComparator = nwJob.getGroupingComparator();
    Collections.sort(input, new Comparator<Pair<PigNullableWritable, Writable>>() {
            @Override
            public int compare(Pair<PigNullableWritable, Writable> o1,
                               Pair<PigNullableWritable, Writable> o2) {
                try {
                    o1.first.write(dos);
                    int l1 = bos.size();
                    o2.first.write(dos);
                    int l2 = bos.size();
                    byte[] bytes = bos.toByteArray();
                    bos.reset();
                    return sortComparator.compare(bytes, 0, l1, bytes, l1, l2 - l1);
                } catch (IOException e) {
                    throw new RuntimeException("Serialization exception in sort: " + e.getMessage());
                }
            }
        }
    );
    currentValues = new ArrayList<NullableTuple>();
    it = input.iterator();
    if (it.hasNext()) {
        Pair<PigNullableWritable, Writable> entry = it.next();
        nextKey = entry.first;
        nextValue = (NullableTuple) entry.second;
    }
    pack = pkg;
}
Example 4: getCounters
import org.apache.hadoop.mapred.jobcontrol.Job; // import the package/class this method depends on
public static Counters getCounters(Job job) throws IOException {
    try {
        Cluster cluster = new Cluster(job.getJobConf());
        org.apache.hadoop.mapreduce.Job mrJob = cluster.getJob(job.getAssignedJobID());
        if (mrJob == null) { // In local mode, mrJob will be null
            mrJob = job.getJob();
        }
        return new Counters(mrJob.getCounters());
    } catch (Exception ir) {
        throw new IOException(ir);
    }
}
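A hedged usage sketch for the helper above, again assuming it is in scope alongside Example 4's method: it fetches the job's counters and reads one built-in task counter. The group and counter names are the standard Hadoop 2.x ones and may differ in other versions.
import java.io.IOException;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.jobcontrol.Job;

static void printMapInputRecords(Job job) throws IOException {
    // Calls the getCounters helper from Example 4, assumed to be in scope.
    Counters counters = getCounters(job);
    // Built-in Hadoop 2.x task counter: number of records read by all map tasks.
    long records = counters.findCounter(
            "org.apache.hadoop.mapreduce.TaskCounter", "MAP_INPUT_RECORDS").getValue();
    System.out.println("map input records: " + records);
}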