This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.split.SplitMetaInfoReader. If you are wondering what SplitMetaInfoReader is for, how to use it, or where to find usage examples, the curated class code examples below may help.
The SplitMetaInfoReader class belongs to the org.apache.hadoop.mapreduce.split package. Nine code examples of the class are shown below, sorted by popularity by default.
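All nine examples revolve around a single static call, SplitMetaInfoReader.readSplitMetaInfo(jobId, fs, conf, jobSubmitDir), which reads the split meta info file written during job submission and returns a TaskSplitMetaInfo array describing each map task's split offset, length and preferred host locations. Here is a minimal, self-contained sketch of that pattern; the job id "dummy" and the staging path are hypothetical placeholders, not values taken from the examples below.

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader;

public class SplitMetaInfoReaderSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical job id and job submit (staging) directory; replace with your own values.
    JobID jobId = new JobID("dummy", 1);
    Path jobSubmitDir = new Path("/tmp/staging/job_dummy_0001");
    // Read the split meta info written by the client into the submit directory.
    TaskSplitMetaInfo[] splits =
        SplitMetaInfoReader.readSplitMetaInfo(jobId, fs, conf, jobSubmitDir);
    for (TaskSplitMetaInfo info : splits) {
      System.out.println("offset=" + info.getStartOffset()
          + " length=" + info.getInputDataLength()
          + " locations=" + Arrays.toString(info.getLocations()));
    }
  }
}

The examples differ mainly in where the JobID, FileSystem, Configuration and submit directory come from (a Tez client, an MRAppMaster, a JobTracker, or a test), and in what they do with the returned array.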
Example 1: getMapLocationHintsFromInputSplits
import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader; // import the dependent package/class
private List<TaskLocationHint> getMapLocationHintsFromInputSplits(JobID jobId,
    FileSystem fs, Configuration conf,
    String jobSubmitDir) throws IOException {
  TaskSplitMetaInfo[] splitsInfo =
      SplitMetaInfoReader.readSplitMetaInfo(jobId, fs, conf,
          new Path(jobSubmitDir));
  int splitsCount = splitsInfo.length;
  List<TaskLocationHint> locationHints =
      new ArrayList<TaskLocationHint>(splitsCount);
  for (int i = 0; i < splitsCount; ++i) {
    TaskLocationHint locationHint =
        new TaskLocationHint(
            new HashSet<String>(
                Arrays.asList(splitsInfo[i].getLocations())), null);
    locationHints.add(locationHint);
  }
  return locationHints;
}
Example 2: verifyLocationHints
import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader; // import the dependent package/class
private void verifyLocationHints(Path inputSplitsDir,
    List<TaskLocationHint> actual) throws Exception {
  JobID jobId = new JobID("dummy", 1);
  TaskSplitMetaInfo[] splitsInfo =
      SplitMetaInfoReader.readSplitMetaInfo(jobId, remoteFs,
          conf, inputSplitsDir);
  int splitsCount = splitsInfo.length;
  List<TaskLocationHint> locationHints =
      new ArrayList<TaskLocationHint>(splitsCount);
  for (int i = 0; i < splitsCount; ++i) {
    locationHints.add(
        new TaskLocationHint(new HashSet<String>(
            Arrays.asList(splitsInfo[i].getLocations())), null));
  }
  Assert.assertEquals(locationHints, actual);
}
Example 3: getMapLocationHintsFromInputSplits
import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader; // import the dependent package/class
private List<TaskLocationHint> getMapLocationHintsFromInputSplits(JobID jobId,
    FileSystem fs, Configuration conf,
    String jobSubmitDir) throws IOException {
  TaskSplitMetaInfo[] splitsInfo =
      SplitMetaInfoReader.readSplitMetaInfo(jobId, fs, conf,
          new Path(jobSubmitDir));
  int splitsCount = splitsInfo.length;
  List<TaskLocationHint> locationHints =
      new ArrayList<TaskLocationHint>(splitsCount);
  for (int i = 0; i < splitsCount; ++i) {
    TaskLocationHint locationHint =
        TaskLocationHint.createTaskLocationHint(
            new HashSet<String>(
                Arrays.asList(splitsInfo[i].getLocations())), null);
    locationHints.add(locationHint);
  }
  return locationHints;
}
Example 4: verifyLocationHints
import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader; // import the dependent package/class
private void verifyLocationHints(Path inputSplitsDir,
    List<TaskLocationHint> actual) throws Exception {
  JobID jobId = new JobID("dummy", 1);
  JobSplit.TaskSplitMetaInfo[] splitsInfo =
      SplitMetaInfoReader.readSplitMetaInfo(jobId, remoteFs,
          conf, inputSplitsDir);
  int splitsCount = splitsInfo.length;
  List<TaskLocationHint> locationHints =
      new ArrayList<TaskLocationHint>(splitsCount);
  for (int i = 0; i < splitsCount; ++i) {
    locationHints.add(
        TaskLocationHint.createTaskLocationHint(new HashSet<String>(
            Arrays.asList(splitsInfo[i].getLocations())), null));
  }
  Assert.assertEquals(locationHints, actual);
}
Example 5: createSplits
import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader; // import the dependent package/class
protected TaskSplitMetaInfo[] createSplits(JobImpl job, JobId jobId) {
  TaskSplitMetaInfo[] allTaskSplitMetaInfo;
  try {
    allTaskSplitMetaInfo = SplitMetaInfoReader.readSplitMetaInfo(
        job.oldJobId, job.fs,
        job.conf,
        job.remoteJobSubmitDir);
  } catch (IOException e) {
    throw new YarnRuntimeException(e);
  }
  return allTaskSplitMetaInfo;
}
Example 6: createSplits
import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader; // import the dependent package/class
TaskSplitMetaInfo[] createSplits(org.apache.hadoop.mapreduce.JobID jobId)
    throws IOException {
  TaskSplitMetaInfo[] allTaskSplitMetaInfo =
      SplitMetaInfoReader.readSplitMetaInfo(jobId, fs, jobtracker.getConf(),
          jobSubmitDir);
  return allTaskSplitMetaInfo;
}
Example 7: createSplits
import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader; // import the dependent package/class
TaskSplitMetaInfo[] createSplits(org.apache.hadoop.mapreduce.JobID jobId)
    throws IOException {
  TaskSplitMetaInfo[] allTaskSplitMetaInfo =
      SplitMetaInfoReader.readSplitMetaInfo(jobId, fs, jobtracker.getConf(), jobSubmitDir);
  return allTaskSplitMetaInfo;
}
Example 8: SubmittedJob
import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader; // import the dependent package/class
SubmittedJob(JobID jobID, String jobSubmitDirectory, Credentials credentials,
    Configuration configuration) throws IOException, InterruptedException {
  this.jobID = jobID;
  this.configuration = configuration;
  this.jobSubmitDirectoryPath = new Path(jobSubmitDirectory);
  this.fileSystem = FileSystem.get(configuration);
  JobSplit.TaskSplitMetaInfo[] splitInfo =
      SplitMetaInfoReader.readSplitMetaInfo(jobID, fileSystem, configuration,
          jobSubmitDirectoryPath);
  Path jobSplitFile = JobSubmissionFiles.getJobSplitFile(jobSubmitDirectoryPath);
  FSDataInputStream stream = fileSystem.open(jobSplitFile);
  for (JobSplit.TaskSplitMetaInfo info : splitInfo) {
    Object split = getSplitDetails(stream, info.getStartOffset(), configuration);
    inputSplits.add(split);
    splitLocations.put(split, info.getLocations());
    LOG.info("Adding split for execution. Split = " + split + " Locations: "
        + Arrays.toString(splitLocations.get(split)));
  }
  stream.close();
  jobConfPath = JobSubmissionFiles.getJobConfPath(jobSubmitDirectoryPath);
  if (!fileSystem.exists(jobConfPath)) {
    throw new IOException("Cannot find job.xml. Path = " + jobConfPath);
  }
  // We cannot just use the JobConf(Path) constructor,
  // because it does not work for HDFS locations.
  // The comment in Configuration#loadResource() states,
  // for the case when the Path to the resource is provided:
  // "Can't use FileSystem API or we get an infinite loop
  // since FileSystem uses Configuration API. Use java.io.File instead."
  //
  // Workaround: construct an empty Configuration, feed it the
  // input stream, and pass it to the JobConf constructor.
  FSDataInputStream jobConfStream = fileSystem.open(jobConfPath);
  Configuration jobXML = new Configuration(false);
  jobXML.addResource(jobConfStream);
  // The configuration is not actually read until some property is accessed.
  // Calling #size() forces the Configuration to read the input stream.
  jobXML.size();
  // We are done with the input stream and can close it now.
  jobConfStream.close();
  jobConf = new JobConf(jobXML);
  newApi = jobConf.getUseNewMapper();
  jobStatus = new JobStatus(jobID, 0f, 0f, 0f, 0f,
      JobStatus.State.RUNNING,
      JobPriority.NORMAL,
      UserGroupInformation.getCurrentUser().getUserName(),
      jobID.toString(),
      jobConfPath.toString(), "");
}
Example 9: createSplits
import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader; // import the dependent package/class
TaskSplitMetaInfo[] createSplits(org.apache.hadoop.mapreduce.JobID jobId)
    throws IOException {
  TaskSplitMetaInfo[] allTaskSplitMetaInfo =
      SplitMetaInfoReader.readSplitMetaInfo(jobId, fs, conf, jobSubmitDir);
  return allTaskSplitMetaInfo;
}