本文整理汇总了Java中org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo类的典型用法代码示例。如果您正苦于以下问题:Java SplitMetaInfo类的具体用法?Java SplitMetaInfo怎么用?Java SplitMetaInfo使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
SplitMetaInfo类属于org.apache.hadoop.mapreduce.split.JobSplit包,在下文中一共展示了SplitMetaInfo类的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: writeOldSplits
import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo; //导入依赖的package包/类
/**
 * Serializes the old-API (mapred) splits to {@code out} and returns one
 * {@link SplitMetaInfo} per split recording its host locations, its byte
 * offset within the split file, and its logical length.
 *
 * @param splits old-API input splits to persist
 * @param out    split file stream, already positioned where splits begin
 * @return meta info entries parallel to {@code splits}
 * @throws IOException if writing a split fails
 */
private static SplitMetaInfo[] writeOldSplits(
    org.apache.hadoop.mapred.InputSplit[] splits,
    FSDataOutputStream out) throws IOException {
  SplitMetaInfo[] metaInfo = new SplitMetaInfo[splits.length];
  if (splits.length == 0) {
    return metaInfo;
  }
  long splitOffset = out.getPos();
  for (int idx = 0; idx < splits.length; idx++) {
    org.apache.hadoop.mapred.InputSplit split = splits[idx];
    long posBefore = out.getPos();
    // Record the concrete split class name so the reader can re-instantiate it.
    Text.writeString(out, split.getClass().getName());
    split.write(out);
    long posAfter = out.getPos();
    metaInfo[idx] = new JobSplit.SplitMetaInfo(
        split.getLocations(), splitOffset,
        split.getLength());
    // Advance by the number of bytes this split occupied in the file.
    splitOffset += posAfter - posBefore;
  }
  return metaInfo;
}
示例2: writeOldSplits
import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo; //导入依赖的package包/类
/**
 * Serializes the old-API splits to {@code out}, capping each split's
 * location list at the configured maximum, and returns per-split meta info
 * (possibly truncated locations, offset in the split file, logical length).
 *
 * @param splits old-API input splits to persist
 * @param out    split file stream, positioned at the first split
 * @param conf   job configuration, consulted for {@code MAX_SPLIT_LOCATIONS}
 * @return meta info entries parallel to {@code splits}
 * @throws IOException if serialization fails
 */
private static SplitMetaInfo[] writeOldSplits(org.apache.hadoop.mapred.InputSplit[] splits, FSDataOutputStream out,
    Configuration conf) throws IOException {
  SplitMetaInfo[] metaInfo = new SplitMetaInfo[splits.length];
  if (splits.length == 0) {
    return metaInfo;
  }
  long splitOffset = out.getPos();
  int index = 0;
  for (org.apache.hadoop.mapred.InputSplit split : splits) {
    long posBefore = out.getPos();
    Text.writeString(out, split.getClass().getName());
    split.write(out);
    long posAfter = out.getPos();
    String[] hosts = split.getLocations();
    final int locationCap = conf.getInt(MAX_SPLIT_LOCATIONS, 10);
    if (hosts.length > locationCap) {
      // Oversized location lists bloat the meta file; keep only the first cap entries.
      LOG.warn("Max block location exceeded for split: " + split + " splitsize: " + hosts.length
          + " maxsize: " + locationCap);
      hosts = Arrays.copyOf(hosts, locationCap);
    }
    metaInfo[index++] = new JobSplit.SplitMetaInfo(hosts, splitOffset, split.getLength());
    splitOffset += posAfter - posBefore;
  }
  return metaInfo;
}
示例3: writeJobSplitMetaInfo
import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo; //导入依赖的package包/类
/**
 * Persists the split meta-info file consumed by the job tracker: a file
 * header, the meta-info format version, the entry count, then each entry.
 *
 * @param filename             destination path of the meta-info file
 * @param p                    permission to create the file with
 * @param splitMetaInfoVersion format version written after the header
 * @param allSplitMetaInfo     entries to serialize, in order
 * @throws IOException if creating or writing the file fails
 */
private static void writeJobSplitMetaInfo(FileSystem fs, Path filename, FsPermission p, int splitMetaInfoVersion,
    JobSplit.SplitMetaInfo[] allSplitMetaInfo) throws IOException {
  // write the splits meta-info to a file for the job tracker
  FSDataOutputStream metaOut = null;
  try {
    metaOut = FileSystem.create(fs, filename, p);
    metaOut.write(META_SPLIT_FILE_HEADER);
    WritableUtils.writeVInt(metaOut, splitMetaInfoVersion);
    WritableUtils.writeVInt(metaOut, allSplitMetaInfo.length);
    for (int i = 0; i < allSplitMetaInfo.length; i++) {
      allSplitMetaInfo[i].write(metaOut);
    }
  } finally {
    // IOUtils swallows secondary close failures; stream is closed on all paths.
    IOUtils.closeStream(metaOut);
  }
}
示例4: writeOldSplits
import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo; //导入依赖的package包/类
/**
 * Serializes the old-API (mapred) splits to {@code out} and returns one
 * {@link SplitMetaInfo} per split (locations, byte offset within the split
 * file, logical length).
 *
 * Fix: the original tracked stream positions with {@code int out.size()}.
 * {@code DataOutputStream.size()} saturates at {@code Integer.MAX_VALUE},
 * so split files larger than 2 GB produced wrong offsets. Track positions
 * with {@code long getPos()} instead, matching the sibling variants of this
 * method.
 *
 * @param splits old-API input splits to persist
 * @param out    split file stream, already positioned where splits begin
 * @return meta info entries parallel to {@code splits}
 * @throws IOException if writing a split fails
 */
private static SplitMetaInfo[] writeOldSplits(
    org.apache.hadoop.mapred.InputSplit[] splits,
    FSDataOutputStream out) throws IOException {
  SplitMetaInfo[] info = new SplitMetaInfo[splits.length];
  if (splits.length != 0) {
    int i = 0;
    long offset = out.getPos();
    for (org.apache.hadoop.mapred.InputSplit split : splits) {
      long prevLen = out.getPos();
      // Class name first, so the reader can re-instantiate the split.
      Text.writeString(out, split.getClass().getName());
      split.write(out);
      long currLen = out.getPos();
      info[i++] = new JobSplit.SplitMetaInfo(
          split.getLocations(), offset,
          split.getLength());
      // Advance by the exact number of bytes this split occupied.
      offset += currLen - prevLen;
    }
  }
  return info;
}
示例5: createSplitFiles
import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo; //导入依赖的package包/类
/**
 * Writes the job split file and its companion meta-info file under
 * {@code jobSubmitDir}.
 *
 * Fix: the original called {@code out.close()} outside any finally block,
 * leaking the stream (and potentially a lease on the underlying file) when
 * {@code writeNewSplits} threw. The stream is now closed on all paths;
 * close failures on the success path still propagate to the caller.
 *
 * @param jobSubmitDir job submission directory the files are created under
 * @param conf         job configuration
 * @param fs           filesystem to create the files on
 * @param splits       new-API splits to serialize
 * @throws IOException          if creating or writing either file fails
 * @throws InterruptedException if split serialization is interrupted
 */
public static <T extends InputSplit> void createSplitFiles(Path jobSubmitDir,
    Configuration conf, FileSystem fs, T[] splits)
    throws IOException, InterruptedException {
  FSDataOutputStream out = createFile(fs,
      JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
  SplitMetaInfo[] info;
  try {
    info = writeNewSplits(conf, splits, out);
  } finally {
    out.close();
  }
  writeJobSplitMetaInfo(fs, JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir),
      new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION), splitVersion,
      info);
}
示例6: writeNewSplits
import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo; //导入依赖的package包/类
/**
 * Serializes new-API splits via the configured serialization framework,
 * capping each split's location list at the configured maximum, and returns
 * per-split meta info (possibly truncated locations, offset in the split
 * file, logical length).
 *
 * @param conf  job configuration, consulted for the block-location cap
 * @param array new-API splits to persist
 * @param out   split file stream, positioned at the first split
 * @return meta info entries parallel to {@code array}
 * @throws IOException          if serialization fails
 * @throws InterruptedException if {@code split.getLength()} is interrupted
 */
@SuppressWarnings("unchecked")
private static <T extends InputSplit>
SplitMetaInfo[] writeNewSplits(Configuration conf,
    T[] array, FSDataOutputStream out)
    throws IOException, InterruptedException {
  SplitMetaInfo[] metaInfo = new SplitMetaInfo[array.length];
  if (array.length == 0) {
    return metaInfo;
  }
  SerializationFactory serializationFactory = new SerializationFactory(conf);
  // Cap is read once; it does not change while the job is being submitted.
  final int locationCap = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
      MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
  long splitOffset = out.getPos();
  for (int idx = 0; idx < array.length; idx++) {
    T split = array[idx];
    long posBefore = out.getPos();
    Text.writeString(out, split.getClass().getName());
    Serializer<T> splitSerializer =
        serializationFactory.getSerializer((Class<T>) split.getClass());
    splitSerializer.open(out);
    splitSerializer.serialize(split);
    long posAfter = out.getPos();
    String[] hosts = split.getLocations();
    if (hosts.length > locationCap) {
      // Truncate rather than fail: extra locations only affect scheduling locality.
      LOG.warn("Max block location exceeded for split: "
          + split + " splitsize: " + hosts.length +
          " maxsize: " + locationCap);
      hosts = Arrays.copyOf(hosts, locationCap);
    }
    metaInfo[idx] =
        new JobSplit.SplitMetaInfo(
            hosts, splitOffset,
            split.getLength());
    splitOffset += posAfter - posBefore;
  }
  return metaInfo;
}
示例7: writeOldSplits
import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo; //导入依赖的package包/类
/**
 * Serializes the old-API splits to {@code out}, capping each split's
 * location list at the configured maximum, and returns per-split meta info.
 *
 * @param splits old-API input splits to persist
 * @param out    split file stream, positioned at the first split
 * @param conf   job configuration, consulted for the block-location cap
 * @return meta info entries parallel to {@code splits}
 * @throws IOException if serialization fails
 */
private static SplitMetaInfo[] writeOldSplits(
    org.apache.hadoop.mapred.InputSplit[] splits,
    FSDataOutputStream out, Configuration conf) throws IOException {
  SplitMetaInfo[] metaInfo = new SplitMetaInfo[splits.length];
  if (splits.length == 0) {
    return metaInfo;
  }
  // Cap is read once; it does not change during submission.
  final int locationCap = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
      MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
  long splitOffset = out.getPos();
  int index = 0;
  for (org.apache.hadoop.mapred.InputSplit split : splits) {
    long posBefore = out.getPos();
    Text.writeString(out, split.getClass().getName());
    split.write(out);
    long posAfter = out.getPos();
    String[] hosts = split.getLocations();
    if (hosts.length > locationCap) {
      // Truncate rather than fail: extra locations only affect scheduling locality.
      LOG.warn("Max block location exceeded for split: "
          + split + " splitsize: " + hosts.length +
          " maxsize: " + locationCap);
      hosts = Arrays.copyOf(hosts, locationCap);
    }
    metaInfo[index++] = new JobSplit.SplitMetaInfo(
        hosts, splitOffset,
        split.getLength());
    splitOffset += posAfter - posBefore;
  }
  return metaInfo;
}
示例8: writeJobSplitMetaInfo
import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo; //导入依赖的package包/类
/**
 * Persists the split meta-info file consumed by the job tracker: a file
 * header, the meta-info format version, the entry count, then each entry.
 *
 * Fix: the original called {@code out.close()} outside any finally block,
 * leaking the stream whenever a write threw (the sibling variant of this
 * method already closes in a finally). The stream is now closed on all
 * paths; a close failure on the success path still propagates.
 *
 * @param filename             destination path of the meta-info file
 * @param p                    permission to create the file with
 * @param splitMetaInfoVersion format version written after the header
 * @param allSplitMetaInfo     entries to serialize, in order
 * @throws IOException if creating or writing the file fails
 */
private static void writeJobSplitMetaInfo(FileSystem fs, Path filename,
    FsPermission p, int splitMetaInfoVersion,
    JobSplit.SplitMetaInfo[] allSplitMetaInfo)
    throws IOException {
  // write the splits meta-info to a file for the job tracker
  FSDataOutputStream out = FileSystem.create(fs, filename, p);
  try {
    out.write(JobSplit.META_SPLIT_FILE_HEADER);
    WritableUtils.writeVInt(out, splitMetaInfoVersion);
    WritableUtils.writeVInt(out, allSplitMetaInfo.length);
    for (JobSplit.SplitMetaInfo splitMetaInfo : allSplitMetaInfo) {
      splitMetaInfo.write(out);
    }
  } finally {
    out.close();
  }
}
示例9: writeNewSplits
import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo; //导入依赖的package包/类
/**
 * Serializes new-API splits via the configured serialization framework and
 * returns per-split meta info (locations, offset in the split file, logical
 * length). This variant imposes no cap on the location list.
 *
 * @param conf  job configuration used to build the serialization factory
 * @param array new-API splits to persist
 * @param out   split file stream, positioned at the first split
 * @return meta info entries parallel to {@code array}
 * @throws IOException          if serialization fails
 * @throws InterruptedException if {@code split.getLength()} is interrupted
 */
@SuppressWarnings("unchecked")
private static <T extends InputSplit>
SplitMetaInfo[] writeNewSplits(Configuration conf,
    T[] array, FSDataOutputStream out)
    throws IOException, InterruptedException {
  SplitMetaInfo[] metaInfo = new SplitMetaInfo[array.length];
  if (array.length == 0) {
    return metaInfo;
  }
  SerializationFactory serializationFactory = new SerializationFactory(conf);
  long splitOffset = out.getPos();
  int idx = 0;
  for (T split : array) {
    long posBefore = out.getPos();
    // Class name first, so the reader can pick the matching deserializer.
    Text.writeString(out, split.getClass().getName());
    Serializer<T> splitSerializer =
        serializationFactory.getSerializer((Class<T>) split.getClass());
    splitSerializer.open(out);
    splitSerializer.serialize(split);
    long posAfter = out.getPos();
    metaInfo[idx++] =
        new JobSplit.SplitMetaInfo(
            split.getLocations(), splitOffset,
            split.getLength());
    splitOffset += posAfter - posBefore;
  }
  return metaInfo;
}
示例10: writeNewSplits
import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo; //导入依赖的package包/类
/**
 * Serializes new-API splits via the configured serialization framework and
 * returns per-split meta info. This variant rejects the whole submission
 * with an {@link IOException} if any split exceeds the configured maximum
 * number of block locations (it does not truncate).
 *
 * @param conf  job configuration, consulted for the block-location cap
 * @param array new-API splits to persist
 * @param out   split file stream, positioned at the first split
 * @return meta info entries parallel to {@code array}
 * @throws IOException          if serialization fails or a split exceeds the cap
 * @throws InterruptedException if {@code split.getLength()} is interrupted
 */
@SuppressWarnings("unchecked")
private static <T extends InputSplit>
SplitMetaInfo[] writeNewSplits(Configuration conf,
    T[] array, FSDataOutputStream out)
    throws IOException, InterruptedException {
  SplitMetaInfo[] metaInfo = new SplitMetaInfo[array.length];
  if (array.length == 0) {
    return metaInfo;
  }
  SerializationFactory serializationFactory = new SerializationFactory(conf);
  final int locationCap = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
      MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
  long splitOffset = out.getPos();
  for (int idx = 0; idx < array.length; idx++) {
    T split = array[idx];
    long posBefore = out.getPos();
    Text.writeString(out, split.getClass().getName());
    Serializer<T> splitSerializer =
        serializationFactory.getSerializer((Class<T>) split.getClass());
    splitSerializer.open(out);
    splitSerializer.serialize(split);
    long posAfter = out.getPos();
    String[] hosts = split.getLocations();
    if (hosts.length > locationCap) {
      // Fail fast: this variant treats an oversized location list as a job error.
      throw new IOException("Max block location exceeded for split: "
          + split + " splitsize: " + hosts.length +
          " maxsize: " + locationCap);
    }
    metaInfo[idx] =
        new JobSplit.SplitMetaInfo(
            hosts, splitOffset,
            split.getLength());
    splitOffset += posAfter - posBefore;
  }
  return metaInfo;
}
示例11: writeOldSplits
import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo; //导入依赖的package包/类
/**
 * Serializes the old-API splits to {@code out} and returns per-split meta
 * info. This variant rejects the whole submission with an
 * {@link IOException} if any split exceeds the configured maximum number of
 * block locations (it does not truncate).
 *
 * @param splits old-API input splits to persist
 * @param out    split file stream, positioned at the first split
 * @param conf   job configuration, consulted for the block-location cap
 * @return meta info entries parallel to {@code splits}
 * @throws IOException if serialization fails or a split exceeds the cap
 */
private static SplitMetaInfo[] writeOldSplits(
    org.apache.hadoop.mapred.InputSplit[] splits,
    FSDataOutputStream out, Configuration conf) throws IOException {
  SplitMetaInfo[] metaInfo = new SplitMetaInfo[splits.length];
  if (splits.length == 0) {
    return metaInfo;
  }
  final int locationCap = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
      MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
  long splitOffset = out.getPos();
  for (int idx = 0; idx < splits.length; idx++) {
    org.apache.hadoop.mapred.InputSplit split = splits[idx];
    long posBefore = out.getPos();
    Text.writeString(out, split.getClass().getName());
    split.write(out);
    long posAfter = out.getPos();
    String[] hosts = split.getLocations();
    if (hosts.length > locationCap) {
      // Fail fast: this variant treats an oversized location list as a job error.
      throw new IOException("Max block location exceeded for split: "
          + split + " splitsize: " + hosts.length +
          " maxsize: " + locationCap);
    }
    metaInfo[idx] = new JobSplit.SplitMetaInfo(
        hosts, splitOffset,
        split.getLength());
    splitOffset += posAfter - posBefore;
  }
  return metaInfo;
}