Java SplitMetaInfo Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo. If you are wondering what SplitMetaInfo is for and how to use it, the curated class examples below should help.


The SplitMetaInfo class belongs to the org.apache.hadoop.mapreduce.split.JobSplit package. A total of 11 code examples of the class are shown below, ordered by popularity by default.
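
Before diving in, note that every example below funnels into the same three-argument SplitMetaInfo constructor. The minimal sketch below is ours, not taken from any of the cited projects; the host names and sizes are placeholders chosen purely to label what each argument means.

import org.apache.hadoop.mapreduce.split.JobSplit;

// Hypothetical values; only the constructor call itself comes from the examples below.
static JobSplit.SplitMetaInfo sketch() {
  String[] locations = { "host1.example.com", "host2.example.com" }; // hosts holding the split's data
  long startOffset = 0L;               // byte offset of this split's record inside the split file
  long inputDataLength = 134217728L;   // bytes of input data covered by this split (here 128 MB)
  return new JobSplit.SplitMetaInfo(locations, startOffset, inputDataLength);
}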

Example 1: writeOldSplits

Serializes old-API (org.apache.hadoop.mapred) splits into the split file and records each split's locations, byte offset, and length, with no cap on the number of locations.

import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo; // required package/class import
private static SplitMetaInfo[] writeOldSplits(
    org.apache.hadoop.mapred.InputSplit[] splits,
    FSDataOutputStream out) throws IOException {
  SplitMetaInfo[] info = new SplitMetaInfo[splits.length];
  if (splits.length != 0) {
    int i = 0;
    long offset = out.getPos();
    for(org.apache.hadoop.mapred.InputSplit split: splits) {
      long prevLen = out.getPos();
      Text.writeString(out, split.getClass().getName());
      split.write(out);
      long currLen = out.getPos();
      info[i++] = new JobSplit.SplitMetaInfo( 
          split.getLocations(), offset,
          split.getLength());
      offset += currLen - prevLen;
    }
  }
  return info;
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 21, Source: JobSplitWriter.java

Example 2: writeOldSplits

Like Example 1, but caps each split's location list at MAX_SPLIT_LOCATIONS (default 10), truncating the list with a warning.

import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo; // required package/class import
private static SplitMetaInfo[] writeOldSplits(org.apache.hadoop.mapred.InputSplit[] splits, FSDataOutputStream out,
        Configuration conf) throws IOException {
    SplitMetaInfo[] info = new SplitMetaInfo[splits.length];
    if(splits.length != 0) {
        int i = 0;
        long offset = out.getPos();
        for(org.apache.hadoop.mapred.InputSplit split: splits) {
            long prevLen = out.getPos();
            Text.writeString(out, split.getClass().getName());
            split.write(out);
            long currLen = out.getPos();
            String[] locations = split.getLocations();
            final int max_loc = conf.getInt(MAX_SPLIT_LOCATIONS, 10);
            if(locations.length > max_loc) {
                LOG.warn("Max block location exceeded for split: " + split + " splitsize: " + locations.length
                        + " maxsize: " + max_loc);
                locations = Arrays.copyOf(locations, max_loc);
            }
            info[i++] = new JobSplit.SplitMetaInfo(locations, offset, split.getLength());
            offset += currLen - prevLen;
        }
    }
    return info;
}
 
Developer: ShifuML, Project: guagua, Lines: 25, Source: GuaguaSplitWriter.java

Example 3: writeJobSplitMetaInfo

Writes the collected split meta info to a file for the job tracker: a header marker, a vint version, a vint record count, then the records; the stream is closed in a finally block.

import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo; // required package/class import
private static void writeJobSplitMetaInfo(FileSystem fs, Path filename, FsPermission p, int splitMetaInfoVersion,
        JobSplit.SplitMetaInfo[] allSplitMetaInfo) throws IOException {
    // write the splits meta-info to a file for the job tracker
    FSDataOutputStream out = null;
    try {
        out = FileSystem.create(fs, filename, p);
        out.write(META_SPLIT_FILE_HEADER);
        WritableUtils.writeVInt(out, splitMetaInfoVersion);
        WritableUtils.writeVInt(out, allSplitMetaInfo.length);
        for(JobSplit.SplitMetaInfo splitMetaInfo: allSplitMetaInfo) {
            splitMetaInfo.write(out);
        }
    } finally {
        IOUtils.closeStream(out);
    }
}
 
Developer: ShifuML, Project: guagua, Lines: 17, Source: GuaguaSplitWriter.java
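
Hadoop ships its own reader for this file (SplitMetaInfoReader), but the layout is easiest to see by mirroring the writer step for step. The following reader is a hypothetical sketch of ours, not code from the projects above; it assumes SplitMetaInfo's public no-arg Writable constructor.

import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.split.JobSplit;

// Hypothetical reader mirroring writeJobSplitMetaInfo above:
// header bytes, vint version, vint count, then the records.
private static JobSplit.SplitMetaInfo[] readJobSplitMetaInfo(FileSystem fs, Path filename)
        throws IOException {
    FSDataInputStream in = null;
    try {
        in = fs.open(filename);
        byte[] header = new byte[JobSplit.META_SPLIT_FILE_HEADER.length];
        in.readFully(header);                      // the META_SPLIT_FILE_HEADER marker
        int version = WritableUtils.readVInt(in);  // splitMetaInfoVersion (not validated here)
        int count = WritableUtils.readVInt(in);    // allSplitMetaInfo.length
        JobSplit.SplitMetaInfo[] all = new JobSplit.SplitMetaInfo[count];
        for(int i = 0; i < count; i++) {
            all[i] = new JobSplit.SplitMetaInfo();
            all[i].readFields(in);                 // Writable counterpart of write(out)
        }
        return all;
    } finally {
        IOUtils.closeStream(in);
    }
}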

Example 4: writeOldSplits

Same shape as Example 1, but tracks stream positions with DataOutputStream.size() rather than getPos().

import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo; // required package/class import
private static SplitMetaInfo[] writeOldSplits(
    org.apache.hadoop.mapred.InputSplit[] splits,
    FSDataOutputStream out) throws IOException {
  SplitMetaInfo[] info = new SplitMetaInfo[splits.length];
  if (splits.length != 0) {
    int i = 0;
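    // Note: DataOutputStream.size() returns an int and stops counting at Integer.MAX_VALUE,
    // so unlike the getPos()-based variants on this page it can misreport offsets past 2 GB.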
    long offset = out.size();
    for(org.apache.hadoop.mapred.InputSplit split: splits) {
      int prevLen = out.size();
      Text.writeString(out, split.getClass().getName());
      split.write(out);
      int currLen = out.size();
      info[i++] = new JobSplit.SplitMetaInfo( 
          split.getLocations(), offset,
          split.getLength());
      offset += currLen - prevLen;
    }
  }
  return info;
}
 
Developer: karahiyo, Project: hanoi-hadoop-2.0.0-cdh, Lines: 21, Source: JobSplitWriter.java

Example 5: createSplitFiles

The public entry point: writes the split file, then the companion meta-info file, under the job submission directory.

import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo; // required package/class import
public static <T extends InputSplit> void createSplitFiles(Path jobSubmitDir, 
    Configuration conf, FileSystem fs, T[] splits) 
throws IOException, InterruptedException {
  FSDataOutputStream out = createFile(fs, 
      JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
  SplitMetaInfo[] info = writeNewSplits(conf, splits, out);
  out.close();
  writeJobSplitMetaInfo(fs,JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir), 
      new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION), splitVersion,
      info);
}
 
Developer: naver, Project: hadoop, Lines: 12, Source: JobSplitWriter.java
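
A hedged usage sketch for this entry point, assuming the new-API submission flow; the wrapper method is ours and the way the splits were obtained (for example from InputFormat.getSplits) is illustrative, not taken from the project above.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.split.JobSplitWriter;

// Hypothetical caller: writes both the split file and its meta file under jobSubmitDir.
static void writeSplitFiles(Configuration conf, Path jobSubmitDir, List<InputSplit> splits)
    throws IOException, InterruptedException {
  FileSystem fs = jobSubmitDir.getFileSystem(conf);
  JobSplitWriter.createSplitFiles(jobSubmitDir, conf, fs,
      splits.toArray(new InputSplit[splits.size()]));
}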

Example 6: writeNewSplits

New-API (org.apache.hadoop.mapreduce) variant: serializes each split through a SerializationFactory and truncates location lists that exceed MRConfig.MAX_BLOCK_LOCATIONS_KEY, logging a warning.

import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo; // required package/class import
@SuppressWarnings("unchecked")
private static <T extends InputSplit> 
SplitMetaInfo[] writeNewSplits(Configuration conf, 
    T[] array, FSDataOutputStream out)
throws IOException, InterruptedException {

  SplitMetaInfo[] info = new SplitMetaInfo[array.length];
  if (array.length != 0) {
    SerializationFactory factory = new SerializationFactory(conf);
    int i = 0;
    int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
        MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
    long offset = out.getPos();
    for(T split: array) {
      long prevCount = out.getPos();
      Text.writeString(out, split.getClass().getName());
      Serializer<T> serializer = 
        factory.getSerializer((Class<T>) split.getClass());
      serializer.open(out);
      serializer.serialize(split);
      long currCount = out.getPos();
      String[] locations = split.getLocations();
      if (locations.length > maxBlockLocations) {
        LOG.warn("Max block location exceeded for split: "
            + split + " splitsize: " + locations.length +
            " maxsize: " + maxBlockLocations);
        locations = Arrays.copyOf(locations, maxBlockLocations);
      }
      info[i++] = 
        new JobSplit.SplitMetaInfo( 
            locations, offset,
            split.getLength());
      offset += currCount - prevCount;
    }
  }
  return info;
}
 
Developer: naver, Project: hadoop, Lines: 38, Source: JobSplitWriter.java
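
Examples 6 and 7 truncate an oversized location list and log a warning, while Examples 10 and 11 below throw an IOException instead; which behavior you get depends on the Hadoop fork. In both cases the cap is read from the configuration, so a job whose splits legitimately span many hosts can raise it before submission. A minimal sketch, with an arbitrary illustrative value:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRConfig;

// Raise the per-split block-location cap; 15 is an arbitrary example value.
static Configuration configureLocationCap() {
  Configuration conf = new Configuration();
  conf.setInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY, 15);
  return conf;
}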

Example 7: writeOldSplits

Old-API counterpart of Example 6: the same truncate-and-warn handling of oversized location lists.

import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo; // required package/class import
private static SplitMetaInfo[] writeOldSplits(
    org.apache.hadoop.mapred.InputSplit[] splits,
    FSDataOutputStream out, Configuration conf) throws IOException {
  SplitMetaInfo[] info = new SplitMetaInfo[splits.length];
  if (splits.length != 0) {
    int i = 0;
    long offset = out.getPos();
    int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
        MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
    for(org.apache.hadoop.mapred.InputSplit split: splits) {
      long prevLen = out.getPos();
      Text.writeString(out, split.getClass().getName());
      split.write(out);
      long currLen = out.getPos();
      String[] locations = split.getLocations();
      if (locations.length > maxBlockLocations) {
        LOG.warn("Max block location exceeded for split: "
            + split + " splitsize: " + locations.length +
            " maxsize: " + maxBlockLocations);
        locations = Arrays.copyOf(locations,maxBlockLocations);
      }
      info[i++] = new JobSplit.SplitMetaInfo( 
          locations, offset,
          split.getLength());
      offset += currLen - prevLen;
    }
  }
  return info;
}
 
Developer: naver, Project: hadoop, Lines: 30, Source: JobSplitWriter.java

Example 8: writeJobSplitMetaInfo

Like Example 3, but without a try/finally: the stream is closed only on the success path.

import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo; // required package/class import
private static void writeJobSplitMetaInfo(FileSystem fs, Path filename, 
    FsPermission p, int splitMetaInfoVersion, 
    JobSplit.SplitMetaInfo[] allSplitMetaInfo) 
throws IOException {
  // write the splits meta-info to a file for the job tracker
  FSDataOutputStream out = 
    FileSystem.create(fs, filename, p);
  out.write(JobSplit.META_SPLIT_FILE_HEADER);
  WritableUtils.writeVInt(out, splitMetaInfoVersion);
  WritableUtils.writeVInt(out, allSplitMetaInfo.length);
  for (JobSplit.SplitMetaInfo splitMetaInfo : allSplitMetaInfo) {
    splitMetaInfo.write(out);
  }
  out.close();
}
 
Developer: naver, Project: hadoop, Lines: 16, Source: JobSplitWriter.java

Example 9: writeNewSplits

New-API variant with no cap on the number of locations.

import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo; // required package/class import
@SuppressWarnings("unchecked")
private static <T extends InputSplit> 
SplitMetaInfo[] writeNewSplits(Configuration conf, 
    T[] array, FSDataOutputStream out)
throws IOException, InterruptedException {

  SplitMetaInfo[] info = new SplitMetaInfo[array.length];
  if (array.length != 0) {
    SerializationFactory factory = new SerializationFactory(conf);
    int i = 0;
    long offset = out.getPos();
    for(T split: array) {
      long prevCount = out.getPos();
      Text.writeString(out, split.getClass().getName());
      Serializer<T> serializer = 
        factory.getSerializer((Class<T>) split.getClass());
      serializer.open(out);
      serializer.serialize(split);
      long currCount = out.getPos();
      info[i++] = 
        new JobSplit.SplitMetaInfo( 
            split.getLocations(), offset,
            split.getLength());
      offset += currCount - prevCount;
    }
  }
  return info;
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 29, Source: JobSplitWriter.java

Example 10: writeNewSplits

New-API variant that throws an IOException, rather than truncating, when a split's location list exceeds the cap.

import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo; // required package/class import
@SuppressWarnings("unchecked")
private static <T extends InputSplit> 
SplitMetaInfo[] writeNewSplits(Configuration conf, 
    T[] array, FSDataOutputStream out)
throws IOException, InterruptedException {

  SplitMetaInfo[] info = new SplitMetaInfo[array.length];
  if (array.length != 0) {
    SerializationFactory factory = new SerializationFactory(conf);
    int i = 0;
    int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
        MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
    long offset = out.getPos();
    for(T split: array) {
      long prevCount = out.getPos();
      Text.writeString(out, split.getClass().getName());
      Serializer<T> serializer = 
        factory.getSerializer((Class<T>) split.getClass());
      serializer.open(out);
      serializer.serialize(split);
      long currCount = out.getPos();
      String[] locations = split.getLocations();
      if (locations.length > maxBlockLocations) {
        throw new IOException("Max block location exceeded for split: "
            + split + " splitsize: " + locations.length +
            " maxsize: " + maxBlockLocations);
      }
      info[i++] = 
        new JobSplit.SplitMetaInfo( 
            locations, offset,
            split.getLength());
      offset += currCount - prevCount;
    }
  }
  return info;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 37, Source: JobSplitWriter.java

Example 11: writeOldSplits

Old-API counterpart of Example 10: oversized location lists abort with an IOException.

import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo; // required package/class import
private static SplitMetaInfo[] writeOldSplits(
    org.apache.hadoop.mapred.InputSplit[] splits,
    FSDataOutputStream out, Configuration conf) throws IOException {
  SplitMetaInfo[] info = new SplitMetaInfo[splits.length];
  if (splits.length != 0) {
    int i = 0;
    long offset = out.getPos();
    int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
        MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
    for(org.apache.hadoop.mapred.InputSplit split: splits) {
      long prevLen = out.getPos();
      Text.writeString(out, split.getClass().getName());
      split.write(out);
      long currLen = out.getPos();
      String[] locations = split.getLocations();
      if (locations.length > maxBlockLocations) {
        throw new IOException("Max block location exceeded for split: "
            + split + " splitsize: " + locations.length +
            " maxsize: " + maxBlockLocations);
      }
      info[i++] = new JobSplit.SplitMetaInfo( 
          locations, offset,
          split.getLength());
      offset += currLen - prevLen;
    }
  }
  return info;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 29, Source: JobSplitWriter.java


Note: the org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. Before distributing or reusing it, consult the corresponding project's License. Do not reproduce without permission.