本文整理汇总了Java中org.apache.hadoop.mapreduce.lib.input.CombineFileSplit.getLengths方法的典型用法代码示例。如果您正苦于以下问题:Java CombineFileSplit.getLengths方法的具体用法?Java CombineFileSplit.getLengths怎么用?Java CombineFileSplit.getLengths使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.mapreduce.lib.input.CombineFileSplit
的用法示例。
在下文中一共展示了CombineFileSplit.getLengths方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: initialize
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit; //导入方法依赖的package包/类
/**
 * Prepares this record reader for the given combined split: gathers the
 * per-file paths, start offsets and byte lengths from the split, builds an
 * HDFS split iterator over them bounded by the configured time window, and
 * initializes the embedded loner instance from the job configuration.
 */
@Override
public void initialize(InputSplit split, TaskAttemptContext context)
throws IOException {
  CombineFileSplit combined = (CombineFileSplit) split;
  Configuration conf = context.getConfiguration();
  // Resolve the filesystem from the split's first path; all paths in a
  // combined split are assumed to share it — TODO confirm against callers.
  FileSystem fileSystem = combined.getPath(0).getFileSystem(conf);
  long startMillis = conf.getLong(RowInputFormat.START_TIME_MILLIS, 0L);
  long endMillis = conf.getLong(RowInputFormat.END_TIME_MILLIS, 0L);
  this.splitIterator = HDFSSplitIterator.newInstance(fileSystem,
      combined.getPaths(), combined.getStartOffsets(), combined.getLengths(),
      startMillis, endMillis);
  instantiateGfxdLoner(conf);
}
示例2: CombinedFileRecordReader
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit; //导入方法依赖的package包/类
/**
 * Creates a reader over a combined split and precomputes each chunk's
 * fraction of the total split length; presumably used later for progress
 * reporting — TODO confirm against the rest of the class.
 */
public CombinedFileRecordReader(InputFormat<K, V> inputFormat,
    CombineFileSplit combineFileSplit,
    TaskAttemptContext context)
{
  this.inputFormat = inputFormat;
  this.combineFileSplit = combineFileSplit;
  this.context = context;

  long[] chunkLengths = combineFileSplit.getLengths();
  long total = 0;
  for (int i = 0; i < chunkLengths.length; i++) {
    total += chunkLengths[i];
  }
  // Each entry is that chunk's share of the total byte count.
  fractionLength = new float[chunkLengths.length];
  for (int i = 0; i < chunkLengths.length; i++) {
    fractionLength[i] = ((float) chunkLengths[i]) / total;
  }
}
示例3: FileQueue
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit; //导入方法依赖的package包/类
/**
 * Builds a queue of input sources from a combined split, caching the
 * split's paths, start offsets and lengths, then opening the first source.
 *
 * @param split Description of input sources.
 * @param conf Used to resolve FileSystem instances.
 * @throws IOException if opening the first source fails.
 */
public FileQueue(CombineFileSplit split, Configuration conf)
    throws IOException {
  this.conf = conf;
  this.paths = split.getPaths();
  this.startoffset = split.getStartOffsets();
  this.lengths = split.getLengths();
  // Advance to the first file so the queue is immediately readable.
  nextSource();
}
示例4: initialize
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit; //导入方法依赖的package包/类
/**
 * Sets up this reader for a combined split: resolves the filesystem from
 * the split's first path and creates an HDFS split iterator covering all
 * of the split's paths, offsets and lengths. Both timestamp bounds are
 * passed as 0 — presumably disabling time-range filtering; TODO confirm
 * against HDFSSplitIterator.newInstance.
 */
@Override
public void initialize(InputSplit split, TaskAttemptContext context)
    throws IOException, InterruptedException {
  CombineFileSplit combined = (CombineFileSplit) split;
  Configuration conf = context.getConfiguration();
  Path[] splitPaths = combined.getPaths();
  long[] offsets = combined.getStartOffsets();
  long[] sizes = combined.getLengths();
  FileSystem fs = combined.getPath(0).getFileSystem(conf);
  this.splitIterator = HDFSSplitIterator.newInstance(fs, splitPaths, offsets, sizes, 0L, 0L);
}