本文整理汇总了Java中org.apache.hadoop.mapreduce.lib.input.FileSplit.getLocations方法的典型用法代码示例。如果您正苦于以下问题:Java FileSplit.getLocations方法的具体用法?Java FileSplit.getLocations怎么用?Java FileSplit.getLocations使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.mapreduce.lib.input.FileSplit
的用法示例。
在下文中一共展示了FileSplit.getLocations方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: TeraScheduler
import org.apache.hadoop.mapreduce.lib.input.FileSplit; //导入方法依赖的package包/类
/**
 * Builds the scheduler's internal model from the raw input splits.
 *
 * <p>Each {@code FileSplit} is wrapped in a lightweight {@code Split}, and a
 * {@code Host} object is created (once) for every distinct hostname reported
 * by {@code FileSplit.getLocations()}. Hosts and splits are cross-linked so
 * the scheduler can later match work to data-local hosts.
 *
 * @param realSplits the input splits to schedule
 * @param conf       job configuration; supplies the map-slots-per-host value
 *                   (defaults to 4 when {@code TTConfig.TT_MAP_SLOTS} is unset)
 * @throws IOException if a split's locations cannot be read
 */
public TeraScheduler(FileSplit[] realSplits,
                     Configuration conf) throws IOException {
  this.realSplits = realSplits;
  this.slotsPerHost = conf.getInt(TTConfig.TT_MAP_SLOTS, 4);
  splits = new Split[realSplits.length];
  // Deduplicates Host objects by hostname across all splits.
  Map<String, Host> hostsByName = new HashMap<String, Host>();
  for (FileSplit fileSplit : realSplits) {
    Split wrapped = new Split(fileSplit.getPath().toString());
    splits[remainingSplits++] = wrapped;
    for (String hostname : fileSplit.getLocations()) {
      Host knownHost = hostsByName.get(hostname);
      if (knownHost == null) {
        // First time we see this hostname: register it globally.
        knownHost = new Host(hostname);
        hostsByName.put(hostname, knownHost);
        hosts.add(knownHost);
      }
      // Cross-link: the host knows its splits, the split knows its hosts.
      knownHost.splits.add(wrapped);
      wrapped.locations.add(knownHost);
    }
  }
}
示例2: addSplit
import org.apache.hadoop.mapreduce.lib.input.FileSplit; //导入方法依赖的package包/类
/**
 * Folds another file split into this composite split.
 *
 * <p>The split is appended to the internal list, its byte length is added to
 * the running total, and its host locations are merged into the combined
 * location list without duplicates (existing order is preserved).
 *
 * @param split the file split to absorb
 * @throws IOException          if the split's locations cannot be read
 * @throws InterruptedException if location lookup is interrupted
 */
public void addSplit(FileSplit split)
    throws IOException, InterruptedException {
  splits.add(split);
  length += split.getLength();
  String[] splitHosts = split.getLocations();
  for (int i = 0; i < splitHosts.length; i++) {
    String host = splitHosts[i];
    if (locations.contains(host)) {
      continue; // already tracked — keep the list duplicate-free
    }
    locations.add(host);
  }
}
示例3: testSerialization
import org.apache.hadoop.mapreduce.lib.input.FileSplit; //导入方法依赖的package包/类
/**
 * Round-trips a {@code WikipediaInputSplit} through {@code write}/{@code readFields}
 * and verifies that the partition number and every field of the wrapped
 * {@code FileSplit} (path, start, length, host locations) survive intact.
 */
@Test
public void testSerialization() throws IOException {
  Path testPath = new Path("/foo/bar");
  String[] hosts = {"abcd", "efgh"};
  FileSplit fSplit = new FileSplit(testPath, 1, 2, hosts);
  WikipediaInputSplit split = new WikipediaInputSplit(fSplit, 7);

  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  // try-with-resources closes the stream even if write() throws
  try (ObjectOutputStream out = new ObjectOutputStream(baos)) {
    split.write(out);
  }

  WikipediaInputSplit split2 = new WikipediaInputSplit();
  ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
  try (ObjectInputStream in = new ObjectInputStream(bais)) {
    split2.readFields(in);
    // deserialization must consume every serialized byte
    Assert.assertEquals(0, bais.available());
  }

  // assertEquals (not assertTrue(a == b)) so failures report both values
  Assert.assertEquals(split.getPartition(), split2.getPartition());
  FileSplit fSplit2 = split2.getFileSplit();
  Assert.assertEquals(fSplit.getPath(), fSplit2.getPath());
  Assert.assertEquals(fSplit.getStart(), fSplit2.getStart());
  Assert.assertEquals(fSplit.getLength(), fSplit2.getLength());
  Assert.assertArrayEquals(hosts, fSplit2.getLocations());
}
示例4: from
import org.apache.hadoop.mapreduce.lib.input.FileSplit; //导入方法依赖的package包/类
/**
 * Static factory: wraps a plain Hadoop {@code FileSplit} as a
 * {@code CarbonInputSplit} belonging to the given segment, carrying over the
 * split's path, start offset, length and host locations unchanged.
 *
 * @param segmentId the Carbon segment this split belongs to
 * @param split     the Hadoop file split to convert
 * @return a new {@code CarbonInputSplit} mirroring {@code split}
 * @throws IOException if the split's locations cannot be read
 */
public static CarbonInputSplit from(String segmentId, FileSplit split) throws IOException {
  String[] hostLocations = split.getLocations();
  return new CarbonInputSplit(
      segmentId, split.getPath(), split.getStart(), split.getLength(), hostLocations);
}
示例5: FileSplitPartitionQuery
import org.apache.hadoop.mapreduce.lib.input.FileSplit; //导入方法依赖的package包/类
/**
 * Creates a partition query bound to a single file split.
 *
 * @param baseQuery the query this partition is derived from
 * @param split     the file split this partition covers
 * @throws IOException if the split's host locations cannot be read
 */
public FileSplitPartitionQuery(Query<K, T> baseQuery, FileSplit split)
throws IOException {
// The split's host locations become this partition's preferred locations.
super(baseQuery, split.getLocations());
this.split = split;
}