本文整理匯總了Java中org.apache.hadoop.mapreduce.lib.input.FileSplit.getLocations方法的典型用法代碼示例。如果您正苦於以下問題:Java FileSplit.getLocations方法的具體用法?Java FileSplit.getLocations怎麽用?Java FileSplit.getLocations使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類org.apache.hadoop.mapreduce.lib.input.FileSplit
的用法示例。
在下文中一共展示了FileSplit.getLocations方法的5個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Java代碼示例。
示例1: TeraScheduler
import org.apache.hadoop.mapreduce.lib.input.FileSplit; //導入方法依賴的package包/類
/**
 * Builds the scheduler's view of the job: one internal {@code Split} per real
 * {@code FileSplit}, plus a {@code Host} entry for every distinct location,
 * cross-linked so each host knows its splits and each split its hosts.
 *
 * @param realSplits the input splits to schedule
 * @param conf job configuration; {@code TTConfig.TT_MAP_SLOTS} (default 4)
 *             sets the per-host slot count
 * @throws IOException if {@code FileSplit.getLocations()} fails
 */
public TeraScheduler(FileSplit[] realSplits,
                     Configuration conf) throws IOException {
  this.realSplits = realSplits;
  this.slotsPerHost = conf.getInt(TTConfig.TT_MAP_SLOTS, 4);
  // Deduplicates Host objects by hostname while building the graph.
  Map<String, Host> knownHosts = new HashMap<String, Host>();
  splits = new Split[realSplits.length];
  for (FileSplit fileSplit : realSplits) {
    Split wrapper = new Split(fileSplit.getPath().toString());
    splits[remainingSplits] = wrapper;
    remainingSplits += 1;
    for (String location : fileSplit.getLocations()) {
      Host host = knownHosts.get(location);
      if (host == null) {
        // First time we see this hostname: register it globally.
        host = new Host(location);
        knownHosts.put(location, host);
        hosts.add(host);
      }
      // Bidirectional link between the host and the split.
      host.splits.add(wrapper);
      wrapper.locations.add(host);
    }
  }
}
示例2: addSplit
import org.apache.hadoop.mapreduce.lib.input.FileSplit; //導入方法依賴的package包/類
/**
 * Folds another file split into this composite split: appends it to the
 * split list, grows the total length, and merges its locations into the
 * location list without introducing duplicates.
 *
 * @param split the split to absorb
 * @throws IOException if {@code split.getLocations()} fails
 * @throws InterruptedException declared for interface compatibility
 */
public void addSplit(FileSplit split)
    throws IOException, InterruptedException {
  length += split.getLength();
  splits.add(split);
  for (String host : split.getLocations()) {
    boolean alreadyKnown = locations.contains(host);
    if (!alreadyKnown) {
      locations.add(host);
    }
  }
}
示例3: testSerialization
import org.apache.hadoop.mapreduce.lib.input.FileSplit; //導入方法依賴的package包/類
/**
 * Round-trips a {@link WikipediaInputSplit} through its Writable
 * serialization ({@code write}/{@code readFields}) and verifies that the
 * partition number and every {@code FileSplit} field (path, start, length,
 * locations) survive intact, and that no trailing bytes are left unread.
 *
 * @throws IOException on serialization failure
 */
@Test
public void testSerialization() throws IOException {
  Path testPath = new Path("/foo/bar");
  String[] hosts = {"abcd", "efgh"};
  FileSplit fSplit = new FileSplit(testPath, 1, 2, hosts);
  WikipediaInputSplit split = new WikipediaInputSplit(fSplit, 7);

  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  // try-with-resources: the original leaked the stream if write() threw.
  try (ObjectOutputStream out = new ObjectOutputStream(baos)) {
    split.write(out);
  }

  WikipediaInputSplit split2 = new WikipediaInputSplit();
  ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
  try (ObjectInputStream in = new ObjectInputStream(bais)) {
    split2.readFields(in);
    // readFields must consume exactly what write produced.
    Assert.assertEquals(0, bais.available());
  }

  // assertEquals (not assertTrue(a == b)) so failures report both values.
  Assert.assertEquals(split.getPartition(), split2.getPartition());
  FileSplit fSplit2 = split2.getFileSplit();
  Assert.assertEquals(fSplit.getPath(), fSplit2.getPath());
  Assert.assertEquals(fSplit.getStart(), fSplit2.getStart());
  Assert.assertEquals(fSplit.getLength(), fSplit2.getLength());
  Assert.assertArrayEquals(hosts, fSplit2.getLocations());
}
示例4: from
import org.apache.hadoop.mapreduce.lib.input.FileSplit; //導入方法依賴的package包/類
/**
 * Static factory: wraps a generic Hadoop {@code FileSplit} into a
 * {@code CarbonInputSplit} bound to the given segment id, copying the
 * split's path, byte range, and preferred locations.
 *
 * @param segmentId segment the split belongs to
 * @param split the Hadoop file split to adapt
 * @return the equivalent {@code CarbonInputSplit}
 * @throws IOException if {@code split.getLocations()} fails
 */
public static CarbonInputSplit from(String segmentId, FileSplit split) throws IOException {
  final long start = split.getStart();
  final long length = split.getLength();
  return new CarbonInputSplit(
      segmentId, split.getPath(), start, length, split.getLocations());
}
示例5: FileSplitPartitionQuery
import org.apache.hadoop.mapreduce.lib.input.FileSplit; //導入方法依賴的package包/類
/**
 * Builds a partition query backed by a file split: the split's preferred
 * locations become the query's data locality hints, and the split itself is
 * retained for later use.
 *
 * @param baseQuery the query this partition is derived from
 * @param split the file split backing this partition
 * @throws IOException if {@code split.getLocations()} fails
 */
public FileSplitPartitionQuery(Query<K, T> baseQuery, FileSplit split)
throws IOException {
super(baseQuery, split.getLocations());
this.split = split;
}