This article collects typical usage examples of the Java method org.apache.hadoop.fs.BlockLocation.getHosts. If you are wondering what BlockLocation.getHosts does, how to use it, or where to find examples of it, the curated code samples below should help. You can also browse further usage examples of the method's enclosing class, org.apache.hadoop.fs.BlockLocation.
The following presents six code examples of BlockLocation.getHosts, sorted by popularity by default.
Example 1: getHostAffinity
import org.apache.hadoop.fs.BlockLocation; // import the class the method depends on
/**
 * Get the host affinity for a row group.
 *
 * @param fileStatus the Parquet file
 * @param start the start of the row group
 * @param length the length of the row group
 * @return a map from host name to the fraction of the row group stored on that host
 * @throws IOException if the block locations cannot be retrieved
 */
private Map<String, Float> getHostAffinity(FileStatus fileStatus, long start, long length)
    throws IOException {
  BlockLocation[] blockLocations = fs.getFileBlockLocations(fileStatus, start, length);
  Map<String, Float> hostAffinityMap = Maps.newHashMap();
  for (BlockLocation blockLocation : blockLocations) {
    for (String host : blockLocation.getHosts()) {
      Float currentAffinity = hostAffinityMap.get(host);
      float blockStart = blockLocation.getOffset();
      float blockEnd = blockStart + blockLocation.getLength();
      float rowGroupEnd = start + length;
      // Fraction of the row group that falls inside this block: clip the block
      // to the row group's [start, start + length) range, then divide by length.
      Float newAffinity = (blockLocation.getLength()
          - (blockStart < start ? start - blockStart : 0)
          - (blockEnd > rowGroupEnd ? blockEnd - rowGroupEnd : 0)) / length;
      if (currentAffinity != null) {
        hostAffinityMap.put(host, currentAffinity + newAffinity);
      } else {
        hostAffinityMap.put(host, newAffinity);
      }
    }
  }
  return hostAffinityMap;
}
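To make the clipping arithmetic above concrete, here is a minimal standalone sketch (not part of the original example; all numbers are made up) that applies the same formula to a hypothetical row group at bytes [100, 300) covered by two 256-byte blocks:

public class AffinityDemo {
  public static void main(String[] args) {
    long start = 100, length = 200;             // hypothetical row group [100, 300)
    long[][] blocks = { {0, 256}, {256, 256} }; // hypothetical {offset, length} pairs
    for (long[] b : blocks) {
      long blockStart = b[0];
      long blockEnd = b[0] + b[1];
      long rowGroupEnd = start + length;
      // Same clipping as above: block length minus the parts that fall
      // outside the row group, divided by the row group length.
      double affinity = (b[1]
          - (blockStart < start ? start - blockStart : 0)
          - (blockEnd > rowGroupEnd ? blockEnd - rowGroupEnd : 0)) / (double) length;
      System.out.println("block@" + b[0] + " affinity = " + affinity);
    }
    // Prints 0.78 and 0.22: for one row group, the affinities sum to 1.0.
  }
}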
Example 2: main
import org.apache.hadoop.fs.BlockLocation; // import the class the method depends on
public static void main(String[] args) throws Exception {
  String uri = "hdfs://hadoop-master:9000/";
  Configuration config = new Configuration();
  FileSystem fs = FileSystem.get(URI.create(uri), config, "root");

  // List the entries directly under the root directory.
  FileStatus[] listStatus = fs.listStatus(new Path("/"));
  for (FileStatus file : listStatus) {
    System.out.println("[" + (file.isFile() ? "file" : "dir") + "] " + file.getPath().getName());
  }

  // Recursively list every file, including its block locations.
  RemoteIterator<LocatedFileStatus> listFiles = fs.listFiles(new Path("/"), true);
  while (listFiles.hasNext()) {
    LocatedFileStatus fileStatus = listFiles.next();
    log.info("block size: {}", fileStatus.getBlockSize());
    log.info("owner: {}", fileStatus.getOwner());
    log.info("replication: {}", fileStatus.getReplication());
    log.info("permission: {}", fileStatus.getPermission());
    log.info("path name: {}", fileStatus.getPath().getName());
    log.info("========block info=========");
    BlockLocation[] blockLocations = fileStatus.getBlockLocations();
    for (BlockLocation blockLocation : blockLocations) {
      log.info("block offset: {}", blockLocation.getOffset());
      log.info("block length: {}", blockLocation.getLength());
      // getHosts() returns the DataNode host names that store this block.
      String[] dataNodes = blockLocation.getHosts();
      for (String dataNode : dataNodes) {
        log.info("dataNode: {}", dataNode);
      }
    }
  }
}
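One caveat not shown in the original example: FileSystem implements Closeable, so a standalone tool like this should release the client when done. A sketch using try-with-resources, reusing the same hypothetical URI and user as above (inside a method that declares throws Exception, since this FileSystem.get overload also throws InterruptedException):

Configuration config = new Configuration();
try (FileSystem fs = FileSystem.get(URI.create("hdfs://hadoop-master:9000/"), config, "root")) {
  for (FileStatus file : fs.listStatus(new Path("/"))) {
    System.out.println(file.getPath().getName());
  }
} // fs.close() runs automatically here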
Example 3: assertLocationValid
import org.apache.hadoop.fs.BlockLocation; // import the class the method depends on
private void assertLocationValid(BlockLocation location) throws IOException {
  LOG.info(location);
  String[] hosts = location.getHosts();
  String[] names = location.getNames();
  assertNotEqual("No hosts supplied for " + location, 0, hosts.length);
  // For every host, there's a name.
  assertEquals("Unequal names and hosts in " + location,
      hosts.length, names.length);
  assertEquals(SwiftProtocolConstants.BLOCK_LOCATION,
      location.getNames()[0]);
  assertEquals(SwiftProtocolConstants.TOPOLOGY_PATH,
      location.getTopologyPaths()[0]);
}
Example 4: computeLocality
import org.apache.hadoop.fs.BlockLocation; // import the class the method depends on
private void computeLocality(ParquetMetadata footer) throws ExecutionSetupException {
  try {
    BlockMetaData block = footer.getBlocks().get(readEntry.getRowGroupIndex());
    BlockLocation[] blockLocations = fs.getFileBlockLocations(new Path(readEntry.getPath()),
        block.getStartingPos(), block.getCompressedSize());
    String localHost = InetAddress.getLocalHost().getCanonicalHostName();
    List<Range<Long>> intersectingRanges = new ArrayList<>();
    Range<Long> rowGroupRange =
        Range.openClosed(block.getStartingPos(), block.getStartingPos() + block.getCompressedSize());
    for (BlockLocation loc : blockLocations) {
      for (String host : loc.getHosts()) {
        if (host.equals(localHost)) {
          // Collect the parts of the row group that are stored on this machine.
          intersectingRanges.add(
              Range.closedOpen(loc.getOffset(), loc.getOffset() + loc.getLength())
                  .intersection(rowGroupRange));
        }
      }
    }
    long totalIntersect = 0;
    for (Range<Long> range : intersectingRanges) {
      totalIntersect += (range.upperEndpoint() - range.lowerEndpoint());
    }
    // If any byte of the row group is missing locally, count this as a remote read.
    if (totalIntersect < block.getCompressedSize()) {
      context.getStats().addLongStat(Metric.NUM_REMOTE_READERS, 1);
    } else {
      context.getStats().addLongStat(Metric.NUM_REMOTE_READERS, 0);
    }
  } catch (IOException e) {
    throw new ExecutionSetupException(e);
  }
}
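Note that the row-group range is openClosed while the block ranges are closedOpen, and Guava's Range.intersection throws IllegalArgumentException on disconnected ranges; the code above relies on getFileBlockLocations only returning blocks that overlap the requested span. A small standalone sketch of the same interval arithmetic, with made-up offsets:

import com.google.common.collect.Range;

Range<Long> rowGroup = Range.openClosed(100L, 300L);    // (100, 300]
Range<Long> block = Range.closedOpen(0L, 256L);         // [0, 256)
if (block.isConnected(rowGroup)) {                      // guard against disjoint ranges
  Range<Long> overlap = block.intersection(rowGroup);   // (100, 256)
  long localBytes = overlap.upperEndpoint() - overlap.lowerEndpoint(); // 156
}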
Example 5: computeHDFSBlocksDistribution
import org.apache.hadoop.fs.BlockLocation; // import the class the method depends on
/**
 * Compute HDFS blocks distribution of a given file, or a portion of the file.
 * @param fs file system
 * @param status file status of the file
 * @param start start position of the portion
 * @param length length of the portion
 * @return The HDFS blocks distribution
 */
public static HDFSBlocksDistribution computeHDFSBlocksDistribution(
    final FileSystem fs, FileStatus status, long start, long length)
    throws IOException {
  HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
  BlockLocation[] blockLocations =
      fs.getFileBlockLocations(status, start, length);
  for (BlockLocation bl : blockLocations) {
    String[] hosts = bl.getHosts();
    long len = bl.getLength();
    // Every host holding this block is credited with the block's full length.
    blocksDistribution.addHostsAndBlockWeight(hosts, len);
  }
  return blocksDistribution;
}
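A hedged usage sketch of the helper above: compute the distribution over a whole file (the path here is hypothetical) and print the share each top host holds. getTopHosts() and getBlockLocalityIndex() are methods of HBase's HDFSBlocksDistribution, but verify their names against your HBase version:

FileStatus status = fs.getFileStatus(new Path("/data/example-file")); // hypothetical path
HDFSBlocksDistribution dist =
    computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
for (String host : dist.getTopHosts()) {
  // locality index = bytes of the file on this host / total bytes of the file
  System.out.println(host + " -> " + dist.getBlockLocalityIndex(host));
}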
Example 6: splitFor
import org.apache.hadoop.fs.BlockLocation; // import the class the method depends on
/**
 * @param inputDir Pool used to resolve block locations.
 * @param bytes Target byte count.
 * @param nLocs Number of block locations per split.
 * @return A set of files satisfying the byte count, with locations weighted
 *         to the dominating proportion of input bytes.
 */
CombineFileSplit splitFor(FilePool inputDir, long bytes, int nLocs)
    throws IOException {
  final ArrayList<Path> paths = new ArrayList<Path>();
  final ArrayList<Long> start = new ArrayList<Long>();
  final ArrayList<Long> length = new ArrayList<Long>();
  final HashMap<String, Double> sb = new HashMap<String, Double>();
  do {
    paths.add(current.getPath());
    start.add(currentStart);
    final long fromFile = Math.min(bytes, current.getLen() - currentStart);
    length.add(fromFile);
    // Weight each host by the share of the requested bytes its blocks cover.
    for (BlockLocation loc :
        inputDir.locationsFor(current, currentStart, fromFile)) {
      final double tedium = loc.getLength() / (1.0 * bytes);
      for (String l : loc.getHosts()) {
        Double j = sb.get(l);
        if (null == j) {
          sb.put(l, tedium);
        } else {
          sb.put(l, j.doubleValue() + tedium);
        }
      }
    }
    currentStart += fromFile;
    bytes -= fromFile;
    // Switch to a new file if
    // - the current file is uncompressed and completely used, or
    // - the current file is compressed.
    CompressionCodecFactory compressionCodecs =
        new CompressionCodecFactory(conf);
    CompressionCodec codec = compressionCodecs.getCodec(current.getPath());
    if (current.getLen() - currentStart == 0 || codec != null) {
      current = files.get(++idx % files.size());
      currentStart = 0;
    }
  } while (bytes > 0);
  // Keep the nLocs hosts with the greatest accumulated weight.
  final ArrayList<Entry<String, Double>> sort =
      new ArrayList<Entry<String, Double>>(sb.entrySet());
  Collections.sort(sort, hostRank);
  final String[] hosts = new String[Math.min(nLocs, sort.size())];
  for (int i = 0; i < nLocs && i < sort.size(); ++i) {
    hosts[i] = sort.get(i).getKey();
  }
  return new CombineFileSplit(paths.toArray(new Path[0]),
      toLongArray(start), toLongArray(length), hosts);
}