This article collects typical usage examples of the Java method org.apache.hadoop.fs.BlockLocation.getLength. If you are wondering how BlockLocation.getLength is used in practice, the curated examples below should help. You can also explore further usage examples of its containing class, org.apache.hadoop.fs.BlockLocation.
Six code examples of the BlockLocation.getLength method are shown below, ordered by popularity.
Example 1: buildBlockMap
import org.apache.hadoop.fs.BlockLocation; // import the package/class the method depends on

/**
 * Builds a mapping of block locations to file byte ranges.
 */
private ImmutableRangeMap<Long, BlockLocation> buildBlockMap(FileStatus status) throws IOException {
  final Timer.Context context = metrics.timer(BLOCK_MAP_BUILDER_TIMER).time();
  BlockLocation[] blocks = fs.getFileBlockLocations(status, 0, status.getLen());
  ImmutableRangeMap.Builder<Long, BlockLocation> blockMapBuilder = new ImmutableRangeMap.Builder<>();
  for (BlockLocation block : blocks) {
    // Key each block by its half-open byte range [offset, offset + length).
    long start = block.getOffset();
    long end = start + block.getLength();
    Range<Long> range = Range.closedOpen(start, end);
    blockMapBuilder = blockMapBuilder.put(range, block);
  }
  ImmutableRangeMap<Long, BlockLocation> blockMap = blockMapBuilder.build();
  blockMapMap.put(status.getPath(), blockMap);
  context.stop();
  return blockMap;
}
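Because each block is keyed by the half-open range [offset, offset + length), the returned map supports efficient lookup of the block covering any byte offset via Guava's RangeMap.get. A minimal sketch, assuming a FileStatus named status is in scope and the caller can throw IOException (the 128 MB offset is illustrative, not from the source):

ImmutableRangeMap<Long, BlockLocation> blockMap = buildBlockMap(status);

// RangeMap.get(key) returns the value whose range contains the key, or null.
long offset = 128L * 1024 * 1024;
BlockLocation block = blockMap.get(offset);
if (block != null) {
  System.out.printf("offset %d lies in block [%d, %d)%n",
      offset, block.getOffset(), block.getOffset() + block.getLength());
}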
Example 2: getHostAffinity
import org.apache.hadoop.fs.BlockLocation; // import the package/class the method depends on

/**
 * Get the host affinity for a row group.
 * @param fileStatus the parquet file
 * @param start the start of the row group
 * @param length the length of the row group
 * @return a map from host name to the fraction of the row group's bytes stored on that host
 * @throws IOException if the block locations cannot be retrieved
 */
private Map<String, Float> getHostAffinity(FileStatus fileStatus, long start, long length) throws IOException {
  BlockLocation[] blockLocations = fs.getFileBlockLocations(fileStatus, start, length);
  Map<String, Float> hostAffinityMap = Maps.newHashMap();
  for (BlockLocation blockLocation : blockLocations) {
    for (String host : blockLocation.getHosts()) {
      Float currentAffinity = hostAffinityMap.get(host);
      // Use double arithmetic for offsets: float cannot represent values
      // beyond 2^24 exactly, which would skew affinities for large files.
      double blockStart = blockLocation.getOffset();
      double blockEnd = blockStart + blockLocation.getLength();
      double rowGroupEnd = start + length;
      // Fraction of the row group's bytes inside this block: clip the block
      // to the row group, then divide by the row group length.
      float newAffinity = (float) ((blockLocation.getLength()
          - (blockStart < start ? start - blockStart : 0)
          - (blockEnd > rowGroupEnd ? blockEnd - rowGroupEnd : 0)) / length);
      if (currentAffinity != null) {
        hostAffinityMap.put(host, currentAffinity + newAffinity);
      } else {
        hostAffinityMap.put(host, newAffinity);
      }
    }
  }
  return hostAffinityMap;
}
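As a worked example of the affinity formula, assume (hypothetically) 256-byte blocks and a 200-byte row group starting at offset 100. The host of block [0, 256) receives (256 - 100) / 200 = 0.78, the host of block [256, 512) receives (256 - (512 - 300)) / 200 = 0.22, and the per-host affinities over the row group sum to 1.0.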
Example 3: testBuildRangeMap
import org.apache.hadoop.fs.BlockLocation; // import the package/class the method depends on

@Test
public void testBuildRangeMap() {
  BlockLocation[] blocks = buildBlockLocations(new String[4], 256 * 1024 * 1024);
  long tA = System.nanoTime();
  ImmutableRangeMap.Builder<Long, BlockLocation> blockMapBuilder = new ImmutableRangeMap.Builder<>();
  for (BlockLocation block : blocks) {
    long start = block.getOffset();
    long end = start + block.getLength();
    Range<Long> range = Range.closedOpen(start, end);
    blockMapBuilder = blockMapBuilder.put(range, block);
  }
  ImmutableRangeMap<Long, BlockLocation> map = blockMapBuilder.build();
  long tB = System.nanoTime();
  System.out.println(String.format("Took %f ms to build range map", (tB - tA) / 1e6));
}
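The buildBlockLocations helper used by this test is not shown on this page. A hypothetical reconstruction, assuming it fabricates one fixed-size block per array entry, laid out back to back (the host names are made up):

// Hypothetical reconstruction of the buildBlockLocations helper: it is not
// shown in the source, so this sketch is an assumption.
private BlockLocation[] buildBlockLocations(String[] files, long blockSize) {
  BlockLocation[] blocks = new BlockLocation[files.length];
  for (int i = 0; i < files.length; i++) {
    String[] hosts = {"host" + i};
    blocks[i] = new BlockLocation(hosts, hosts, i * blockSize, blockSize);
  }
  return blocks;
}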
Example 4: getBlockIndex
import org.apache.hadoop.fs.BlockLocation; // import the package/class the method depends on

protected int getBlockIndex(BlockLocation[] blkLocations, long offset) {
  for (int i = 0; i < blkLocations.length; i++) {
    // Is the offset inside this block?
    if ((blkLocations[i].getOffset() <= offset)
        && (offset < blkLocations[i].getOffset() + blkLocations[i].getLength())) {
      return i;
    }
  }
  BlockLocation last = blkLocations[blkLocations.length - 1];
  long fileLength = last.getOffset() + last.getLength() - 1;
  throw new IllegalArgumentException("Offset " + offset
      + " is outside of file (0.." + fileLength + ")");
}
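The scan is linear over the offset-ordered block array, and the error message reports the last valid byte index (last.getOffset() + last.getLength() - 1). A hypothetical usage sketch, assuming fs and status are in scope and the caller can throw IOException:

BlockLocation[] blocks = fs.getFileBlockLocations(status, 0, status.getLen());
int idx = getBlockIndex(blocks, 42L);       // index of the block holding byte 42
String[] hosts = blocks[idx].getHosts();    // datanodes storing that block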
Example 5: computeHDFSBlocksDistribution
import org.apache.hadoop.fs.BlockLocation; // import the package/class the method depends on

/**
 * Compute the HDFS blocks distribution of a given file, or a portion of the file.
 * @param fs file system
 * @param status file status of the file
 * @param start start position of the portion
 * @param length length of the portion
 * @return the HDFS blocks distribution
 */
public static HDFSBlocksDistribution computeHDFSBlocksDistribution(
    final FileSystem fs, FileStatus status, long start, long length)
    throws IOException {
  HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
  BlockLocation[] blockLocations = fs.getFileBlockLocations(status, start, length);
  for (BlockLocation bl : blockLocations) {
    // Each replica host is credited with the full block length as weight.
    String[] hosts = bl.getHosts();
    long len = bl.getLength();
    blocksDistribution.addHostsAndBlockWeight(hosts, len);
  }
  return blocksDistribution;
}
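A minimal sketch of calling this over a whole file and inspecting the result, assuming fs and path are in scope (getTopHosts and getWeight are accessors on HBase's HDFSBlocksDistribution):

FileStatus status = fs.getFileStatus(path);
HDFSBlocksDistribution dist =
    computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
// Hosts ordered by the total block weight (bytes) they hold for this file.
for (String host : dist.getTopHosts()) {
  System.out.println(host + " -> " + dist.getWeight(host));
}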
Example 6: splitFor
import org.apache.hadoop.fs.BlockLocation; // import the package/class the method depends on

/**
 * @param inputDir Pool used to resolve block locations.
 * @param bytes Target byte count.
 * @param nLocs Number of block locations per split.
 * @return A set of files satisfying the byte count, with locations weighted
 *         to the dominating proportion of input bytes.
 */
CombineFileSplit splitFor(FilePool inputDir, long bytes, int nLocs)
    throws IOException {
  final ArrayList<Path> paths = new ArrayList<>();
  final ArrayList<Long> start = new ArrayList<>();
  final ArrayList<Long> length = new ArrayList<>();
  final HashMap<String, Double> sb = new HashMap<>();
  // Build the codec factory once, outside the loop, rather than per iteration.
  final CompressionCodecFactory compressionCodecs = new CompressionCodecFactory(conf);
  do {
    paths.add(current.getPath());
    start.add(currentStart);
    final long fromFile = Math.min(bytes, current.getLen() - currentStart);
    length.add(fromFile);
    for (BlockLocation loc :
        inputDir.locationsFor(current, currentStart, fromFile)) {
      // Weight each host by this block's share of the requested bytes.
      final double tedium = loc.getLength() / (1.0 * bytes);
      for (String l : loc.getHosts()) {
        Double j = sb.get(l);
        if (null == j) {
          sb.put(l, tedium);
        } else {
          sb.put(l, j.doubleValue() + tedium);
        }
      }
    }
    currentStart += fromFile;
    bytes -= fromFile;
    // Switch to a new file if
    // - the current file is uncompressed and completely used, or
    // - the current file is compressed
    CompressionCodec codec = compressionCodecs.getCodec(current.getPath());
    if (current.getLen() - currentStart == 0 || codec != null) {
      current = files.get(++idx % files.size());
      currentStart = 0;
    }
  } while (bytes > 0);
  final ArrayList<Entry<String, Double>> sort = new ArrayList<>(sb.entrySet());
  Collections.sort(sort, hostRank);
  final String[] hosts = new String[Math.min(nLocs, sort.size())];
  for (int i = 0; i < nLocs && i < sort.size(); ++i) {
    hosts[i] = sort.get(i).getKey();
  }
  return new CombineFileSplit(paths.toArray(new Path[0]),
      toLongArray(start), toLongArray(length), hosts);
}