本文整理汇总了Java中org.apache.hadoop.fs.HdfsBlockLocation类的典型用法代码示例。如果您正苦于以下问题:Java HdfsBlockLocation类的具体用法?Java HdfsBlockLocation怎么用?Java HdfsBlockLocation使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
HdfsBlockLocation类属于org.apache.hadoop.fs包,在下文中一共展示了HdfsBlockLocation类的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getFileBlockLocations
import org.apache.hadoop.fs.HdfsBlockLocation; //导入依赖的package包/类
/**
 * Returns block locations for a file, but substitutes an empty
 * (location-less) first block for the designated "missing blocks" file so
 * tests can exercise missing-replica handling.
 *
 * @param stat  status of the file to locate
 * @param start offset of the first byte of the requested region
 * @param len   length of the requested region
 * @return block locations from the superclass (first entry replaced for the
 *         missing-blocks file), or null when stat is a directory
 * @throws IOException if the underlying location lookup fails
 */
@Override
public BlockLocation[] getFileBlockLocations(
    FileStatus stat, long start, long len) throws IOException {
  // isDirectory() replaces the deprecated isDir(); behavior is identical.
  if (stat.isDirectory()) {
    return null;
  }
  System.out.println("File " + stat.getPath());
  String name = stat.getPath().toUri().getPath();
  BlockLocation[] locs =
      super.getFileBlockLocations(stat, start, len);
  if (name.equals(fileWithMissingBlocks)) {
    System.out.println("Returning missing blocks for " + fileWithMissingBlocks);
    // Replace the first block's location info with empty host/name arrays so
    // it appears to have no live replicas.
    locs[0] = new HdfsBlockLocation(new BlockLocation(new String[0],
        new String[0], locs[0].getOffset(), locs[0].getLength()), null);
  }
  return locs;
}
示例2: getBlockLocations
import org.apache.hadoop.fs.HdfsBlockLocation; //导入依赖的package包/类
/**
* Get block location info about file
*
* getBlockLocations() returns a list of hostnames that store
* data for a specific file region. It returns a set of hostnames
* for every block within the indicated region.
*
* This function is very useful when writing code that considers
* data-placement when performing operations. For example, the
* MapReduce system tries to schedule tasks on the same machines
* as the data-block the task processes.
*/
/**
 * Returns, for every block in the requested region of a file, the set of
 * hostnames storing that block's replicas.
 *
 * Useful for placement-aware code: for example, MapReduce schedules tasks
 * on the machines that already hold the data blocks they will process.
 *
 * @param src    path of the file to query
 * @param start  offset of the first byte of the region
 * @param length length of the region
 * @return one HdfsBlockLocation per block in the region
 * @throws IOException             on namenode communication failure
 * @throws UnresolvedLinkException if src contains an unresolved symlink
 */
public BlockLocation[] getBlockLocations(String src, long start,
    long length) throws IOException, UnresolvedLinkException {
  TraceScope scope = getPathTraceScope("getBlockLocations", src);
  try {
    LocatedBlocks located = getLocatedBlocks(src, start, length);
    BlockLocation[] generic = DFSUtil.locatedBlocks2Locations(located);
    // Wrap each generic location together with its LocatedBlock so callers
    // can later recover block-level detail via a downcast.
    HdfsBlockLocation[] wrapped = new HdfsBlockLocation[generic.length];
    int idx = 0;
    for (BlockLocation loc : generic) {
      wrapped[idx] = new HdfsBlockLocation(loc, located.get(idx));
      idx++;
    }
    return wrapped;
  } finally {
    scope.close();
  }
}
示例3: testUncacheUnknownBlock
import org.apache.hadoop.fs.HdfsBlockLocation; //导入依赖的package包/类
/**
 * Verifies that asking the datanode to uncache blocks it never cached bumps
 * the "failed to uncache" counter instead of silently succeeding.
 */
@Test(timeout=60000)
public void testUncacheUnknownBlock() throws Exception {
  // Create a small file so its blocks exist but are not cached.
  Path path = new Path("/testUncacheUnknownBlock");
  int length = 4096;
  DFSTestUtil.createFile(fs, path, length, (short)1, 0xFDFD);
  HdfsBlockLocation[] blockLocs = (HdfsBlockLocation[])fs.getFileBlockLocations(
      path, 0, length);
  // Send an uncache command for blocks that were never cached.
  setHeartbeatResponse(uncacheBlocks(blockLocs));
  // The datanode should eventually record at least one failed uncache.
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return fsd.getNumBlocksFailedToUncache() > 0;
    }
  }, 100, 10000);
}
示例4: testPageRounder
import org.apache.hadoop.fs.HdfsBlockLocation; //导入依赖的package包/类
/**
 * Verifies that cache-usage accounting rounds each cached block up to a full
 * OS page: caching numBlocks sub-page-sized blocks must account
 * PAGE_SIZE * numBlocks bytes, and uncaching must return usage to zero.
 */
@Test(timeout=60000)
public void testPageRounder() throws Exception {
  // Write a small file whose blocks are smaller than one page.
  Path fileName = new Path("/testPageRounder");
  final int smallBlocks = 512; // This should be smaller than the page size
  assertTrue("Page size should be greater than smallBlocks!",
      PAGE_SIZE > smallBlocks);
  final int numBlocks = 5;
  final int fileLen = smallBlocks * numBlocks;
  // try-with-resources: the original leaked the stream if write() threw.
  try (FSDataOutputStream out =
      fs.create(fileName, false, 4096, (short)1, smallBlocks)) {
    out.write(new byte[fileLen]);
  }
  HdfsBlockLocation[] locs = (HdfsBlockLocation[])fs.getFileBlockLocations(
      fileName, 0, fileLen);
  // Cache the file and check the sizes match the page size
  setHeartbeatResponse(cacheBlocks(locs));
  DFSTestUtil.verifyExpectedCacheUsage(PAGE_SIZE * numBlocks, numBlocks, fsd);
  // Uncache and check that it decrements by the page size too
  setHeartbeatResponse(uncacheBlocks(locs));
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
}
示例5: getBlockSizes
import org.apache.hadoop.fs.HdfsBlockLocation; //导入依赖的package包/类
/**
 * Measures the on-disk size of each block referenced by the given locations
 * by opening the block file on the datanode and querying its file channel.
 *
 * @param locs block locations whose backing files should be measured
 * @return on-disk size of each block, in the same order as locs
 * @throws Exception if a block file cannot be opened or sized
 */
private static long[] getBlockSizes(HdfsBlockLocation[] locs)
    throws Exception {
  final long[] result = new long[locs.length];
  for (int idx = 0; idx < locs.length; idx++) {
    String poolId = locs[idx].getLocatedBlock().getBlock().getBlockPoolId();
    Block localBlock = locs[idx].getLocatedBlock().getBlock().getLocalBlock();
    ExtendedBlock target = new ExtendedBlock(poolId, localBlock);
    FileInputStream in = null;
    FileChannel channel = null;
    try {
      in = (FileInputStream) fsd.getBlockInputStream(target, 0);
      channel = in.getChannel();
      result[idx] = channel.size();
    } finally {
      // Best-effort close of both resources; failures are logged, not thrown.
      IOUtils.cleanup(LOG, channel, in);
    }
  }
  return result;
}
示例6: getResponse
import org.apache.hadoop.fs.HdfsBlockLocation; //导入依赖的package包/类
/**
* Creates a cache or uncache DatanodeCommand from an array of locations
*/
/**
 * Creates a cache or uncache DatanodeCommand from an array of locations.
 * All locations are assumed to belong to the same block pool as locs[0].
 *
 * @param locs   non-empty array of block locations to act on
 * @param action cache/uncache action code for the BlockIdCommand
 * @return a BlockIdCommand covering every block id in locs
 */
private static DatanodeCommand getResponse(HdfsBlockLocation[] locs,
    int action) {
  final String poolId = locs[0].getLocatedBlock().getBlock().getBlockPoolId();
  final long[] ids = new long[locs.length];
  int pos = 0;
  for (HdfsBlockLocation loc : locs) {
    ids[pos++] = loc.getLocatedBlock().getBlock().getBlockId();
  }
  return new BlockIdCommand(action, poolId, ids);
}
示例7: getBlockLocations
import org.apache.hadoop.fs.HdfsBlockLocation; //导入依赖的package包/类
/**
* Get block location info about file
*
* getBlockLocations() returns a list of hostnames that store
* data for a specific file region. It returns a set of hostnames
* for every block within the indicated region.
*
* This function is very useful when writing code that considers
* data-placement when performing operations. For example, the
* MapReduce system tries to schedule tasks on the same machines
* as the data-block the task processes.
*/
/**
 * Returns, for every block in the requested region of a file, the set of
 * hostnames storing that block's replicas.
 *
 * Useful for placement-aware code: for example, MapReduce schedules tasks
 * on the machines that already hold the data blocks they will process.
 *
 * @param src    path of the file to query
 * @param start  offset of the first byte of the region
 * @param length length of the region
 * @return one HdfsBlockLocation per block in the region
 * @throws IOException if the client is closed or the lookup fails
 */
public BlockLocation[] getBlockLocations(String src, long start,
    long length) throws IOException {
  checkOpen();
  try (TraceScope ignored = newPathTraceScope("getBlockLocations", src)) {
    LocatedBlocks located = getLocatedBlocks(src, start, length);
    BlockLocation[] plain = DFSUtilClient.locatedBlocks2Locations(located);
    // Pair each generic location with its LocatedBlock so callers can later
    // recover block-level detail via a downcast.
    HdfsBlockLocation[] wrapped = new HdfsBlockLocation[plain.length];
    int pos = 0;
    for (BlockLocation loc : plain) {
      wrapped[pos] = new HdfsBlockLocation(loc, located.get(pos));
      pos++;
    }
    return wrapped;
  }
}
示例8: getBlockSizes
import org.apache.hadoop.fs.HdfsBlockLocation; //导入依赖的package包/类
/**
 * Measures the on-disk size of each block referenced by the given locations
 * by opening the block file on the datanode and querying its file channel.
 *
 * @param locs block locations whose backing files should be measured
 * @return on-disk size of each block, in the same order as locs
 * @throws Exception if a block file cannot be opened or sized
 */
private static long[] getBlockSizes(HdfsBlockLocation[] locs)
    throws Exception {
  long[] sizes = new long[locs.length];
  for (int i = 0; i < locs.length; i++) {
    HdfsBlockLocation loc = locs[i];
    String bpid = loc.getLocatedBlock().getBlock().getBlockPoolId();
    Block block = loc.getLocatedBlock().getBlock().getLocalBlock();
    ExtendedBlock extBlock = new ExtendedBlock(bpid, block);
    // try-with-resources: the original never closed the stream or channel,
    // leaking a file descriptor per block.
    try (FileInputStream blockInputStream =
             (FileInputStream) fsd.getBlockInputStream(extBlock, 0);
         FileChannel blockChannel = blockInputStream.getChannel()) {
      sizes[i] = blockChannel.size();
    }
  }
  return sizes;
}
示例9: getFirstBlockId
import org.apache.hadoop.fs.HdfsBlockLocation; //导入依赖的package包/类
/**
 * Looks up the id of the first block of the given file and renders it
 * through toNiceString().
 *
 * @param fileSystem file system to query (must return HdfsBlockLocations)
 * @param realFile   path of the file whose first block id is wanted
 * @return the formatted id of the file's first block
 * @throws IOException if the status or location lookup fails
 */
private static String getFirstBlockId(FileSystem fileSystem, Path realFile) throws IOException {
  FileStatus status = fileSystem.getFileStatus(realFile);
  // Only the first byte is needed to resolve the first block.
  BlockLocation[] allLocations = fileSystem.getFileBlockLocations(status, 0, 1);
  LocatedBlock firstBlock =
      ((HdfsBlockLocation) allLocations[0]).getLocatedBlock();
  return toNiceString(firstBlock.getBlock().getBlockId());
}
示例10: getBlockStorageLocations
import org.apache.hadoop.fs.HdfsBlockLocation; //导入依赖的package包/类
/**
* Get block location information about a list of {@link HdfsBlockLocation}.
* Used by {@link DistributedFileSystem#getFileBlockStorageLocations(List)} to
* get {@link BlockStorageLocation}s for blocks returned by
* {@link DistributedFileSystem#getFileBlockLocations(org.apache.hadoop.fs.FileStatus, long, long)}
* .
*
* This is done by making a round of RPCs to the associated datanodes, asking
* the volume of each block replica. The returned array of
* {@link BlockStorageLocation} expose this information as a
* {@link VolumeId}.
*
* @param blockLocations
* target blocks on which to query volume location information
* @return volumeBlockLocations original block array augmented with additional
* volume location information for each replica.
*/
public BlockStorageLocation[] getBlockStorageLocations(
List<BlockLocation> blockLocations) throws IOException,
UnsupportedOperationException, InvalidBlockTokenException {
if (!getConf().getHdfsBlocksMetadataEnabled) {
throw new UnsupportedOperationException("Datanode-side support for " +
"getVolumeBlockLocations() must also be enabled in the client " +
"configuration.");
}
// Downcast blockLocations and fetch out required LocatedBlock(s)
List<LocatedBlock> blocks = new ArrayList<LocatedBlock>();
for (BlockLocation loc : blockLocations) {
if (!(loc instanceof HdfsBlockLocation)) {
throw new ClassCastException("DFSClient#getVolumeBlockLocations " +
"expected to be passed HdfsBlockLocations");
}
HdfsBlockLocation hdfsLoc = (HdfsBlockLocation) loc;
blocks.add(hdfsLoc.getLocatedBlock());
}
// Re-group the LocatedBlocks to be grouped by datanodes, with the values
// a list of the LocatedBlocks on the datanode.
Map<DatanodeInfo, List<LocatedBlock>> datanodeBlocks =
new LinkedHashMap<DatanodeInfo, List<LocatedBlock>>();
for (LocatedBlock b : blocks) {
for (DatanodeInfo info : b.getLocations()) {
if (!datanodeBlocks.containsKey(info)) {
datanodeBlocks.put(info, new ArrayList<LocatedBlock>());
}
List<LocatedBlock> l = datanodeBlocks.get(info);
l.add(b);
}
}
// Make RPCs to the datanodes to get volume locations for its replicas
TraceScope scope =
Trace.startSpan("getBlockStorageLocations", traceSampler);
Map<DatanodeInfo, HdfsBlocksMetadata> metadatas;
try {
metadatas = BlockStorageLocationUtil.
queryDatanodesForHdfsBlocksMetadata(conf, datanodeBlocks,
getConf().getFileBlockStorageLocationsNumThreads,
getConf().getFileBlockStorageLocationsTimeoutMs,
getConf().connectToDnViaHostname);
if (LOG.isTraceEnabled()) {
LOG.trace("metadata returned: "
+ Joiner.on("\n").withKeyValueSeparator("=").join(metadatas));
}
} finally {
scope.close();
}
// Regroup the returned VolumeId metadata to again be grouped by
// LocatedBlock rather than by datanode
Map<LocatedBlock, List<VolumeId>> blockVolumeIds = BlockStorageLocationUtil
.associateVolumeIdsWithBlocks(blocks, metadatas);
// Combine original BlockLocations with new VolumeId information
BlockStorageLocation[] volumeBlockLocations = BlockStorageLocationUtil
.convertToVolumeBlockLocations(blocks, blockVolumeIds);
return volumeBlockLocations;
}