This article collects typical usage examples of the Java class org.apache.hadoop.fs.BlockLocation. If you are wondering what BlockLocation is for and how to use it, the selected examples below should help.
The BlockLocation class belongs to the org.apache.hadoop.fs package. Fifteen code examples are shown below, ordered by popularity.
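Before the individual examples, here is a minimal, self-contained sketch of the typical call pattern (the path and configuration are illustrative, not taken from any example below): obtain a FileStatus, ask the FileSystem for the BlockLocations covering a byte range, then inspect each block's offset, length, and hosts.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockLocationDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FileStatus status = fs.getFileStatus(new Path("/data/largeFile")); // hypothetical path
    // Ask for the blocks covering the whole file.
    BlockLocation[] blocks = fs.getFileBlockLocations(status, 0, status.getLen());
    for (BlockLocation block : blocks) {
      System.out.printf("offset=%d length=%d hosts=%s%n",
          block.getOffset(), block.getLength(), String.join(",", block.getHosts()));
    }
    fs.close();
  }
}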
Example 1: testGetBlockLocations
import org.apache.hadoop.fs.BlockLocation; // import the required package/class
@Test
public void testGetBlockLocations() throws IOException {
  Path targetFilePath = new Path(targetTestRoot, "data/largeFile");
  FileContextTestHelper.createFile(fcTarget, targetFilePath, 10, 1024);
  Path viewFilePath = new Path("/data/largeFile");
  checkFileStatus(fcView, viewFilePath.toString(), fileType.isFile);
  BlockLocation[] viewBL = fcView.getFileBlockLocations(viewFilePath, 0, 10240 + 100);
  Assert.assertEquals(SupportsBlocks ? 10 : 1, viewBL.length);
  BlockLocation[] targetBL = fcTarget.getFileBlockLocations(targetFilePath, 0, 10240 + 100);
  compareBLs(viewBL, targetBL);
  // Same test, but re-fetch both sides before comparing
  viewBL = fcView.getFileBlockLocations(viewFilePath, 0, 10240 + 100);
  targetBL = fcTarget.getFileBlockLocations(targetFilePath, 0, 10240 + 100);
  compareBLs(viewBL, targetBL);
}
Example 2: buildBlockMap
import org.apache.hadoop.fs.BlockLocation; // import the required package/class
/**
 * Builds a mapping of block locations to file byte ranges.
 */
private ImmutableRangeMap<Long, BlockLocation> buildBlockMap(FileStatus status) throws IOException {
  final Timer.Context context = metrics.timer(BLOCK_MAP_BUILDER_TIMER).time();
  BlockLocation[] blocks = fs.getFileBlockLocations(status, 0, status.getLen());
  ImmutableRangeMap.Builder<Long, BlockLocation> blockMapBuilder =
      new ImmutableRangeMap.Builder<Long, BlockLocation>();
  for (BlockLocation block : blocks) {
    long start = block.getOffset();
    long end = start + block.getLength();
    // Each block covers the half-open byte range [start, end)
    Range<Long> range = Range.closedOpen(start, end);
    blockMapBuilder = blockMapBuilder.put(range, block);
  }
  ImmutableRangeMap<Long, BlockLocation> blockMap = blockMapBuilder.build();
  blockMapMap.put(status.getPath(), blockMap);
  context.stop();
  return blockMap;
}
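Once such a map is built, the block holding any file offset can be looked up directly via Guava's RangeMap interface. A hedged usage sketch (status is assumed to be the FileStatus passed to buildBlockMap above; the offset is invented):
// Which block holds byte offset 150000000 of the file?
ImmutableRangeMap<Long, BlockLocation> blockMap = buildBlockMap(status);
BlockLocation block = blockMap.get(150000000L); // null if the offset is beyond EOF
if (block != null) {
  System.out.println("Offset is stored on: " + String.join(",", block.getHosts()));
}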
Example 3: getHostAffinity
import org.apache.hadoop.fs.BlockLocation; // import the required package/class
/**
 * Get the host affinity for a row group.
 * @param fileStatus the parquet file
 * @param start the start of the row group
 * @param length the length of the row group
 * @return a map from host name to the fraction of the row group stored on that host
 * @throws IOException
 */
private Map<String, Float> getHostAffinity(FileStatus fileStatus, long start, long length) throws IOException {
  BlockLocation[] blockLocations = fs.getFileBlockLocations(fileStatus, start, length);
  Map<String, Float> hostAffinityMap = Maps.newHashMap();
  for (BlockLocation blockLocation : blockLocations) {
    for (String host : blockLocation.getHosts()) {
      Float currentAffinity = hostAffinityMap.get(host);
      float blockStart = blockLocation.getOffset();
      float blockEnd = blockStart + blockLocation.getLength();
      float rowGroupEnd = start + length;
      // Affinity is the fraction of the row group's bytes that live in this block:
      // clip the block to the row group's [start, start + length) range, then divide.
      Float newAffinity = (blockLocation.getLength() - (blockStart < start ? start - blockStart : 0) -
          (blockEnd > rowGroupEnd ? blockEnd - rowGroupEnd : 0)) / length;
      if (currentAffinity != null) {
        hostAffinityMap.put(host, currentAffinity + newAffinity);
      } else {
        hostAffinityMap.put(host, newAffinity);
      }
    }
  }
  return hostAffinityMap;
}
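To make the affinity arithmetic concrete, a worked example with invented numbers (not taken from the code above):
// 128-byte blocks, row group = [100, 300), so length = 200:
//   block [0, 128)   on host1 -> overlap  28 bytes -> affinity  28/200 = 0.14
//   block [128, 256) on host2 -> overlap 128 bytes -> affinity 128/200 = 0.64
//   block [256, 384) on host3 -> overlap  44 bytes -> affinity  44/200 = 0.22
// When each block lives on a single host, the affinities sum to 1.0.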
Example 4: buildBlockLocations2
import org.apache.hadoop.fs.BlockLocation; // import the required package/class
public BlockLocation[] buildBlockLocations2(String[] hosts, long blockSize) {
  String[] names = new String[hosts.length];
  for (int i = 0; i < hosts.length; i++) {
    hosts[i] = "host" + i;
    names[i] = "host:" + port;
  }
  // BlockLocation's four-argument constructor takes (names, hosts, offset, length).
  BlockLocation[] blockLocations = new BlockLocation[4];
  blockLocations[0] = new BlockLocation(new String[]{names[0]}, new String[]{hosts[0]}, 0, blockSize);
  blockLocations[1] = new BlockLocation(new String[]{names[1]}, new String[]{hosts[1]}, blockSize, blockSize);
  // Note: entries 2 and 3 are assigned out of offset order, presumably so the
  // array is not sorted by offset (blockLocations[3] starts at blockSize*2).
  blockLocations[3] = new BlockLocation(new String[]{names[3]}, new String[]{hosts[3]}, blockSize*2, blockSize);
  blockLocations[2] = new BlockLocation(new String[]{names[2]}, new String[]{hosts[2]}, blockSize*3, blockSize);
  return blockLocations;
}
Example 5: testGetBlockLocations
import org.apache.hadoop.fs.BlockLocation; // import the required package/class
@Test
public void testGetBlockLocations() throws IOException {
  Path targetFilePath = new Path(targetTestRoot, "data/largeFile");
  FileSystemTestHelper.createFile(fsTarget,
      targetFilePath, 10, 1024);
  Path viewFilePath = new Path("/data/largeFile");
  Assert.assertTrue("Created File should be type File",
      fsView.isFile(viewFilePath));
  BlockLocation[] viewBL = fsView.getFileBlockLocations(fsView.getFileStatus(viewFilePath), 0, 10240 + 100);
  Assert.assertEquals(SupportsBlocks ? 10 : 1, viewBL.length);
  BlockLocation[] targetBL = fsTarget.getFileBlockLocations(fsTarget.getFileStatus(targetFilePath), 0, 10240 + 100);
  compareBLs(viewBL, targetBL);
  // Same test, but re-fetch the locations via the FileStatus parameter before comparing
  viewBL = fsView.getFileBlockLocations(
      fsView.getFileStatus(viewFilePath), 0, 10240 + 100);
  targetBL = fsTarget.getFileBlockLocations(
      fsTarget.getFileStatus(targetFilePath), 0, 10240 + 100);
  compareBLs(viewBL, targetBL);
}
Example 6: listStatus
import org.apache.hadoop.fs.BlockLocation; // import the required package/class
@Override
protected FileStatus[] listStatus(JobConf job) throws IOException {
  // Build a fully mocked FileStatus whose FileSystem serves synthetic block locations.
  FileStatus mockFileStatus = mock(FileStatus.class);
  when(mockFileStatus.getBlockSize()).thenReturn(splitSize);
  when(mockFileStatus.isDirectory()).thenReturn(false);
  Path mockPath = mock(Path.class);
  FileSystem mockFs = mock(FileSystem.class);
  BlockLocation[] blockLocations = mockBlockLocations(length, splitSize);
  when(mockFs.getFileBlockLocations(mockFileStatus, 0, length)).thenReturn(
      blockLocations);
  when(mockPath.getFileSystem(any(Configuration.class))).thenReturn(mockFs);
  when(mockFileStatus.getPath()).thenReturn(mockPath);
  when(mockFileStatus.getLen()).thenReturn(length);
  FileStatus[] fs = new FileStatus[1];
  fs[0] = mockFileStatus;
  return fs;
}
Example 7: getFileBlockLocations
import org.apache.hadoop.fs.BlockLocation; // import the required package/class
@Override
public BlockLocation[] getFileBlockLocations(
    FileStatus stat, long start, long len) throws IOException {
  if (stat.isDir()) {
    return null;
  }
  System.out.println("File " + stat.getPath());
  String name = stat.getPath().toUri().getPath();
  BlockLocation[] locs =
      super.getFileBlockLocations(stat, start, len);
  if (name.equals(fileWithMissingBlocks)) {
    // Replace the first block with one that reports no hosts, simulating a missing block.
    System.out.println("Returning missing blocks for " + fileWithMissingBlocks);
    locs[0] = new HdfsBlockLocation(new BlockLocation(new String[0],
        new String[0], locs[0].getOffset(), locs[0].getLength()), null);
  }
  return locs;
}
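A caller of this override can spot the simulated missing block by its empty host list. A minimal hedged sketch (locs is assumed to come from the method above):
for (BlockLocation loc : locs) {
  if (loc.getHosts().length == 0) {
    System.out.println("No live replicas for block at offset " + loc.getOffset());
  }
}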
Example 8: convertToVolumeBlockLocations
import org.apache.hadoop.fs.BlockLocation; // import the required package/class
/**
 * Helper method to combine a list of {@link LocatedBlock} with associated
 * {@link VolumeId} information to form a list of {@link BlockStorageLocation}.
 */
static BlockStorageLocation[] convertToVolumeBlockLocations(
    List<LocatedBlock> blocks,
    Map<LocatedBlock, List<VolumeId>> blockVolumeIds) throws IOException {
  // Construct the final return value of VolumeBlockLocation[]
  BlockLocation[] locations = DFSUtil.locatedBlocks2Locations(blocks);
  List<BlockStorageLocation> volumeBlockLocs =
      new ArrayList<BlockStorageLocation>(locations.length);
  for (int i = 0; i < locations.length; i++) {
    LocatedBlock locBlock = blocks.get(i);
    List<VolumeId> volumeIds = blockVolumeIds.get(locBlock);
    BlockStorageLocation bsLoc = new BlockStorageLocation(locations[i],
        volumeIds.toArray(new VolumeId[0]));
    volumeBlockLocs.add(bsLoc);
  }
  return volumeBlockLocs.toArray(new BlockStorageLocation[] {});
}
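For completeness, a hedged sketch of consuming the result; BlockStorageLocation extends BlockLocation and adds getVolumeIds() (the blocks and blockVolumeIds arguments are assumed to come from the surrounding class):
BlockStorageLocation[] storageLocs = convertToVolumeBlockLocations(blocks, blockVolumeIds);
for (BlockStorageLocation loc : storageLocs) {
  System.out.println("Block at offset " + loc.getOffset());
  for (VolumeId id : loc.getVolumeIds()) {
    // Each VolumeId is an opaque identifier for the datanode disk holding a replica.
    System.out.println("  replica volume: " + id);
  }
}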
Example 9: checkFile
import org.apache.hadoop.fs.BlockLocation; // import the required package/class
private void checkFile(FileSystem fileSys, Path name) throws IOException {
  BlockLocation[] locations = fileSys.getFileBlockLocations(
      fileSys.getFileStatus(name), 0, fileSize);
  // The test appears to use one-byte blocks, so a fileSize-byte file yields fileSize blocks.
  assertEquals("Number of blocks", fileSize, locations.length);
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[fileSize];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; ++i) {
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // Do a sanity check: read the file back and compare against the expected bytes.
  byte[] actual = new byte[fileSize];
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  stm.close();
}
Example 10: testGetFileBlockLocations
import org.apache.hadoop.fs.BlockLocation; // import the required package/class
public void testGetFileBlockLocations() throws IOException {
  final String f = "/test/testGetFileBlockLocations";
  createFile(path(f));
  final BlockLocation[] computed = fs.getFileBlockLocations(new Path(f), 0L, 1L);
  final BlockLocation[] expected = cluster.getFileSystem().getFileBlockLocations(
      new Path(f), 0L, 1L);
  assertEquals(expected.length, computed.length);
  for (int i = 0; i < computed.length; i++) {
    assertEquals(expected[i].toString(), computed[i].toString());
    // Check names
    String[] names1 = expected[i].getNames();
    String[] names2 = computed[i].getNames();
    Arrays.sort(names1);
    Arrays.sort(names2);
    Assert.assertArrayEquals("Names differ", names1, names2);
    // Check topology
    String[] topos1 = expected[i].getTopologyPaths();
    String[] topos2 = computed[i].getTopologyPaths();
    Arrays.sort(topos1);
    Arrays.sort(topos2);
    Assert.assertArrayEquals("Topology differs", topos1, topos2);
  }
}
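As a reference for the accessors compared above, a hedged illustration (the output values are invented and ports vary by Hadoop version):
BlockLocation loc = computed[0];
String[] hosts = loc.getHosts();         // e.g. {"dn1.example.com"} - hostnames only
String[] names = loc.getNames();         // e.g. {"dn1.example.com:9866"} - hostname:port per datanode
String[] topos = loc.getTopologyPaths(); // e.g. {"/default-rack/dn1.example.com:9866"} - full topology path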
Example 11: waitForBlocks
import org.apache.hadoop.fs.BlockLocation; // import the required package/class
private void waitForBlocks(FileSystem fileSys, Path name)
    throws IOException {
  // Wait until the file has at least one block available to read.
  boolean done = false;
  while (!done) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // ignore and keep polling
    }
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
        fileSys.getFileStatus(name), 0, blockSize);
    if (locations.length < 1) {
      done = false;
    }
  }
}
Example 12: testFavoredNodesEndToEnd
import org.apache.hadoop.fs.BlockLocation; // import the required package/class
@Test(timeout=180000)
public void testFavoredNodesEndToEnd() throws Exception {
  // Create NUM_FILES files, each with randomly chosen preferred (favored) nodes.
  for (int i = 0; i < NUM_FILES; i++) {
    // Pass a newly created Random so each file gets a uniform distribution of
    // datanodes without too many collisions (see the do-while loop in getDatanodes).
    Random rand = new Random(System.currentTimeMillis() + i);
    InetSocketAddress[] datanode = getDatanodes(rand);
    Path p = new Path("/filename" + i);
    FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true,
        4096, (short) 3, 4096L, null, datanode);
    out.write(SOME_BYTES);
    out.close();
    BlockLocation[] locations = getBlockLocations(p);
    // Verify the blocks were placed on the requested favored nodes.
    for (BlockLocation loc : locations) {
      String[] hosts = loc.getNames();
      String[] hosts1 = getStringForInetSocketAddrs(datanode);
      assertTrue(compareNodes(hosts, hosts1));
    }
  }
}
Example 13: testFavoredNodesEndToEndForAppend
import org.apache.hadoop.fs.BlockLocation; // import the required package/class
@Test(timeout = 180000)
public void testFavoredNodesEndToEndForAppend() throws Exception {
  // Create NUM_FILES files, each appended to with randomly chosen preferred (favored) nodes.
  for (int i = 0; i < NUM_FILES; i++) {
    // Pass a newly created Random so each file gets a uniform distribution of
    // datanodes without too many collisions (see the do-while loop in getDatanodes).
    Random rand = new Random(System.currentTimeMillis() + i);
    InetSocketAddress[] datanode = getDatanodes(rand);
    Path p = new Path("/filename" + i);
    // Create and close the file, then re-open it for append with favored nodes.
    dfs.create(p, FsPermission.getDefault(), true, 4096, (short) 3, 4096L,
        null, null).close();
    FSDataOutputStream out = dfs.append(p, EnumSet.of(CreateFlag.APPEND),
        4096, null, datanode);
    out.write(SOME_BYTES);
    out.close();
    BlockLocation[] locations = getBlockLocations(p);
    // Verify the appended blocks were placed on the requested favored nodes.
    for (BlockLocation loc : locations) {
      String[] hosts = loc.getNames();
      String[] hosts1 = getStringForInetSocketAddrs(datanode);
      assertTrue(compareNodes(hosts, hosts1));
    }
  }
}
Example 14: writeFile
import org.apache.hadoop.fs.BlockLocation; // import the required package/class
void writeFile(Path file, FSDataOutputStream stm, int size)
    throws IOException {
  long blocksBefore = stm.getPos() / BLOCK_SIZE;
  TestFileCreation.writeFile(stm, BLOCK_SIZE);
  // Need to make sure the full block is completely flushed to the DataNodes
  // (see FSOutputSummer#flush).
  stm.flush();
  // Wait until the new block is allocated by the DataStreamer.
  int blocksAfter = 0;
  BlockLocation[] locatedBlocks;
  while (blocksAfter <= blocksBefore) {
    locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(
        file.toString(), 0L, BLOCK_SIZE * NUM_BLOCKS);
    blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
  }
}
Example 15: testRWJson
import org.apache.hadoop.fs.BlockLocation; // import the required package/class
/**
 * Read and write some JSON.
 * @throws IOException
 */
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRWJson() throws IOException {
  final String message = "{" +
      " 'json': { 'i':43, 'b':true}," +
      " 's':'string'" +
      "}";
  final Path filePath = new Path("/test/file.json");
  writeTextFile(fs, filePath, message, false);
  String readJson = readBytesToString(fs, filePath, message.length());
  assertEquals(message, readJson);
  // Now find out where it is; the result is intentionally unused, the call itself is the check.
  FileStatus status = fs.getFileStatus(filePath);
  BlockLocation[] locations = fs.getFileBlockLocations(status, 0, 10);
}