This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols.getBlockLocations. If you are wondering what NamenodeProtocols.getBlockLocations does and how to use it in practice, the curated examples below may help; you can also explore further usage of the enclosing class, org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols.
Below are 10 code examples of NamenodeProtocols.getBlockLocations, sorted by popularity by default.
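Before the collected examples, here is a minimal, self-contained sketch of what a typical call looks like. It is not taken from any of the examples below: the MiniDFSCluster setup (which lives in the hadoop-hdfs test artifacts), the file name /demo.dat, and the 4096-byte write are assumptions chosen for illustration.
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;

public class GetBlockLocationsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // A single-datanode in-process cluster, as used by the HDFS tests below.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      Path p = new Path("/demo.dat");  // hypothetical file name
      try (FSDataOutputStream out = fs.create(p)) {
        out.write(new byte[4096]);     // write some data so at least one block exists
      }
      NamenodeProtocols namenode = cluster.getNameNodeRpc();
      // Ask the NameNode which blocks (and replica locations) cover bytes [0, 4096).
      LocatedBlocks lbs = namenode.getBlockLocations(p.toString(), 0, 4096);
      List<LocatedBlock> blocks = lbs.getLocatedBlocks();
      for (LocatedBlock lb : blocks) {
        System.out.println(lb.getBlock() + " on " + lb.getLocations().length + " datanode(s)");
      }
    } finally {
      cluster.shutdown();
    }
  }
}
The same three-argument form (path, offset, length) appears in every example that follows.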
Example 1: testGetBlockLocations
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the package/class this method depends on
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);
  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);
  for(int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertTrue(b instanceof BlockInfoContiguousUnderConstruction);
    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  // close file
  out.close();
}
Example 2: testGetBlockLocations
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the package/class this method depends on
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final BlockManager blockManager = cluster.getNamesystem().getBlockManager();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);
  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);
  for(int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertFalse(blockManager.getStoredBlock(b).isComplete());
    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  // close file
  out.close();
}
Example 3: testGetBlockLocations
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the package/class this method depends on
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);
  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);
  for(int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertTrue(b instanceof BlockInfoUnderConstruction);
    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  // close file
  out.close();
}
Example 4: testGetBlockLocations
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the package/class this method depends on
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);
  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);
  for (int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertTrue(b instanceof BlockInfoUnderConstruction);
    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  // close file
  out.close();
}
Example 5: chooseDatanode
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the package/class this method depends on
@VisibleForTesting
static DatanodeInfo chooseDatanode(final NameNode namenode,
    final String path, final HttpOpParam.Op op, final long openOffset,
    final long blocksize, final String excludeDatanodes) throws IOException {
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  HashSet<Node> excludes = new HashSet<Node>();
  if (excludeDatanodes != null) {
    for (String host : StringUtils
        .getTrimmedStringCollection(excludeDatanodes)) {
      int idx = host.indexOf(":");
      if (idx != -1) {
        excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr(
            host.substring(0, idx), Integer.parseInt(host.substring(idx + 1))));
      } else {
        excludes.add(bm.getDatanodeManager().getDatanodeByHost(host));
      }
    }
  }
  if (op == PutOpParam.Op.CREATE) {
    //choose a datanode near to client
    final DatanodeDescriptor clientNode = bm.getDatanodeManager(
        ).getDatanodeByHost(getRemoteAddress());
    if (clientNode != null) {
      final DatanodeStorageInfo[] storages = bm.chooseTarget4WebHDFS(
          path, clientNode, excludes, blocksize);
      if (storages.length > 0) {
        return storages[0].getDatanodeDescriptor();
      }
    }
  } else if (op == GetOpParam.Op.OPEN
      || op == GetOpParam.Op.GETFILECHECKSUM
      || op == PostOpParam.Op.APPEND) {
    //choose a datanode containing a replica
    final NamenodeProtocols np = getRPCServer(namenode);
    final HdfsFileStatus status = np.getFileInfo(path);
    if (status == null) {
      throw new FileNotFoundException("File " + path + " not found.");
    }
    final long len = status.getLen();
    if (op == GetOpParam.Op.OPEN) {
      if (openOffset < 0L || (openOffset >= len && len > 0)) {
        throw new IOException("Offset=" + openOffset
            + " out of the range [0, " + len + "); " + op + ", path=" + path);
      }
    }
    if (len > 0) {
      final long offset = op == GetOpParam.Op.OPEN? openOffset: len - 1;
      final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
      final int count = locations.locatedBlockCount();
      if (count > 0) {
        return bestNode(locations.get(0).getLocations(), excludes);
      }
    }
  }
  return (DatanodeDescriptor)bm.getDatanodeManager().getNetworkTopology(
      ).chooseRandom(NodeBase.ROOT);
}
Example 6: testRetryAddBlockWhileInChooseTarget
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the package/class this method depends on
/**
 * Retry addBlock() while another thread is in chooseTarget().
 * See HDFS-4452.
 */
@Test
public void testRetryAddBlockWhileInChooseTarget() throws Exception {
  final String src = "/testRetryAddBlockWhileInChooseTarget";
  final FSNamesystem ns = cluster.getNamesystem();
  final NamenodeProtocols nn = cluster.getNameNodeRpc();
  // create file
  nn.create(src, FsPermission.getFileDefault(),
      "clientName",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
      true, (short)3, 1024, null);
  // start first addBlock()
  LOG.info("Starting first addBlock for " + src);
  LocatedBlock[] onRetryBlock = new LocatedBlock[1];
  DatanodeStorageInfo targets[] = ns.getNewBlockTargets(
      src, INodeId.GRANDFATHER_INODE_ID, "clientName",
      null, null, null, onRetryBlock);
  assertNotNull("Targets must be generated", targets);
  // run second addBlock()
  LOG.info("Starting second addBlock for " + src);
  nn.addBlock(src, "clientName", null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  assertTrue("Penultimate block must be complete",
      checkFileProgress(src, false));
  LocatedBlocks lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
  assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
  LocatedBlock lb2 = lbs.get(0);
  assertEquals("Wrong replication", REPLICATION, lb2.getLocations().length);
  // continue first addBlock()
  LocatedBlock newBlock = ns.storeAllocatedBlock(
      src, INodeId.GRANDFATHER_INODE_ID, "clientName", null, targets);
  assertEquals("Blocks are not equal", lb2.getBlock(), newBlock.getBlock());
  // check locations
  lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
  assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
  LocatedBlock lb1 = lbs.get(0);
  assertEquals("Wrong replication", REPLICATION, lb1.getLocations().length);
  assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
}
Example 7: chooseDatanode
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the package/class this method depends on
@VisibleForTesting
static DatanodeInfo chooseDatanode(final NameNode namenode,
    final String path, final HttpOpParam.Op op, final long openOffset,
    final long blocksize, final String excludeDatanodes) throws IOException {
  FSNamesystem fsn = namenode.getNamesystem();
  if (fsn == null) {
    throw new IOException("Namesystem has not been initialized yet.");
  }
  final BlockManager bm = fsn.getBlockManager();
  HashSet<Node> excludes = new HashSet<Node>();
  if (excludeDatanodes != null) {
    for (String host : StringUtils
        .getTrimmedStringCollection(excludeDatanodes)) {
      int idx = host.indexOf(":");
      if (idx != -1) {
        excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr(
            host.substring(0, idx), Integer.parseInt(host.substring(idx + 1))));
      } else {
        excludes.add(bm.getDatanodeManager().getDatanodeByHost(host));
      }
    }
  }
  if (op == PutOpParam.Op.CREATE) {
    //choose a datanode near to client
    final DatanodeDescriptor clientNode = bm.getDatanodeManager(
        ).getDatanodeByHost(getRemoteAddress());
    if (clientNode != null) {
      final DatanodeStorageInfo[] storages = bm.chooseTarget4WebHDFS(
          path, clientNode, excludes, blocksize);
      if (storages.length > 0) {
        return storages[0].getDatanodeDescriptor();
      }
    }
  } else if (op == GetOpParam.Op.OPEN
      || op == GetOpParam.Op.GETFILECHECKSUM
      || op == PostOpParam.Op.APPEND) {
    //choose a datanode containing a replica
    final NamenodeProtocols np = getRPCServer(namenode);
    final HdfsFileStatus status = np.getFileInfo(path);
    if (status == null) {
      throw new FileNotFoundException("File " + path + " not found.");
    }
    final long len = status.getLen();
    if (op == GetOpParam.Op.OPEN) {
      if (openOffset < 0L || (openOffset >= len && len > 0)) {
        throw new IOException("Offset=" + openOffset
            + " out of the range [0, " + len + "); " + op + ", path=" + path);
      }
    }
    if (len > 0) {
      final long offset = op == GetOpParam.Op.OPEN? openOffset: len - 1;
      final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
      final int count = locations.locatedBlockCount();
      if (count > 0) {
        return bestNode(locations.get(0).getLocations(), excludes);
      }
    }
  }
  return (DatanodeDescriptor)bm.getDatanodeManager().getNetworkTopology(
      ).chooseRandom(NodeBase.ROOT);
}
Example 8: testRetryAddBlockWhileInChooseTarget
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the package/class this method depends on
/**
 * Retry addBlock() while another thread is in chooseTarget().
 * See HDFS-4452.
 */
@Test
public void testRetryAddBlockWhileInChooseTarget() throws Exception {
  final String src = "/testRetryAddBlockWhileInChooseTarget";
  final FSNamesystem ns = cluster.getNamesystem();
  final NamenodeProtocols nn = cluster.getNameNodeRpc();
  // create file
  nn.create(src, FsPermission.getFileDefault(),
      "clientName",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
      true, (short)3, 1024, null);
  // start first addBlock()
  LOG.info("Starting first addBlock for " + src);
  LocatedBlock[] onRetryBlock = new LocatedBlock[1];
  ns.readLock();
  FSDirWriteFileOp.ValidateAddBlockResult r;
  FSPermissionChecker pc = Mockito.mock(FSPermissionChecker.class);
  try {
    r = FSDirWriteFileOp.validateAddBlock(ns, pc, src,
        HdfsConstants.GRANDFATHER_INODE_ID,
        "clientName", null, onRetryBlock);
  } finally {
    ns.readUnlock();
  }
  DatanodeStorageInfo targets[] = FSDirWriteFileOp.chooseTargetForNewBlock(
      ns.getBlockManager(), src, null, null, r);
  assertNotNull("Targets must be generated", targets);
  // run second addBlock()
  LOG.info("Starting second addBlock for " + src);
  nn.addBlock(src, "clientName", null, null,
      HdfsConstants.GRANDFATHER_INODE_ID, null);
  assertTrue("Penultimate block must be complete",
      checkFileProgress(src, false));
  LocatedBlocks lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
  assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
  LocatedBlock lb2 = lbs.get(0);
  assertEquals("Wrong replication", REPLICATION, lb2.getLocations().length);
  // continue first addBlock()
  ns.writeLock();
  LocatedBlock newBlock;
  try {
    newBlock = FSDirWriteFileOp.storeAllocatedBlock(ns, src,
        HdfsConstants.GRANDFATHER_INODE_ID, "clientName", null, targets);
  } finally {
    ns.writeUnlock();
  }
  assertEquals("Blocks are not equal", lb2.getBlock(), newBlock.getBlock());
  // check locations
  lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
  assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
  LocatedBlock lb1 = lbs.get(0);
  assertEquals("Wrong replication", REPLICATION, lb1.getLocations().length);
  assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
}
Example 9: chooseDatanode
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the package/class this method depends on
static DatanodeInfo chooseDatanode(final NameNode namenode,
    final String path, final HttpOpParam.Op op, final long openOffset,
    final long blocksize, Configuration conf) throws IOException {
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  if (op == PutOpParam.Op.CREATE) {
    //choose a datanode near to client
    final DatanodeDescriptor clientNode = bm.getDatanodeManager(
        ).getDatanodeByHost(getRemoteAddress());
    if (clientNode != null) {
      final DatanodeDescriptor[] datanodes = bm.getBlockPlacementPolicy()
          .chooseTarget(path, 1, clientNode,
              new ArrayList<DatanodeDescriptor>(), false, null, blocksize);
      if (datanodes.length > 0) {
        return datanodes[0];
      }
    }
  } else if (op == GetOpParam.Op.OPEN
      || op == GetOpParam.Op.GETFILECHECKSUM
      || op == PostOpParam.Op.APPEND) {
    //choose a datanode containing a replica
    final NamenodeProtocols np = namenode.getRpcServer();
    final HdfsFileStatus status = np.getFileInfo(path);
    if (status == null) {
      throw new FileNotFoundException("File " + path + " not found.");
    }
    final long len = status.getLen();
    if (op == GetOpParam.Op.OPEN) {
      if (openOffset < 0L || (openOffset >= len && len > 0)) {
        throw new IOException("Offset=" + openOffset
            + " out of the range [0, " + len + "); " + op + ", path=" + path);
      }
    }
    if (len > 0) {
      final long offset = op == GetOpParam.Op.OPEN? openOffset: len - 1;
      final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
      final int count = locations.locatedBlockCount();
      if (count > 0) {
        return JspHelper.bestNode(locations.get(0).getLocations(), false, conf);
      }
    }
  }
  return (DatanodeDescriptor)bm.getDatanodeManager().getNetworkTopology(
      ).chooseRandom(NodeBase.ROOT);
}
Example 10: chooseDatanode
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the package/class this method depends on
static DatanodeInfo chooseDatanode(final NameNode namenode, final String path,
    final HttpOpParam.Op op, final long openOffset, final long blocksize,
    Configuration conf) throws IOException {
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  if (op == PutOpParam.Op.CREATE) {
    //choose a datanode near to client
    final DatanodeDescriptor clientNode =
        bm.getDatanodeManager().getDatanodeByHost(getRemoteAddress());
    if (clientNode != null) {
      final DatanodeDescriptor[] datanodes = bm.getBlockPlacementPolicy()
          .chooseTarget(path, 1, clientNode,
              new ArrayList<DatanodeDescriptor>(), false, null, blocksize);
      if (datanodes.length > 0) {
        return datanodes[0];
      }
    }
  } else if (op == GetOpParam.Op.OPEN ||
      op == GetOpParam.Op.GETFILECHECKSUM || op == PostOpParam.Op.APPEND) {
    //choose a datanode containing a replica
    final NamenodeProtocols np = namenode.getRpcServer();
    final HdfsFileStatus status = np.getFileInfo(path);
    if (status == null) {
      throw new FileNotFoundException("File " + path + " not found.");
    }
    final long len = status.getLen();
    if (op == GetOpParam.Op.OPEN) {
      if (openOffset < 0L || (openOffset >= len && len > 0)) {
        throw new IOException(
            "Offset=" + openOffset + " out of the range [0, " + len + "); " +
            op + ", path=" + path);
      }
    }
    if (len > 0) {
      final long offset = op == GetOpParam.Op.OPEN ? openOffset : len - 1;
      final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
      final int count = locations.locatedBlockCount();
      if (count > 0) {
        return JspHelper
            .bestNode(locations.get(0).getLocations(), false, conf);
      }
    }
  }
  return (DatanodeDescriptor) bm.getDatanodeManager().getNetworkTopology()
      .chooseRandom(NodeBase.ROOT);
}