This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.protocol.LocatedBlocks.locatedBlockCount. If you have been wondering what LocatedBlocks.locatedBlockCount does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore the enclosing class, org.apache.hadoop.hdfs.protocol.LocatedBlocks, for further usage.
The sections below show 7 code examples of the LocatedBlocks.locatedBlockCount method, ordered by popularity by default.
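Before the examples, here is a minimal sketch of the typical call path, assuming only what the examples below also use: a DFSClient handle whose getNamenode() proxy serves getBlockLocations. The helper name and its parameters are illustrative, not taken from any example.

import java.io.IOException;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

/** Sketch: count a file's blocks by asking the NameNode for its block list. */
static int countBlocks(DFSClient dfsclient, String path) throws IOException {
  // Cover the whole file by requesting the byte range [0, Long.MAX_VALUE).
  LocatedBlocks blocks =
      dfsclient.getNamenode().getBlockLocations(path, 0, Long.MAX_VALUE);
  // locatedBlockCount() is simply the number of LocatedBlock entries returned.
  return blocks.locatedBlockCount();
}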
Example 1: collectFileSummary
import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class the method depends on
private void collectFileSummary(String path, HdfsFileStatus file, Result res,
    LocatedBlocks blocks) throws IOException {
  long fileLen = file.getLen();
  boolean isOpen = blocks.isUnderConstruction();
  if (isOpen && !showOpenFiles) {
    // We collect these stats about open files to report with default options
    res.totalOpenFilesSize += fileLen;
    res.totalOpenFilesBlocks += blocks.locatedBlockCount();
    res.totalOpenFiles++;
    return;
  }
  res.totalFiles++;
  res.totalSize += fileLen;
  res.totalBlocks += blocks.locatedBlockCount();
  if (showOpenFiles && isOpen) {
    out.print(path + " " + fileLen + " bytes, " +
        blocks.locatedBlockCount() + " block(s), OPENFORWRITE: ");
  } else if (showFiles) {
    out.print(path + " " + fileLen + " bytes, " +
        blocks.locatedBlockCount() + " block(s): ");
  } else if (showprogress) {
    out.print('.');
  }
}
Example 2: testAbandonBlock
import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class the method depends on
/** Abandon a block while creating a file. */
@Test
public void testAbandonBlock() throws IOException {
  String src = FILE_NAME_PREFIX + "foo";

  // Start writing a file but do not close it
  FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short)1, 512L);
  for (int i = 0; i < 1024; i++) {
    fout.write(123);
  }
  fout.hflush();
  long fileId = ((DFSOutputStream)fout.getWrappedStream()).getFileId();

  // Now abandon the last block
  DFSClient dfsclient = DFSClientAdapter.getDFSClient(fs);
  LocatedBlocks blocks =
      dfsclient.getNamenode().getBlockLocations(src, 0, Integer.MAX_VALUE);
  int originalNumBlocks = blocks.locatedBlockCount();
  LocatedBlock b = blocks.getLastLocatedBlock();
  dfsclient.getNamenode().abandonBlock(b.getBlock(), fileId, src,
      dfsclient.clientName);

  // Call abandonBlock again to make sure the operation is idempotent
  dfsclient.getNamenode().abandonBlock(b.getBlock(), fileId, src,
      dfsclient.clientName);

  // And close the file
  fout.close();

  // Restart the NameNode and check that the block has been abandoned:
  // the file should now report exactly one block fewer than before.
  cluster.restartNameNode();
  blocks = dfsclient.getNamenode().getBlockLocations(src, 0,
      Integer.MAX_VALUE);
  Assert.assertEquals("Block " + b + " has not been abandoned.",
      originalNumBlocks, blocks.locatedBlockCount() + 1);
}
Example 3: getListing
import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class the method depends on
/**
 * Get a partial listing of the indicated directory.
 *
 * We will stop when any of the following conditions is met:
 * 1) this.lsLimit files have been added
 * 2) needLocation is true AND enough files have been added such
 *    that at least this.lsLimit block locations are in the response
 *
 * @param fsd FSDirectory
 * @param iip the INodesInPath instance containing all the INodes along the
 *            path
 * @param src the directory name
 * @param startAfter the name to start listing after
 * @param needLocation if block locations are returned
 * @param isSuperUser whether the caller is a superuser (controls whether
 *                    storage policy IDs are exposed)
 * @return a partial listing starting after startAfter
 */
private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip,
    String src, byte[] startAfter, boolean needLocation, boolean isSuperUser)
    throws IOException {
  String srcs = FSDirectory.normalizePath(src);
  final boolean isRawPath = FSDirectory.isReservedRawName(src);

  fsd.readLock();
  try {
    if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
      return getSnapshotsListing(fsd, srcs, startAfter);
    }
    final int snapshot = iip.getPathSnapshotId();
    final INode targetNode = iip.getLastINode();
    if (targetNode == null) {
      return null;
    }
    byte parentStoragePolicy = isSuperUser ?
        targetNode.getStoragePolicyID() :
        BlockStoragePolicySuite.ID_UNSPECIFIED;

    if (!targetNode.isDirectory()) {
      return new DirectoryListing(
          new HdfsFileStatus[]{ createFileStatus(fsd, src,
              HdfsFileStatus.EMPTY_NAME, targetNode, needLocation,
              parentStoragePolicy, snapshot, isRawPath, iip) }, 0);
    }

    final INodeDirectory dirInode = targetNode.asDirectory();
    final ReadOnlyList<INode> contents = dirInode.getChildrenList(snapshot);
    int startChild = INodeDirectory.nextChild(contents, startAfter);
    int totalNumChildren = contents.size();
    int numOfListing = Math.min(totalNumChildren - startChild,
        fsd.getLsLimit());
    int locationBudget = fsd.getLsLimit();
    int listingCnt = 0;
    HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
    for (int i = 0; i < numOfListing && locationBudget > 0; i++) {
      INode cur = contents.get(startChild + i);
      byte curPolicy = isSuperUser && !cur.isSymlink() ?
          cur.getLocalStoragePolicyID() :
          BlockStoragePolicySuite.ID_UNSPECIFIED;
      listing[i] = createFileStatus(fsd, src, cur.getLocalNameBytes(), cur,
          needLocation, getStoragePolicyID(curPolicy,
          parentStoragePolicy), snapshot, isRawPath, iip);
      listingCnt++;
      if (needLocation) {
        // Once we hit lsLimit locations, stop.
        // This helps to prevent excessively large response payloads.
        // Approximate #locations with locatedBlockCount() * repl_factor
        LocatedBlocks blks =
            ((HdfsLocatedFileStatus)listing[i]).getBlockLocations();
        locationBudget -= (blks == null) ? 0 :
            blks.locatedBlockCount() * listing[i].getReplication();
      }
    }
    // Truncate the returned array if necessary
    if (listingCnt < numOfListing) {
      listing = Arrays.copyOf(listing, listingCnt);
    }
    return new DirectoryListing(
        listing, totalNumChildren - startChild - listingCnt);
  } finally {
    fsd.readUnlock();
  }
}
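To make the location budget in Example 3 concrete, the following small replay runs its accounting loop in isolation. The numbers (a budget of 1000, files of 8 blocks at replication 3) are illustrative assumptions, not values taken from the example.

// Illustrative replay of Example 3's location-budget accounting.
int locationBudget = 1000;   // stands in for fsd.getLsLimit() (assumed value)
int blocksPerFile = 8;       // stands in for blks.locatedBlockCount() (assumed)
short replication = 3;       // stands in for listing[i].getReplication() (assumed)
int listed = 0;
while (locationBudget > 0) {
  // Each listed file consumes roughly blocks * replication locations.
  locationBudget -= blocksPerFile * replication;
  listed++;
}
// 1000 / 24 = 41.67, so the 42nd file exhausts the budget: listed == 42.
System.out.println(listed + " files fit in the budget");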
Example 4: chooseDatanode
import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class the method depends on
@VisibleForTesting
static DatanodeInfo chooseDatanode(final NameNode namenode,
    final String path, final HttpOpParam.Op op, final long openOffset,
    final long blocksize, final String excludeDatanodes) throws IOException {
  final BlockManager bm = namenode.getNamesystem().getBlockManager();

  HashSet<Node> excludes = new HashSet<Node>();
  if (excludeDatanodes != null) {
    for (String host : StringUtils
        .getTrimmedStringCollection(excludeDatanodes)) {
      int idx = host.indexOf(":");
      if (idx != -1) {
        excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr(
            host.substring(0, idx), Integer.parseInt(host.substring(idx + 1))));
      } else {
        excludes.add(bm.getDatanodeManager().getDatanodeByHost(host));
      }
    }
  }

  if (op == PutOpParam.Op.CREATE) {
    // choose a datanode near the client
    final DatanodeDescriptor clientNode = bm.getDatanodeManager(
        ).getDatanodeByHost(getRemoteAddress());
    if (clientNode != null) {
      final DatanodeStorageInfo[] storages = bm.chooseTarget4WebHDFS(
          path, clientNode, excludes, blocksize);
      if (storages.length > 0) {
        return storages[0].getDatanodeDescriptor();
      }
    }
  } else if (op == GetOpParam.Op.OPEN
      || op == GetOpParam.Op.GETFILECHECKSUM
      || op == PostOpParam.Op.APPEND) {
    // choose a datanode containing a replica
    final NamenodeProtocols np = getRPCServer(namenode);
    final HdfsFileStatus status = np.getFileInfo(path);
    if (status == null) {
      throw new FileNotFoundException("File " + path + " not found.");
    }
    final long len = status.getLen();
    if (op == GetOpParam.Op.OPEN) {
      if (openOffset < 0L || (openOffset >= len && len > 0)) {
        throw new IOException("Offset=" + openOffset
            + " out of the range [0, " + len + "); " + op + ", path=" + path);
      }
    }
    if (len > 0) {
      final long offset = op == GetOpParam.Op.OPEN ? openOffset : len - 1;
      final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
      final int count = locations.locatedBlockCount();
      if (count > 0) {
        return bestNode(locations.get(0).getLocations(), excludes);
      }
    }
  }

  return (DatanodeDescriptor)bm.getDatanodeManager().getNetworkTopology(
      ).chooseRandom(NodeBase.ROOT);
}
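A detail of Example 4 that is easy to miss: for OPEN the client-supplied offset is validated and used, while GETFILECHECKSUM and APPEND probe the last byte (len - 1), so getBlockLocations(path, offset, 1) returns just the block covering that single byte and locatedBlockCount() there is at most 1. Below is a sketch of only that lookup; np, len, op, openOffset, path, and excludes are assumed bound as in the example.

// Sketch of Example 4's replica lookup in isolation.
final long offset = op == GetOpParam.Op.OPEN ? openOffset : len - 1;
// Ask for the single block covering one byte at `offset`.
final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
if (locations.locatedBlockCount() > 0) {
  // The replicas of that block are the candidate datanodes.
  return bestNode(locations.get(0).getLocations(), excludes);
}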
Example 5: testFileCreationError2
import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class the method depends on
/**
 * Test that the filesystem removes the last block from a file if its
 * lease expires.
 */
@Test
public void testFileCreationError2() throws IOException {
  long leasePeriod = 1000;
  System.out.println("testFileCreationError2 start");
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }

  // create cluster
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  DistributedFileSystem dfs = null;
  try {
    cluster.waitActive();
    dfs = cluster.getFileSystem();
    DFSClient client = dfs.dfs;

    // create a new file.
    Path file1 = new Path("/filestatus.dat");
    createFile(dfs, file1, 1);
    System.out.println("testFileCreationError2: "
        + "Created file filestatus.dat with one replica.");

    LocatedBlocks locations = client.getNamenode().getBlockLocations(
        file1.toString(), 0, Long.MAX_VALUE);
    System.out.println("testFileCreationError2: "
        + "The file has " + locations.locatedBlockCount() + " blocks.");

    // add one block to the file
    LocatedBlock location = client.getNamenode().addBlock(file1.toString(),
        client.clientName, null, null, INodeId.GRANDFATHER_INODE_ID, null);
    System.out.println("testFileCreationError2: "
        + "Added block " + location.getBlock());

    locations = client.getNamenode().getBlockLocations(file1.toString(),
        0, Long.MAX_VALUE);
    int count = locations.locatedBlockCount();
    System.out.println("testFileCreationError2: "
        + "The file now has " + count + " blocks.");

    // set the soft and hard limits to 1 second so that the
    // namenode triggers lease recovery
    cluster.setLeasePeriod(leasePeriod, leasePeriod);

    // wait for the lease to expire
    try {
      Thread.sleep(5 * leasePeriod);
    } catch (InterruptedException e) {
      // ignore; the wait is best-effort
    }

    // verify that the last block was synchronized.
    locations = client.getNamenode().getBlockLocations(file1.toString(),
        0, Long.MAX_VALUE);
    System.out.println("testFileCreationError2: "
        + "locations = " + locations.locatedBlockCount());
    assertEquals(0, locations.locatedBlockCount());
    System.out.println("testFileCreationError2 successful");
  } finally {
    IOUtils.closeStream(dfs);
    cluster.shutdown();
  }
}
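The timing in Example 5 is worth spelling out: both the soft and hard lease limits are shrunk to one second via a MiniDFSCluster test hook, and the test then sleeps for five lease periods so the NameNode's lease monitor has time to notice the expiry and finish recovery. The 5x factor is the test's own safety margin, not a fixed rule; a minimal sketch of the pattern:

// Pattern from Example 5 (sketch): shrink both lease limits so lease
// recovery triggers quickly, then out-wait the recovery itself.
long leasePeriod = 1000;                          // soft == hard == 1 second
cluster.setLeasePeriod(leasePeriod, leasePeriod); // MiniDFSCluster test hook
Thread.sleep(5 * leasePeriod);                    // generous margin for recovery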
Example 6: chooseDatanode
import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class the method depends on
@VisibleForTesting
static DatanodeInfo chooseDatanode(final NameNode namenode,
    final String path, final HttpOpParam.Op op, final long openOffset,
    final long blocksize, final String excludeDatanodes) throws IOException {
  FSNamesystem fsn = namenode.getNamesystem();
  if (fsn == null) {
    throw new IOException("Namesystem has not been initialized yet.");
  }
  final BlockManager bm = fsn.getBlockManager();

  HashSet<Node> excludes = new HashSet<Node>();
  if (excludeDatanodes != null) {
    for (String host : StringUtils
        .getTrimmedStringCollection(excludeDatanodes)) {
      int idx = host.indexOf(":");
      if (idx != -1) {
        excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr(
            host.substring(0, idx), Integer.parseInt(host.substring(idx + 1))));
      } else {
        excludes.add(bm.getDatanodeManager().getDatanodeByHost(host));
      }
    }
  }

  if (op == PutOpParam.Op.CREATE) {
    // choose a datanode near the client
    final DatanodeDescriptor clientNode = bm.getDatanodeManager(
        ).getDatanodeByHost(getRemoteAddress());
    if (clientNode != null) {
      final DatanodeStorageInfo[] storages = bm.chooseTarget4WebHDFS(
          path, clientNode, excludes, blocksize);
      if (storages.length > 0) {
        return storages[0].getDatanodeDescriptor();
      }
    }
  } else if (op == GetOpParam.Op.OPEN
      || op == GetOpParam.Op.GETFILECHECKSUM
      || op == PostOpParam.Op.APPEND) {
    // choose a datanode containing a replica
    final NamenodeProtocols np = getRPCServer(namenode);
    final HdfsFileStatus status = np.getFileInfo(path);
    if (status == null) {
      throw new FileNotFoundException("File " + path + " not found.");
    }
    final long len = status.getLen();
    if (op == GetOpParam.Op.OPEN) {
      if (openOffset < 0L || (openOffset >= len && len > 0)) {
        throw new IOException("Offset=" + openOffset
            + " out of the range [0, " + len + "); " + op + ", path=" + path);
      }
    }
    if (len > 0) {
      final long offset = op == GetOpParam.Op.OPEN ? openOffset : len - 1;
      final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
      final int count = locations.locatedBlockCount();
      if (count > 0) {
        return bestNode(locations.get(0).getLocations(), excludes);
      }
    }
  }

  return (DatanodeDescriptor)bm.getDatanodeManager().getNetworkTopology(
      ).chooseRandom(NodeBase.ROOT);
}
Example 7: testFileCreationError2
import org.apache.hadoop.hdfs.protocol.LocatedBlocks; // import the package/class the method depends on
/**
 * Test that the filesystem removes the last block from a file if its
 * lease expires.
 */
@Test
public void testFileCreationError2() throws IOException {
  long leasePeriod = 1000;
  System.out.println("testFileCreationError2 start");
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }

  // create cluster
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  DistributedFileSystem dfs = null;
  try {
    cluster.waitActive();
    dfs = cluster.getFileSystem();
    DFSClient client = dfs.dfs;

    // create a new file.
    Path file1 = new Path("/filestatus.dat");
    createFile(dfs, file1, 1);
    System.out.println("testFileCreationError2: "
        + "Created file filestatus.dat with one replica.");

    LocatedBlocks locations = client.getNamenode().getBlockLocations(
        file1.toString(), 0, Long.MAX_VALUE);
    System.out.println("testFileCreationError2: "
        + "The file has " + locations.locatedBlockCount() + " blocks.");

    // add one block to the file
    LocatedBlock location = client.getNamenode().addBlock(file1.toString(),
        client.clientName, null, null, HdfsConstants.GRANDFATHER_INODE_ID, null);
    System.out.println("testFileCreationError2: "
        + "Added block " + location.getBlock());

    locations = client.getNamenode().getBlockLocations(file1.toString(),
        0, Long.MAX_VALUE);
    int count = locations.locatedBlockCount();
    System.out.println("testFileCreationError2: "
        + "The file now has " + count + " blocks.");

    // set the soft and hard limits to 1 second so that the
    // namenode triggers lease recovery
    cluster.setLeasePeriod(leasePeriod, leasePeriod);

    // wait for the lease to expire
    try {
      Thread.sleep(5 * leasePeriod);
    } catch (InterruptedException e) {
      // ignore; the wait is best-effort
    }

    // verify that the last block was synchronized.
    locations = client.getNamenode().getBlockLocations(file1.toString(),
        0, Long.MAX_VALUE);
    System.out.println("testFileCreationError2: "
        + "locations = " + locations.locatedBlockCount());
    assertEquals(0, locations.locatedBlockCount());
    System.out.println("testFileCreationError2 successful");
  } finally {
    IOUtils.closeStream(dfs);
    cluster.shutdown();
  }
}