本文整理汇总了Java中org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException类的典型用法代码示例。如果您正苦于以下问题:Java MaxDirectoryItemsExceededException类的具体用法?Java MaxDirectoryItemsExceededException怎么用?Java MaxDirectoryItemsExceededException使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
MaxDirectoryItemsExceededException类属于org.apache.hadoop.hdfs.protocol.FSLimitException包,在下文中一共展示了MaxDirectoryItemsExceededException类的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: verifyMaxDirItems
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException; //导入依赖的package包/类
/**
 * Verify children size for fs limit.
 *
 * @param parent the directory whose child count is checked
 * @param parentPath full path of {@code parent}, used for the error message
 * @throws MaxDirectoryItemsExceededException too many children.
 */
void verifyMaxDirItems(INodeDirectory parent, String parentPath)
    throws MaxDirectoryItemsExceededException {
  final int childCount = parent.getChildrenList(CURRENT_STATE_ID).size();
  if (childCount < maxDirItems) {
    return;
  }
  final MaxDirectoryItemsExceededException ex =
      new MaxDirectoryItemsExceededException(maxDirItems, childCount);
  if (!namesystem.isImageLoaded()) {
    // Edits log is still being replayed: record the violation but do not fail.
    NameNode.LOG.error("FSDirectory.verifyMaxDirItems: "
        + ex.getLocalizedMessage());
    return;
  }
  ex.setPathName(parentPath);
  throw ex;
}
示例2: testMaxDirItemsRename
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException; //导入依赖的package包/类
@Test
public void testMaxDirItemsRename() throws Exception {
// Cap every directory at 2 children.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, 2);
// Root now holds exactly 2 entries (/1 and /2) -- at the limit.
mkdirs("/1", null);
mkdirs("/2", null);
mkdirs("/2/A", null);
// Moving A into the full root must fail; moving it into /1 succeeds.
// (second argument: expected exception class, null means expect success)
rename("/2/A", "/A", MaxDirectoryItemsExceededException.class);
rename("/2/A", "/1/A", null);
mkdirs("/2/B", null);
// Same checks via the deprecated rename API.
deprecatedRename("/2/B", "/B", MaxDirectoryItemsExceededException.class);
deprecatedRename("/2/B", "/1/B", null);
// Renames within the same parent do not change its item count,
// so they succeed even though root is at the limit.
rename("/1", "/3", null);
deprecatedRename("/2", "/4", null);
}
示例3: verifyMaxDirItems
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException; //导入依赖的package包/类
/**
 * Verify children size for fs limit.
 *
 * @param pathComponents INode[] containing full path of inodes to new child
 * @param pos int position of new child in pathComponents
 * @throws MaxDirectoryItemsExceededException too many children.
 */
private void verifyMaxDirItems(INode[] pathComponents, int pos)
    throws MaxDirectoryItemsExceededException {
  final INodeDirectory dir = pathComponents[pos - 1].asDirectory();
  final int children = dir.getChildrenList(Snapshot.CURRENT_STATE_ID).size();
  if (children < maxDirItems) {
    return;
  }
  final MaxDirectoryItemsExceededException ex =
      new MaxDirectoryItemsExceededException(maxDirItems, children);
  if (!namesystem.isImageLoaded()) {
    // Edits log is still being replayed: record the violation but do not fail.
    NameNode.LOG.error("FSDirectory.verifyMaxDirItems: "
        + ex.getLocalizedMessage());
    return;
  }
  ex.setPathName(getFullPathName(pathComponents, pos - 1));
  throw ex;
}
示例4: verifyMaxDirItems
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException; //导入依赖的package包/类
/**
 * Verify children size for fs limit.
 * @param pathComponents INode[] containing full path of inodes to new child
 * @param pos int position of new child in pathComponents
 * @throws MaxDirectoryItemsExceededException too many children.
 */
void verifyMaxDirItems(INode[] pathComponents, int pos)
    throws MaxDirectoryItemsExceededException {
  // A configured limit of 0 disables this check entirely.
  if (maxDirItems == 0) {
    return;
  }
  final INodeDirectory dir = pathComponents[pos - 1].asDirectory();
  final int children = dir.getChildrenList(null).size();
  if (children >= maxDirItems) {
    final MaxDirectoryItemsExceededException ex =
        new MaxDirectoryItemsExceededException(maxDirItems, children);
    if (ready) {
      ex.setPathName(getFullPathName(pathComponents, pos - 1));
      throw ex;
    }
    // Edits log is still being processed: log instead of throwing.
    NameNode.LOG.error("FSDirectory.verifyMaxDirItems: "
        + ex.getLocalizedMessage());
  }
}
示例5: verifyMaxDirItems
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException; //导入依赖的package包/类
/**
 * Verify children size for fs limit.
 *
 * @param pathComponents INode[] containing full path of inodes to new child
 * @param pos int position of new child in pathComponents
 * @throws MaxDirectoryItemsExceededException too many children.
 */
private void verifyMaxDirItems(INode[] pathComponents, int pos)
    throws MaxDirectoryItemsExceededException {
  final INodeDirectory dir = pathComponents[pos - 1].asDirectory();
  final int children = dir.getChildrenList(Snapshot.CURRENT_STATE_ID).size();
  if (children < maxDirItems) {
    return;
  }
  final MaxDirectoryItemsExceededException ex =
      new MaxDirectoryItemsExceededException(maxDirItems, children);
  if (!ready) {
    // Edits log is still being processed: log instead of throwing.
    NameNode.LOG.error("FSDirectory.verifyMaxDirItems: "
        + ex.getLocalizedMessage());
    return;
  }
  ex.setPathName(getFullPathName(pathComponents, pos - 1));
  throw ex;
}
示例6: testMaxDirItems
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException; //导入依赖的package包/类
@Test
public void testMaxDirItems() throws Exception {
// Cap every directory at 2 children.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, 2);
// First two mkdirs fit under the limit (null = expect success).
mkdirs("/1", null);
mkdirs("/22", null);
// Third and fourth children of root must be rejected.
mkdirs("/333", MaxDirectoryItemsExceededException.class);
mkdirs("/4444", MaxDirectoryItemsExceededException.class);
}
示例7: testMaxComponentsAndMaxDirItems
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException; //导入依赖的package包/类
@Test
public void testMaxComponentsAndMaxDirItems() throws Exception {
// Limit path component names to 3 characters and directories to 2 children.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY, 3);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, 2);
// First two mkdirs satisfy both limits (null = expect success).
mkdirs("/1", null);
mkdirs("/22", null);
// "333" is within the 3-char name limit but exceeds root's item count.
mkdirs("/333", MaxDirectoryItemsExceededException.class);
// "4444" trips the component-length check (4 chars > 3).
mkdirs("/4444", PathComponentTooLongException.class);
}
示例8: verifyFsLimitsForRename
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException; //导入依赖的package包/类
/**
 * Checks file system limits (max component length and max directory items)
 * during a rename operation.
 *
 * @param srcIIP INodesInPath containing every inode in the rename source
 * @param dstIIP INodesInPath containing every inode in the rename destination
 * @throws PathComponentTooLongException child's name is too long.
 * @throws MaxDirectoryItemsExceededException too many children.
 */
private void verifyFsLimitsForRename(INodesInPath srcIIP, INodesInPath dstIIP)
    throws PathComponentTooLongException, MaxDirectoryItemsExceededException {
  final INode[] dstInodes = dstIIP.getINodes();
  final int childPos = dstInodes.length - 1;
  verifyMaxComponentLength(dstIIP.getLastLocalName(), dstInodes, childPos);
  // A rename within the same parent directory does not change that
  // directory's item count, so the max-items check is skipped.
  final boolean sameParent = srcIIP.getINode(-2) == dstIIP.getINode(-2);
  if (!sameParent) {
    verifyMaxDirItems(dstInodes, childPos);
  }
}
示例9: testMaxDirItems
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException; //导入依赖的package包/类
@Test
public void testMaxDirItems() throws Exception {
// Cap every directory at 2 children.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, 2);
// First two children fit under the limit (null = expect success).
addChildWithName("1", null);
addChildWithName("22", null);
// Third and fourth children must be rejected.
addChildWithName("333", MaxDirectoryItemsExceededException.class);
addChildWithName("4444", MaxDirectoryItemsExceededException.class);
}
示例10: testMaxComponentsAndMaxDirItems
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException; //导入依赖的package包/类
@Test
public void testMaxComponentsAndMaxDirItems() throws Exception {
// Limit component names to 3 characters and directories to 2 children.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY, 3);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, 2);
// First two children satisfy both limits (null = expect success).
addChildWithName("1", null);
addChildWithName("22", null);
// "333" fits the name-length limit but exceeds the item count.
addChildWithName("333", MaxDirectoryItemsExceededException.class);
// "4444" trips the component-length check (4 chars > 3).
addChildWithName("4444", PathComponentTooLongException.class);
}
示例11: verifyFsLimits
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException; //导入依赖的package包/类
/**
 * Verify that filesystem limit constraints are not violated
 *
 * @throws PathComponentTooLongException
 * child's name is too long
 * @throws MaxDirectoryItemsExceededException
 * items per directory is exceeded
 */
protected <T extends INode> void verifyFsLimits(INode[] pathComponents,
int pos, T child)
throws FSLimitException, StorageException, TransactionContextException {
// Whether the offending path reported in the exception should include the
// child's own name (true only when the component-length check tripped).
boolean includeChildName = false;
try {
// A limit of 0 disables each check.
if (maxComponentLength != 0) {
int length = child.getLocalName().length();
if (length > maxComponentLength) {
includeChildName = true;
throw new PathComponentTooLongException(maxComponentLength, length);
}
}
if (maxDirItems != 0) {
INodeDirectory parent = (INodeDirectory) pathComponents[pos - 1];
int count = parent.getChildrenList().size();
if (count >= maxDirItems) {
throw new MaxDirectoryItemsExceededException(maxDirItems, count);
}
}
} catch (FSLimitException e) {
// Both limit exceptions funnel here so the violating path can be
// attached before deciding whether to rethrow.
String badPath = getFullPathName(pathComponents, pos - 1);
if (includeChildName) {
badPath += Path.SEPARATOR + child.getLocalName();
}
e.setPathName(badPath);
// Do not throw if edits log is still being processed
if (ready) {
throw (e);
}
// log pre-existing paths that exceed limits
NameNode.LOG
.error("FSDirectory.verifyFsLimits - " + e.getLocalizedMessage());
}
}