本文整理汇总了Java中org.apache.hadoop.hdfs.DistributedFileSystem.setQuota方法的典型用法代码示例。如果您正苦于以下问题:Java DistributedFileSystem.setQuota方法的具体用法?Java DistributedFileSystem.setQuota怎么用?Java DistributedFileSystem.setQuota使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hdfs.DistributedFileSystem
的用法示例。
在下文中一共展示了DistributedFileSystem.setQuota方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testEditsLogOldRename
import org.apache.hadoop.hdfs.DistributedFileSystem; //导入方法依赖的package包/类
/**
 * Exercise quota setting, file deletion and the legacy rename API, then
 * restart the cluster to verify the NameNode can replay all of these
 * operations from the edits log during startup.
 */
@Test
public void testEditsLogOldRename() throws Exception {
  DistributedFileSystem dfs = cluster.getFileSystem();
  Path renameSrc = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
  Path renameDst = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
  createFile(renameSrc);
  dfs.mkdirs(renameDst.getParent());
  createFile(renameDst);
  // Cap the namespace quota on the destination parent so it cannot accept
  // any additional children.
  dfs.setQuota(renameDst.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  // Deleting the destination frees one quota slot for the rename below.
  dfs.delete(renameDst, true);
  oldRename(renameSrc, renameDst, true, false);
  // Bounce the cluster: the operations above must be reloadable from the
  // edits log.
  restartCluster();
  dfs = cluster.getFileSystem();
  renameSrc = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
  renameDst = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
  Assert.assertFalse(dfs.exists(renameSrc)); // source gone after rename
  Assert.assertTrue(dfs.exists(renameDst));  // destination now exists
}
示例2: testEditsLogRename
import org.apache.hadoop.hdfs.DistributedFileSystem; //导入方法依赖的package包/类
/**
 * Exercise quota setting, file deletion and the options-based rename API,
 * then restart the cluster to verify the NameNode can replay all of these
 * operations from the edits log during startup.
 */
@Test
public void testEditsLogRename() throws Exception {
  DistributedFileSystem dfs = cluster.getFileSystem();
  Path renameSrc = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
  Path renameDst = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
  createFile(renameSrc);
  dfs.mkdirs(renameDst.getParent());
  createFile(renameDst);
  // Cap the namespace quota on the destination parent so it cannot accept
  // any additional children.
  dfs.setQuota(renameDst.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  // Deleting the destination frees one quota slot for the rename below.
  dfs.delete(renameDst, true);
  rename(renameSrc, renameDst, true, true, false, Rename.OVERWRITE);
  // Bounce the cluster: the operations above must be reloadable from the
  // edits log.
  restartCluster();
  dfs = cluster.getFileSystem();
  renameSrc = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
  renameDst = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
  Assert.assertFalse(dfs.exists(renameSrc)); // source gone after rename
  Assert.assertTrue(dfs.exists(renameDst));  // destination now exists
}
示例3: testOldRenameWithQuota
import org.apache.hadoop.hdfs.DistributedFileSystem; //导入方法依赖的package包/类
/**
 * Verify legacy rename behavior under namespace quotas on both the
 * source and destination parent directories.
 */
@Test
public void testOldRenameWithQuota() throws Exception {
  DistributedFileSystem dfs = cluster.getFileSystem();
  Path srcA = getTestRootPath(fc, "test/testOldRenameWithQuota/srcdir/src1");
  Path srcB = getTestRootPath(fc, "test/testOldRenameWithQuota/srcdir/src2");
  Path dstA = getTestRootPath(fc, "test/testOldRenameWithQuota/dstdir/dst1");
  Path dstB = getTestRootPath(fc, "test/testOldRenameWithQuota/dstdir/dst2");
  createFile(srcA);
  createFile(srcB);
  dfs.setQuota(srcA.getParent(), HdfsConstants.QUOTA_DONT_SET,
      HdfsConstants.QUOTA_DONT_SET);
  fc.mkdir(dstA.getParent(), FileContext.DEFAULT_PERM, true);
  dfs.setQuota(dstA.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  // Test1: the source fits its quota and the destination parent still has
  // room, so the rename is accommodated.
  oldRename(srcA, dstA, true, false);
  // Test2: the destination parent's quota (2) is now exhausted by dstA,
  // so a rename to a second destination must fail.
  oldRename(srcB, dstB, false, true);
  // Test3: shrink the source parent's quota to 1 so it cannot accept the
  // node being renamed back in; the rename must fail.
  dfs.setQuota(srcA.getParent(), 1, HdfsConstants.QUOTA_DONT_SET);
  oldRename(dstA, srcA, false, true);
}
示例4: createSnapshot
import org.apache.hadoop.hdfs.DistributedFileSystem; //导入方法依赖的package包/类
/**
 * Create a snapshot of {@code snapshotRoot} under the given snapshot name.
 *
 * The directory must already exist; it is first marked snapshottable via
 * {@code allowSnapshot} before the snapshot is taken.
 *
 * @param hdfs DistributedFileSystem instance
 * @param snapshotRoot The dir to be snapshotted
 * @param snapshotName The name of the snapshot
 * @return The path of the snapshot root
 * @throws Exception if the directory is missing or the snapshot fails
 */
public static Path createSnapshot(DistributedFileSystem hdfs,
Path snapshotRoot, String snapshotName) throws Exception {
LOG.info("createSnapshot " + snapshotName + " for " + snapshotRoot);
assertTrue(hdfs.exists(snapshotRoot));
hdfs.allowSnapshot(snapshotRoot);
hdfs.createSnapshot(snapshotRoot, snapshotName);
// Raise both namespace and diskspace quotas to (near) maximum so later
// count checks in tests are not constrained by quota limits.
hdfs.setQuota(snapshotRoot, Long.MAX_VALUE-1, Long.MAX_VALUE-1);
return SnapshotTestHelper.getSnapshotRoot(snapshotRoot, snapshotName);
}
示例5: testRenameWithQuota
import org.apache.hadoop.hdfs.DistributedFileSystem; //导入方法依赖的package包/类
/**
 * Verify options-based rename behavior under namespace quotas on both the
 * source and destination parent directories, including overwrite renames
 * that reuse quota freed by deleting the destination.
 */
@Test
public void testRenameWithQuota() throws Exception {
  DistributedFileSystem dfs = cluster.getFileSystem();
  Path srcA = getTestRootPath(fc, "test/testRenameWithQuota/srcdir/src1");
  Path srcB = getTestRootPath(fc, "test/testRenameWithQuota/srcdir/src2");
  Path dstA = getTestRootPath(fc, "test/testRenameWithQuota/dstdir/dst1");
  Path dstB = getTestRootPath(fc, "test/testRenameWithQuota/dstdir/dst2");
  createFile(srcA);
  createFile(srcB);
  dfs.setQuota(srcA.getParent(), HdfsConstants.QUOTA_DONT_SET,
      HdfsConstants.QUOTA_DONT_SET);
  fc.mkdir(dstA.getParent(), FileContext.DEFAULT_PERM, true);
  dfs.setQuota(dstA.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  // Test1: source fits its quota and the destination parent has room.
  // The first rename consumes the single free quota slot in dstdir...
  rename(srcA, dstA, false, true, false, Rename.NONE);
  // ...and the overwrite rename reuses the slot freed by replacing dstA.
  rename(srcB, dstA, true, true, false, Rename.OVERWRITE);
  // Test2: dstdir's quota is fully used by dstA, so a rename to a fresh
  // destination name must fail.
  createFile(srcB);
  rename(srcB, dstB, false, false, true, Rename.NONE);
  // Test3: shrink srcdir's quota to 1 so it cannot accept a renamed node;
  // renaming to a non-existent destination fails.
  dfs.setQuota(srcA.getParent(), 1, HdfsConstants.QUOTA_DONT_SET);
  rename(dstA, srcA, false, false, true, Rename.NONE);
  // Test4: rename onto an existing destination. Deleting the destination
  // frees exactly the quota the incoming node needs, so the overwrite
  // rename succeeds even with quota 1.
  dfs.setQuota(srcA.getParent(), 100, HdfsConstants.QUOTA_DONT_SET);
  createFile(srcA);
  dfs.setQuota(srcA.getParent(), 1, HdfsConstants.QUOTA_DONT_SET);
  rename(dstA, srcA, true, true, false, Rename.OVERWRITE);
}
示例6: testQuotasTrackedOnStandby
import org.apache.hadoop.hdfs.DistributedFileSystem; //导入方法依赖的package包/类
/**
 * Verify that the standby NameNode tracks quota usage correctly through
 * file creation, append, and deletion by checking its content summary
 * after each mutation is propagated.
 */
@Test(timeout=60000)
public void testQuotasTrackedOnStandby() throws Exception {
  fs.mkdirs(TEST_DIR);
  DistributedFileSystem distFs = (DistributedFileSystem) fs;
  distFs.setQuota(TEST_DIR, NS_QUOTA, DS_QUOTA);
  long consumed = 3 * BLOCK_SIZE + BLOCK_SIZE / 2;
  DFSTestUtil.createFile(fs, TEST_FILE, consumed, (short) 1, 1L);
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  // The standby must report the configured quotas and the space used by
  // the file just created.
  ContentSummary summary = nn1.getRpcServer().getContentSummary(TEST_DIR_STR);
  assertEquals(NS_QUOTA, summary.getQuota());
  assertEquals(DS_QUOTA, summary.getSpaceQuota());
  assertEquals(consumed, summary.getSpaceConsumed());
  assertEquals(1, summary.getDirectoryCount());
  assertEquals(1, summary.getFileCount());
  // Append another block and a half; the standby's space accounting must
  // follow the growth.
  FSDataOutputStream out = fs.append(TEST_FILE);
  try {
    byte[] payload = new byte[(int) (BLOCK_SIZE * 3 / 2)];
    out.write(payload);
    consumed += payload.length;
  } finally {
    IOUtils.closeStream(out);
  }
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  summary = nn1.getRpcServer().getContentSummary(TEST_DIR_STR);
  assertEquals(NS_QUOTA, summary.getQuota());
  assertEquals(DS_QUOTA, summary.getSpaceQuota());
  assertEquals(consumed, summary.getSpaceConsumed());
  assertEquals(1, summary.getDirectoryCount());
  assertEquals(1, summary.getFileCount());
  // Deleting the file should drop consumed space back to zero on the
  // standby as well.
  fs.delete(TEST_FILE, true);
  consumed = 0;
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  summary = nn1.getRpcServer().getContentSummary(TEST_DIR_STR);
  assertEquals(NS_QUOTA, summary.getQuota());
  assertEquals(DS_QUOTA, summary.getSpaceQuota());
  assertEquals(consumed, summary.getSpaceConsumed());
  assertEquals(1, summary.getDirectoryCount());
  assertEquals(0, summary.getFileCount());
}
示例7: testGetFullPathNameAfterSetQuota
import org.apache.hadoop.hdfs.DistributedFileSystem; //导入方法依赖的package包/类
/**
 * Regression test for HDFS-4243: FSDirectory#unprotectedSetQuota replaces
 * the target INodeDirectory with a new INodeDirectoryWithQuota; before the
 * fix, the parent pointers of the directory's children still referenced the
 * old inode. Verifies getFullPathName() resolution after a setQuota
 * followed by a rename of the quota'd directory.
 */
@Test
public void testGetFullPathNameAfterSetQuota() throws Exception {
  long fileLength = 1024;
  replication = 3;
  Configuration configuration = new Configuration();
  MiniDFSCluster miniCluster = null;
  try {
    miniCluster = new MiniDFSCluster.Builder(configuration)
        .numDataNodes(replication).build();
    miniCluster.waitActive();
    FSNamesystem namesystem = miniCluster.getNamesystem();
    FSDirectory fsDir = namesystem.getFSDirectory();
    DistributedFileSystem hdfs = miniCluster.getFileSystem();
    // Create /dir/file as the fixture.
    final Path testDir = new Path("/dir");
    final Path testFile = new Path(testDir, "file");
    DFSTestUtil.createFile(hdfs, testFile, fileLength, replication, 0L);
    // The file's inode must resolve to its own full path name.
    INode fileNode = fsDir.getINode(testFile.toString());
    assertEquals(testFile.toString(), fileNode.getFullPathName());
    // setQuota goes through FSDirectory#unprotectedSetQuota, which swaps
    // the directory inode via INodeDirectory#replaceChild.
    hdfs.setQuota(testDir, Long.MAX_VALUE - 1, replication * fileLength * 10);
    INodeDirectory quotaDir = getDir(fsDir, testDir);
    assertEquals(testDir.toString(), quotaDir.getFullPathName());
    assertTrue(quotaDir.isWithQuota());
    final Path renamedDir = new Path("/newdir");
    final Path renamedFile = new Path(renamedDir, "file");
    // Rename the directory after the inode replacement.
    hdfs.rename(testDir, renamedDir, Options.Rename.OVERWRITE);
    // /dir/file must now resolve as /newdir/file; getFullPathName can only
    // return the correct result if the child's parent pointer was updated
    // to the replacement inode.
    fileNode = fsDir.getINode(renamedFile.toString());
    assertEquals(renamedFile.toString(), fileNode.getFullPathName());
  } finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}