This article collects typical usage examples of the Java class org.apache.hadoop.fs.contract.ContractTestUtils. If you are wondering what the ContractTestUtils class does, how to use it, or where to find usage examples, the curated class code examples below may help.
The ContractTestUtils class belongs to the org.apache.hadoop.fs.contract package. Fifteen code examples of the ContractTestUtils class are shown below, sorted by popularity by default.
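Before the individual examples, here is a minimal sketch of the round-trip pattern most of them build on: generate a deterministic dataset, write it to a file, verify the contents, and clean up. The FileSystem instance `fs` and the path `/test/example/data.bin` are illustrative placeholders, not taken from any example below.

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;

public class ContractTestUtilsSketch {
  public static void roundTrip(FileSystem fs) throws Exception {
    // hypothetical test path; point it at a writable location in your setup
    Path file = new Path("/test/example/data.bin");
    // deterministic 256-byte test pattern (base 'a', modulo 'z'),
    // the same dataset() helper used throughout the examples below
    byte[] data = ContractTestUtils.dataset(256, 'a', 'z');
    // write the dataset, overwriting any existing file
    ContractTestUtils.createFile(fs, file, true, data);
    // read the file back and assert the contents match byte for byte
    ContractTestUtils.verifyFileContents(fs, file, data);
    // delete the file (non-recursively) and assert the deletion took effect
    ContractTestUtils.assertDeleted(fs, file, false);
  }
}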
Example 1: createTestFiles
import org.apache.hadoop.fs.contract.ContractTestUtils; // import the required package/class
public static int createTestFiles(File sourceDir, int size)
    throws IOException {
  File subdir = new File(sourceDir, "subdir");
  int expected = 0;
  mkdirs(subdir);
  File top = new File(sourceDir, "top");
  FileUtils.write(top, "toplevel");
  expected++;
  for (int i = 0; i < size; i++) {
    String text = String.format("file-%02d", i);
    File f = new File(subdir, text);
    FileUtils.write(f, f.toString());
  }
  expected += size;
  // and write the largest file
  File largest = new File(subdir, "largest");
  FileUtils.writeByteArrayToFile(largest,
      ContractTestUtils.dataset(8192, 32, 64));
  expected++;
  return expected;
}
Example 2: testOpenCreate
import org.apache.hadoop.fs.contract.ContractTestUtils; // import the required package/class
@Test
public void testOpenCreate() throws IOException {
  Path dir = new Path("/tests3a");
  ContractTestUtils.createAndVerifyFile(fs, dir, 1024);
  ContractTestUtils.createAndVerifyFile(fs, dir, 5 * 1024 * 1024);
  ContractTestUtils.createAndVerifyFile(fs, dir, 20 * 1024 * 1024);
  /*
  Enable to test the multipart upload
  try {
    ContractTestUtils.createAndVerifyFile(fs, dir,
        (long) 6 * 1024 * 1024 * 1024);
  } catch (IOException e) {
    fail(e.getMessage());
  }
  */
}
Example 3: deepDirectoryStructure
import org.apache.hadoop.fs.contract.ContractTestUtils; // import the required package/class
/**
 * Executes a test using a file system sub-tree with multiple nesting levels.
 *
 * @param srcFS source FileSystem
 * @param srcDir source directory
 * @param dstFS destination FileSystem
 * @param dstDir destination directory
 * @throws Exception if there is a failure
 */
private void deepDirectoryStructure(FileSystem srcFS, Path srcDir,
    FileSystem dstFS, Path dstDir) throws Exception {
  Path inputDir = new Path(srcDir, "inputDir");
  Path inputSubDir1 = new Path(inputDir, "subDir1");
  Path inputSubDir2 = new Path(inputDir, "subDir2/subDir3");
  Path inputFile1 = new Path(inputDir, "file1");
  Path inputFile2 = new Path(inputSubDir1, "file2");
  Path inputFile3 = new Path(inputSubDir2, "file3");
  mkdirs(srcFS, inputSubDir1);
  mkdirs(srcFS, inputSubDir2);
  byte[] data1 = dataset(100, 33, 43);
  createFile(srcFS, inputFile1, true, data1);
  byte[] data2 = dataset(200, 43, 53);
  createFile(srcFS, inputFile2, true, data2);
  byte[] data3 = dataset(300, 53, 63);
  createFile(srcFS, inputFile3, true, data3);
  Path target = new Path(dstDir, "outputDir");
  runDistCp(inputDir, target);
  ContractTestUtils.assertIsDirectory(dstFS, target);
  verifyFileContents(dstFS, new Path(target, "inputDir/file1"), data1);
  verifyFileContents(dstFS,
      new Path(target, "inputDir/subDir1/file2"), data2);
  verifyFileContents(dstFS,
      new Path(target, "inputDir/subDir2/subDir3/file3"), data3);
}
Example 4: largeFiles
import org.apache.hadoop.fs.contract.ContractTestUtils; // import the required package/class
/**
 * Executes a test using multiple large files.
 *
 * @param srcFS source FileSystem
 * @param srcDir source directory
 * @param dstFS destination FileSystem
 * @param dstDir destination directory
 * @throws Exception if there is a failure
 */
private void largeFiles(FileSystem srcFS, Path srcDir, FileSystem dstFS,
    Path dstDir) throws Exception {
  Path inputDir = new Path(srcDir, "inputDir");
  Path inputFile1 = new Path(inputDir, "file1");
  Path inputFile2 = new Path(inputDir, "file2");
  Path inputFile3 = new Path(inputDir, "file3");
  mkdirs(srcFS, inputDir);
  int fileSizeKb = conf.getInt("scale.test.distcp.file.size.kb", 10 * 1024);
  int fileSizeMb = fileSizeKb / 1024;
  getLog().info("{} with file size {}", testName.getMethodName(), fileSizeMb);
  byte[] data1 = dataset((fileSizeMb + 1) * 1024 * 1024, 33, 43);
  createFile(srcFS, inputFile1, true, data1);
  byte[] data2 = dataset((fileSizeMb + 2) * 1024 * 1024, 43, 53);
  createFile(srcFS, inputFile2, true, data2);
  byte[] data3 = dataset((fileSizeMb + 3) * 1024 * 1024, 53, 63);
  createFile(srcFS, inputFile3, true, data3);
  Path target = new Path(dstDir, "outputDir");
  runDistCp(inputDir, target);
  ContractTestUtils.assertIsDirectory(dstFS, target);
  verifyFileContents(dstFS, new Path(target, "inputDir/file1"), data1);
  verifyFileContents(dstFS, new Path(target, "inputDir/file2"), data2);
  verifyFileContents(dstFS, new Path(target, "inputDir/file3"), data3);
}
Example 5: adjustContractToLocalEnvironment
import org.apache.hadoop.fs.contract.ContractTestUtils; // import the required package/class
/**
 * Tweak some of the contract parameters based on the local system state.
 */
protected void adjustContractToLocalEnvironment() {
  if (Shell.WINDOWS) {
    // NTFS doesn't do case sensitivity, and its permissions are ACL-based
    getConf().setBoolean(getConfKey(ContractOptions.IS_CASE_SENSITIVE),
        false);
    getConf().setBoolean(getConfKey(ContractOptions.SUPPORTS_UNIX_PERMISSIONS),
        false);
  } else if (ContractTestUtils.isOSX()) {
    // OSX HFS+ is not case sensitive
    getConf().setBoolean(getConfKey(ContractOptions.IS_CASE_SENSITIVE),
        false);
  }
}
Example 6: testRenameWithNonEmptySubDirPOSIX
import org.apache.hadoop.fs.contract.ContractTestUtils; // import the required package/class
/**
 * Test the fallback rename code <code>handleEmptyDstDirectoryOnWindows()</code>
 * even on non-Windows platforms, where the normal <code>File.renameTo()</code>
 * is supposed to work well. This test was added for HADOOP-9805.
 *
 * @see AbstractContractRenameTest#testRenameWithNonEmptySubDirPOSIX()
 */
@Test
public void testRenameWithNonEmptySubDirPOSIX() throws Throwable {
  final Path renameTestDir = path("testRenameWithNonEmptySubDir");
  final Path srcDir = new Path(renameTestDir, "src1");
  final Path srcSubDir = new Path(srcDir, "sub");
  final Path finalDir = new Path(renameTestDir, "dest");
  FileSystem fs = getFileSystem();
  ContractTestUtils.rm(fs, renameTestDir, true, false);

  fs.mkdirs(srcDir);
  fs.mkdirs(finalDir);
  ContractTestUtils.writeTextFile(fs, new Path(srcDir, "source.txt"),
      "this is the file in src dir", false);
  ContractTestUtils.writeTextFile(fs, new Path(srcSubDir, "subfile.txt"),
      "this is the file in src/sub dir", false);

  ContractTestUtils.assertPathExists(fs, "not created in src dir",
      new Path(srcDir, "source.txt"));
  ContractTestUtils.assertPathExists(fs, "not created in src/sub dir",
      new Path(srcSubDir, "subfile.txt"));

  RawLocalFileSystem rlfs = (RawLocalFileSystem) fs;
  rlfs.handleEmptyDstDirectoryOnWindows(srcDir, rlfs.pathToFile(srcDir),
      finalDir, rlfs.pathToFile(finalDir));

  // Accept only POSIX rename behavior in this test
  ContractTestUtils.assertPathExists(fs, "not renamed into dest dir",
      new Path(finalDir, "source.txt"));
  ContractTestUtils.assertPathExists(fs, "not renamed into dest/sub dir",
      new Path(finalDir, "sub/subfile.txt"));
  ContractTestUtils.assertPathDoesNotExist(fs, "not deleted",
      new Path(srcDir, "source.txt"));
}
Example 7: testRenameFileBeingAppended
import org.apache.hadoop.fs.contract.ContractTestUtils; // import the required package/class
@Override
public void testRenameFileBeingAppended() throws Throwable {
  try {
    super.testRenameFileBeingAppended();
    fail("Expected a FileNotFoundException");
  } catch (FileNotFoundException e) {
    // downgrade the failure to a skipped test
    ContractTestUtils.downgrade("Renaming an open file "
        + "still creates the old path", e);
  }
}
Example 8: testFastMultiPartUpload
import org.apache.hadoop.fs.contract.ContractTestUtils; // import the required package/class
@Test
public void testFastMultiPartUpload() throws Exception {
  conf.setBoolean(Constants.FAST_UPLOAD, true);
  fs = S3ATestUtils.createTestFileSystem(conf);
  ContractTestUtils.createAndVerifyFile(fs, getTestPath(),
      16 * 1024 * 1024);
}
Example 9: testOpenReadDir
import org.apache.hadoop.fs.contract.ContractTestUtils; // import the required package/class
@Override
public void testOpenReadDir() throws Throwable {
  ContractTestUtils.skip("Skipping object-store quirk");
}
Example 10: testOpenReadDirWithChild
import org.apache.hadoop.fs.contract.ContractTestUtils; // import the required package/class
@Override
public void testOpenReadDirWithChild() throws Throwable {
  ContractTestUtils.skip("Skipping object-store quirk");
}
Example 11: testOverwriteEmptyDirectory
import org.apache.hadoop.fs.contract.ContractTestUtils; // import the required package/class
@Override
public void testOverwriteEmptyDirectory() throws Throwable {
  ContractTestUtils.skip(
      "blobstores can't distinguish empty directories from files");
}
Example 12: testOverwriteEmptyDirectory
import org.apache.hadoop.fs.contract.ContractTestUtils; // import the required package/class
@Override
public void testOverwriteEmptyDirectory() throws Throwable {
  ContractTestUtils.skip(
      "blobstores can't distinguish empty directories from files");
}
Example 13: testRegularUpload
import org.apache.hadoop.fs.contract.ContractTestUtils; // import the required package/class
@Test
public void testRegularUpload() throws IOException {
  ContractTestUtils.createAndVerifyFile(fs, getTestPath(), 1024 * 1024);
}
Example 14: testMultiPartUpload
import org.apache.hadoop.fs.contract.ContractTestUtils; // import the required package/class
@Test
public void testMultiPartUpload() throws IOException {
  ContractTestUtils.createAndVerifyFile(fs, getTestPath(),
      6 * 1024 * 1024);
}
Example 15: testBulkRenameAndDelete
import org.apache.hadoop.fs.contract.ContractTestUtils; // import the required package/class
@Test
public void testBulkRenameAndDelete() throws Throwable {
  final Path scaleTestDir = getTestPath();
  final Path srcDir = new Path(scaleTestDir, "src");
  final Path finalDir = new Path(scaleTestDir, "final");
  final long count = getOperationCount();
  ContractTestUtils.rm(fs, scaleTestDir, true, false);
  fs.mkdirs(srcDir);
  fs.mkdirs(finalDir);

  int testBufferSize = fs.getConf()
      .getInt(ContractTestUtils.IO_CHUNK_BUFFER_SIZE,
          ContractTestUtils.DEFAULT_IO_CHUNK_BUFFER_SIZE);
  // use an Executor to speed up file creation
  ExecutorService exec = Executors.newFixedThreadPool(16);
  final ExecutorCompletionService<Boolean> completionService =
      new ExecutorCompletionService<Boolean>(exec);
  try {
    final byte[] data = ContractTestUtils.dataset(testBufferSize, 'a', 'z');

    for (int i = 0; i < count; ++i) {
      final String fileName = "foo-" + i;
      completionService.submit(new Callable<Boolean>() {
        @Override
        public Boolean call() throws IOException {
          ContractTestUtils.createFile(fs, new Path(srcDir, fileName),
              false, data);
          return fs.exists(new Path(srcDir, fileName));
        }
      });
    }

    for (int i = 0; i < count; ++i) {
      final Future<Boolean> future = completionService.take();
      try {
        if (!future.get()) {
          LOG.warn("cannot create file");
        }
      } catch (ExecutionException e) {
        LOG.warn("Error while uploading file", e.getCause());
        throw e;
      }
    }
  } finally {
    exec.shutdown();
  }

  int nSrcFiles = fs.listStatus(srcDir).length;
  fs.rename(srcDir, finalDir);
  assertEquals(nSrcFiles, fs.listStatus(finalDir).length);
  ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
      new Path(srcDir, "foo-" + 0));
  ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
      new Path(srcDir, "foo-" + count / 2));
  ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
      new Path(srcDir, "foo-" + (count - 1)));
  ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
      new Path(finalDir, "foo-" + 0));
  ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
      new Path(finalDir, "foo-" + count / 2));
  ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
      new Path(finalDir, "foo-" + (count - 1)));
  ContractTestUtils.assertDeleted(fs, finalDir, true, false);
}