This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.TestFileCreation. If you are wondering what TestFileCreation is for, how to use it, or what real code that calls it looks like, the curated examples below may help.
The TestFileCreation class belongs to the org.apache.hadoop.hdfs package. A total of 15 code examples of the class are shown below, sorted by popularity by default.
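Before turning to the individual examples, here is a minimal, self-contained sketch of how the two TestFileCreation helpers that recur throughout this page are typically combined: createFile(FileSystem, Path, int replication) opens a file and returns an FSDataOutputStream, and writeFile(FSDataOutputStream, int size) writes that many bytes of generated test data to it. The driver class name, path, and sizes in the sketch are illustrative assumptions, not taken from any single example below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.TestFileCreation;

// Hypothetical driver class; the path and byte count are placeholders.
public class TestFileCreationUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      // createFile(fs, path, replication) creates the file and returns its output stream
      FSDataOutputStream out = TestFileCreation.createFile(fs, new Path("/demo/file1.dat"), 1);
      // writeFile(stream, size) appends `size` bytes of generated test data
      TestFileCreation.writeFile(out, 1024);
      out.close();
    } finally {
      cluster.shutdown();
    }
  }
}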
Example 1: writeFile
import org.apache.hadoop.hdfs.TestFileCreation; // import the required package/class
void writeFile(Path file, FSDataOutputStream stm, int size)
    throws IOException {
  long blocksBefore = stm.getPos() / BLOCK_SIZE;
  TestFileCreation.writeFile(stm, BLOCK_SIZE);
  // need to make sure the full block is completely flushed to the DataNodes
  // (see FSOutputSummer#flush)
  stm.flush();
  int blocksAfter = 0;
  // wait until the block is allocated by DataStreamer
  BlockLocation[] locatedBlocks;
  while (blocksAfter <= blocksBefore) {
    locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(
        file.toString(), 0L, BLOCK_SIZE * NUM_BLOCKS);
    blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
  }
}
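The helper above refers to fields that its enclosing test class is expected to provide (BLOCK_SIZE, NUM_BLOCKS, the hdfs file system handle, and, in later examples, BASE_DIR). A minimal sketch of such a fixture is shown below; the concrete values and the JUnit setup/teardown structure are assumptions for illustration, not part of the original test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;
import org.junit.Before;

// Hypothetical fixture; constants and class name are illustrative, not from the original test.
public class BlockCreationFixtureSketch {
  // constants referenced by writeFile() and the testBlockCreation()/testGetBlockLocations() examples
  static final int BLOCK_SIZE = 1024;
  static final int NUM_BLOCKS = 4;
  static final String BASE_DIR = "/test";

  private MiniDFSCluster cluster;
  private DistributedFileSystem hdfs;

  @Before
  public void setUp() throws Exception {
    Configuration conf = new HdfsConfiguration();
    // use a small block size so each writeFile() call fills exactly one block
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    hdfs = cluster.getFileSystem();
  }

  @After
  public void tearDown() throws Exception {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}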
Example 2: testBlockCreation
import org.apache.hadoop.hdfs.TestFileCreation; // import the required package/class
@Test
public void testBlockCreation() throws IOException {
  Path file1 = new Path(BASE_DIR, "file1.dat");
  FSDataOutputStream out = TestFileCreation.createFile(hdfs, file1, 3);
  for (int idx = 0; idx < NUM_BLOCKS; idx++) {
    // write one block
    writeFile(file1, out, BLOCK_SIZE);
    // verify consistency
    verifyFileBlocks(file1.toString(), true);
  }
  // close file
  out.close();
  // verify consistency
  verifyFileBlocks(file1.toString(), false);
}
Example 3: testWebHdfsCreateNonRecursive
import org.apache.hadoop.hdfs.TestFileCreation; // import the required package/class
@Test
public void testWebHdfsCreateNonRecursive() throws IOException, URISyntaxException {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  WebHdfsFileSystem webHdfs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
    TestFileCreation.testFileCreationNonRecursive(webHdfs);
  } finally {
    if (webHdfs != null) {
      webHdfs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 4: testDeleteUnclosed
import org.apache.hadoop.hdfs.TestFileCreation; // import the required package/class
@Test
public void testDeleteUnclosed() throws IOException, InterruptedException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRIES_ON_FAILURE_KEY, 0);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    assertTrue(fs.mkdir(new Path("/foo"), FsPermission.getDefault()));
    TestFileCreation.createFile(fs, new Path("/foo/bar"), 1);
    assertTrue(fs.delete(new Path("/foo/bar"), true));
    assertFalse(fs.exists(new Path("/foo/bar")));
    assertFalse("Not All subtree locks were removed after operation ", subTreeLocksExists());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 5: testDeleteSimple
import org.apache.hadoop.hdfs.TestFileCreation; // import the required package/class
@Test
public void testDeleteSimple() throws IOException, InterruptedException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRIES_ON_FAILURE_KEY, 0);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    assertTrue(fs.mkdir(new Path("/foo"), FsPermission.getDefault()));
    TestFileCreation.createFile(fs, new Path("/foo/bar"), 1).close();
    assertTrue(fs.delete(new Path("/foo"), true));
    assertFalse(fs.exists(new Path("/foo")));
    assertFalse("Not All subtree locks were removed after operation ", subTreeLocksExists());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 6: testDepricatedRenameMoveFiles
import org.apache.hadoop.hdfs.TestFileCreation; // import the required package/class
@Test
public void testDepricatedRenameMoveFiles() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRIES_ON_FAILURE_KEY, 0);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    TestFileCreation.createFile(fs, new Path("/foo/file1.txt"), 1).close();
    TestFileCreation.createFile(fs, new Path("/bar/file1.txt"), 1).close();
    assertTrue("Rename Failed", fs.rename(new Path("/foo/file1.txt"), new Path("/bar/file2.txt")));
    assertFalse("Not All subtree locks were removed after operation ", subTreeLocksExists());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 7: testBlockCreation
import org.apache.hadoop.hdfs.TestFileCreation; // import the required package/class
@Test
public void testBlockCreation() throws IOException {
  Path file1 = new Path(BASE_DIR, "file1.dat");
  FSDataOutputStream out = TestFileCreation.createFile(hdfs, file1, 3);
  for (int idx = 0; idx < NUM_BLOCKS; idx++) {
    // write one block
    writeFile(file1, out, BLOCK_SIZE);
    // verify consistency
    verifyFileBlocks(file1.toString(), true);
  }
  // close file
  out.close();
  // verify consistency
  verifyFileBlocks(file1.toString(), false);
}
Example 8: testGetBlockLocations
import org.apache.hadoop.hdfs.TestFileCreation; // import the required package/class
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);
  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);
  for (int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertTrue(b instanceof BlockInfoContiguousUnderConstruction);
    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  // close file
  out.close();
}
Example 9: testGetBlockLocations
import org.apache.hadoop.hdfs.TestFileCreation; // import the required package/class
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final BlockManager blockManager = cluster.getNamesystem().getBlockManager();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);
  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);
  for (int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertFalse(blockManager.getStoredBlock(b).isComplete());
    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  // close file
  out.close();
}
Example 10: testGetBlockLocations
import org.apache.hadoop.hdfs.TestFileCreation; // import the required package/class
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);
  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);
  for (int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertTrue(b instanceof BlockInfoUnderConstruction);
    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  // close file
  out.close();
}
Example 11: writeFile
import org.apache.hadoop.hdfs.TestFileCreation; // import the required package/class
void writeFile(Path file, FSDataOutputStream stm, int size)
    throws IOException {
  long blocksBefore = stm.getPos() / BLOCK_SIZE;
  TestFileCreation.writeFile(stm, BLOCK_SIZE);
  int blocksAfter = 0;
  // wait until the block is allocated by DataStreamer
  BlockLocation[] locatedBlocks;
  while (blocksAfter <= blocksBefore) {
    locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(
        file.toString(), 0L, BLOCK_SIZE * NUM_BLOCKS);
    blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
  }
}
Example 12: testDelete
import org.apache.hadoop.hdfs.TestFileCreation; // import the required package/class
@Test
public void testDelete() throws IOException, InterruptedException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRIES_ON_FAILURE_KEY, 0);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    assertTrue(fs.mkdir(new Path("/foo"), FsPermission.getDefault()));
    TestFileCreation.createFile(fs, new Path("/foo/bar"), 1).close();
    assertTrue(fs.delete(new Path("/foo/bar"), false));
    assertFalse(fs.exists(new Path("/foo/bar")));
    TestFileCreation.createFile(fs, new Path("/foo/bar"), 1).close();
    assertTrue(fs.delete(new Path("/foo/bar"), true));
    assertFalse(fs.exists(new Path("/foo/bar")));
    assertTrue(fs.mkdir(new Path("/foo/bar"), FsPermission.getDefault()));
    TestFileCreation.createFile(fs, new Path("/foo/bar/foo"), 1).close();
    assertTrue(fs.delete(new Path("/foo"), true));
    assertFalse(fs.exists(new Path("/foo/bar/foo")));
    assertFalse(fs.exists(new Path("/foo/bar/foo")));
    assertFalse(fs.exists(new Path("/foo/bar")));
    assertFalse(fs.exists(new Path("/foo")));
    assertFalse("Not All subtree locks were removed after operation ", subTreeLocksExists());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 13: testMove
import org.apache.hadoop.hdfs.TestFileCreation; // import the required package/class
@Test
public void testMove() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRIES_ON_FAILURE_KEY, 0);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    assertTrue(fs.mkdir(new Path("/foo"), FsPermission.getDefault()));
    TestFileCreation.createFile(fs, new Path("/foo/bar"), 1).close();
    assertTrue(fs.mkdir(new Path("/foo1"), FsPermission.getDefault()));
    TestFileCreation.createFile(fs, new Path("/foo1/bar1"), 1).close();
    fs.rename(new Path("/foo1/bar1"), new Path("/foo/bar1"),
        Options.Rename.OVERWRITE);
    assertTrue(fs.exists(new Path("/foo/bar1")));
    assertFalse(fs.exists(new Path("/foo1/bar1")));
    assertFalse("Not All subtree locks were removed after operation ", subTreeLocksExists());
    try {
      fs.rename(new Path("/foo1/bar"), new Path("/foo/bar1"),
          Options.Rename.OVERWRITE);
      fail();
    } catch (FileNotFoundException e) {
      // expected: the rename source does not exist
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 14: testSubtreeIgnoreLockRequest
import org.apache.hadoop.hdfs.TestFileCreation; // import the required package/class
@Test
public void testSubtreeIgnoreLockRequest() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRIES_ON_FAILURE_KEY, 0);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem dfs = cluster.getFileSystem();
    dfs.mkdirs(new Path("/foo"));
    TestFileCreation.createFile(dfs, new Path("/foo/file1.txt"), 1).close();
    boolean isException = false;
    Exception exception = null;
    INodeIdentifier inode = cluster.getNamesystem().lockSubtree("/foo/file1.txt",
        SubTreeOperation.StoOperationType.NA);
    if (inode != null) {
      fail("nothing should have been locked");
    }
    inode = cluster.getNamesystem().lockSubtree("/", SubTreeOperation.StoOperationType.NA);
    if (inode != null) {
      fail("root should not have been locked");
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 15: writeFile
import org.apache.hadoop.hdfs.TestFileCreation; // import the required package/class
void writeFile(Path file, FSDataOutputStream stm, int size)
    throws IOException {
  long blocksBefore = stm.getPos() / BLOCK_SIZE;
  TestFileCreation.writeFile(stm, BLOCK_SIZE);
  int blocksAfter = 0;
  // wait until the block is allocated by DataStreamer
  BlockLocation[] locatedBlocks;
  while (blocksAfter <= blocksBefore) {
    locatedBlocks = DFSClientAdapter.getDFSClient(hdfs)
        .getBlockLocations(file.toString(), 0L, BLOCK_SIZE * NUM_BLOCKS);
    blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
  }
}