Java TestFileCreation.writeFile Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.TestFileCreation.writeFile. If you are unsure what TestFileCreation.writeFile does or how to call it, the curated examples below may help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hdfs.TestFileCreation.


Six code examples of TestFileCreation.writeFile are shown below, sorted by popularity by default.
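
Before the examples, a minimal sketch of the typical call pattern may help: TestFileCreation.createFile opens an FSDataOutputStream for a new HDFS file, and TestFileCreation.writeFile(stm, size) fills it with size pseudo-random bytes. The sketch below assumes a single-node MiniDFSCluster; the class name, path, and byte count are illustrative assumptions, not taken from any of the projects listed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.TestFileCreation;

public class WriteFileSketch { // hypothetical class name, for illustration only
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      Path file = new Path("/demo/data.bin"); // illustrative path
      // create the file with replication factor 1 ...
      FSDataOutputStream stm = TestFileCreation.createFile(fs, file, 1);
      // ... and write 1024 pseudo-random bytes through the stream
      TestFileCreation.writeFile(stm, 1024);
      stm.close();
      System.out.println("wrote " + fs.getFileStatus(file).getLen() + " bytes");
    } finally {
      cluster.shutdown();
    }
  }
}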

Example 1: writeFile

import org.apache.hadoop.hdfs.TestFileCreation; // import the package/class the method depends on
void writeFile(Path file, FSDataOutputStream stm, int size)
throws IOException {
  long blocksBefore = stm.getPos() / BLOCK_SIZE;
  
  TestFileCreation.writeFile(stm, BLOCK_SIZE);
  // need to make sure the full block is completely flushed to the DataNodes
  // (see FSOutputSummer#flush)
  stm.flush();
  int blocksAfter = 0;
  // wait until the block is allocated by DataStreamer
  BlockLocation[] locatedBlocks;
  while(blocksAfter <= blocksBefore) {
    locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(
        file.toString(), 0L, BLOCK_SIZE*NUM_BLOCKS);
    blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 18, Source file: TestBlockUnderConstruction.java

Example 2: writeFile

import org.apache.hadoop.hdfs.TestFileCreation; // import the package/class the method depends on
void writeFile(Path file, FSDataOutputStream stm, int size)
throws IOException {
  long blocksBefore = stm.getPos() / BLOCK_SIZE;
  
  TestFileCreation.writeFile(stm, BLOCK_SIZE);
  int blocksAfter = 0;
  // wait until the block is allocated by DataStreamer
  BlockLocation[] locatedBlocks;
  while(blocksAfter <= blocksBefore) {
    locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(
        file.toString(), 0L, BLOCK_SIZE*NUM_BLOCKS);
    blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
  }
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 15, Source file: TestBlockUnderConstruction.java

Example 3: writeFile

import org.apache.hadoop.hdfs.TestFileCreation; // import the package/class the method depends on
void writeFile(Path file, FSDataOutputStream stm, int size)
    throws IOException {
  long blocksBefore = stm.getPos() / BLOCK_SIZE;
  
  TestFileCreation.writeFile(stm, BLOCK_SIZE);
  int blocksAfter = 0;
  // wait until the block is allocated by DataStreamer
  BlockLocation[] locatedBlocks;
  while (blocksAfter <= blocksBefore) {
    locatedBlocks = DFSClientAdapter.getDFSClient(hdfs)
        .getBlockLocations(file.toString(), 0L, BLOCK_SIZE * NUM_BLOCKS);
    blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
  }
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 15, Source file: TestBlockUnderConstruction.java

Example 4: testBasicPutGet

import org.apache.hadoop.hdfs.TestFileCreation; // import the package/class the method depends on
@Test
public void testBasicPutGet()
    throws IOException, URISyntaxException,
    ServiceException, NoSuchAlgorithmException {
  S3HdfsPath s3HdfsPath = testUtil.setUpS3HdfsPath("rewrite", "readme.txt");

  // Create the file and blank metadata in HDFS (but not in S3)
  Path path = new Path(s3HdfsPath.getFullHdfsObjPath());
  FSDataOutputStream out =
      TestFileCreation.createFile(hdfs, path, 3);
  TestFileCreation.writeFile(out, 128);
  out.close();

  Path pathMeta = new Path(s3HdfsPath.getFullHdfsMetaPath());
  FSDataOutputStream outMeta = hdfs.create(pathMeta);
  outMeta.close();

  // Get the object
  S3Bucket bucket = new S3Bucket(s3HdfsPath.getBucketName());
  String objectKey = s3HdfsPath.getObjectName();

  S3Object returnedObject1 = s3Service.getObject(bucket.getName(), objectKey);
  System.out.println("RETURNED_OBJECT_1");
  System.out.println(returnedObject1); // the returned object carries a DataInputStream

  // Verify the object
  assertEquals(bucket.getName(), returnedObject1.getBucketName());
  assertEquals(objectKey, returnedObject1.getKey());

  // verify returned data
  testUtil.compareS3ObjectWithHdfsFile(returnedObject1.getDataInputStream(),
      path);

  // List objects
  S3Object[] ls = s3Service.listObjects(bucket.getName());
  assertEquals("Should be one object", 1, ls.length);
  System.out.println("LISTED_OBJECTS_1");
  System.out.println(ls[0]);
}
 
Developer ID: WANdisco, Project: s3hdfs, Lines of code: 40, Source file: TestBasicUsage.java

Example 5: writeFile

import org.apache.hadoop.hdfs.TestFileCreation; // import the package/class the method depends on
void writeFile(Path file, FSDataOutputStream stm, int size)
throws IOException {
  long blocksBefore = stm.getPos() / BLOCK_SIZE;
  
  TestFileCreation.writeFile(stm, BLOCK_SIZE);
  int blocksAfter = 0;
  // wait until the block is allocated by DataStreamer
  BlockLocation[] locatedBlocks;
  while(blocksAfter <= blocksBefore) {
    locatedBlocks = hdfs.getClient().getBlockLocations(
        file.toString(), 0L, BLOCK_SIZE*NUM_BLOCKS);
    blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
  }
}
 
Developer ID: cumulusyebl, Project: cumulus, Lines of code: 15, Source file: TestBlockUnderConstruction.java

Example 6: testCountingFileTree

import org.apache.hadoop.hdfs.TestFileCreation; // import the package/class the method depends on
@Test
public void testCountingFileTree() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRIES_ON_FAILURE_KEY, 0);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();

    Path path0 = new Path("/folder0");
    Path path1 = new Path(path0.toUri().getPath(), "folder1");
    Path path2 = new Path(path1.toUri().getPath(), "folder2");
    Path file0 = new Path(path0.toUri().getPath(), "file0");
    Path file1 = new Path(path1.toUri().getPath(), "file1");
    Path file2 = new Path(path2.toUri().getPath(), "file2");
    Path file3 = new Path(path2.toUri().getPath(), "file3");

    DistributedFileSystem dfs = cluster.getFileSystem();
    dfs.mkdir(path0, FsPermission.getDefault());
    dfs.mkdir(path1, FsPermission.getDefault());
    dfs.mkdir(path2, FsPermission.getDefault());
    dfs.create(file0).close();
    final int bytes0 = 123;
    FSDataOutputStream stm = dfs.create(file1);
    TestFileCreation.writeFile(stm, bytes0);
    stm.close();
    dfs.create(file2).close();
    final int bytes1 = 253;
    stm = dfs.create(file3);
    TestFileCreation.writeFile(stm, bytes1);
    stm.close();

    AbstractFileTree.CountingFileTree fileTree = AbstractFileTree
            .createCountingFileTreeFromPath(cluster.getNamesystem(),
            path0.toUri().getPath());
    fileTree.buildUp();
    assertEquals(7, fileTree.getNamespaceCount());
    assertEquals(bytes0 + bytes1, fileTree.getDiskspaceCount());
    assertEquals(3, fileTree.getDirectoryCount());
    assertEquals(4, fileTree.getFileCount());
    assertEquals(fileTree.getDiskspaceCount(), fileTree.getFileSizeSummary());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 48, Source file: TestSubtreeLock.java


Note: the org.apache.hadoop.hdfs.TestFileCreation.writeFile examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce this article without permission.