

Java HdfsDataInputStream.close Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.client.HdfsDataInputStream.close. If you are wondering what HdfsDataInputStream.close does or how to use it, the curated code examples below may help. You can also explore further usage examples of the containing class, org.apache.hadoop.hdfs.client.HdfsDataInputStream.


The following shows 9 code examples of HdfsDataInputStream.close, drawn from open-source projects and sorted by popularity by default.
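Before the project examples, here is a minimal, self-contained sketch of the usual open/read/close pattern. It is illustrative only, not code taken from any of the projects below: it assumes an HDFS deployment reachable through the default Configuration (so that FileSystem.open returns an HdfsDataInputStream) and a hypothetical file path /tmp/example.dat.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

public class HdfsCloseExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at an HDFS cluster, so open() returns an HdfsDataInputStream.
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/example.dat"); // hypothetical path
    HdfsDataInputStream in = (HdfsDataInputStream) fs.open(path);
    try {
      System.out.println("visible length: " + in.getVisibleLength());
      in.read();
    } finally {
      in.close(); // release the stream and its underlying block readers
    }
  }
}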

Example 1: checkFile

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the package/class this method depends on
static void checkFile(Path p, int expectedsize, final Configuration conf
    ) throws IOException, InterruptedException {
  //open the file with another user account
  final String username = UserGroupInformation.getCurrentUser().getShortUserName()
      + "_" + ++userCount;

  UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username, 
                               new String[] {"supergroup"});
  
  final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf);
  
  final HdfsDataInputStream in = (HdfsDataInputStream)fs.open(p);

  //Check visible length
  Assert.assertTrue(in.getVisibleLength() >= expectedsize);

  //Able to read?
  for(int i = 0; i < expectedsize; i++) {
    Assert.assertEquals((byte)i, (byte)in.read());  
  }

  in.close();
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: TestReadWhileWriting.java

Example 2: zeroCopyRead

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the package/class this method depends on
void zeroCopyRead(FileSystem fs, String path,
    int readSize, int nloop) throws IOException {

  long start_ts, end_ts, len = 0;

  ByteBuffer bb = ByteBuffer.allocate(readSize);
  HdfsDataInputStream fsis = (HdfsDataInputStream) fs.open(new Path(path));
  for (int i = 0; i < nloop; i++) {
    // Re-read the whole file from the beginning on every pass.
    fsis.seek(0);
    len = 0;
    start_ts = System.nanoTime();
    while (true) {
      bb = fsis.rdmaRead(readSize);
      if (bb == null) break;
      len += bb.remaining();
    }
    end_ts = System.nanoTime();
    // Report throughput for this pass.
    System.out.println(((double) len / (end_ts - start_ts)) + " GB/s");
  }
  fsis.close();
}
 
Developer: songweijia, Project: fffs, Lines: 23, Source: FileTester.java

Example 3: checkFile

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the package/class this method depends on
static void checkFile(Path p, int expectedsize, final Configuration conf)
    throws IOException, InterruptedException {
  //open the file with another user account
  final String username =
      UserGroupInformation.getCurrentUser().getShortUserName() + "_" +
          ++userCount;

  UserGroupInformation ugi = UserGroupInformation
      .createUserForTesting(username, new String[]{"supergroup"});
  
  final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf);
  
  final HdfsDataInputStream in = (HdfsDataInputStream) fs.open(p);

  //Check visible length
  Assert.assertTrue(in.getVisibleLength() >= expectedsize);

  //Able to read?
  for (int i = 0; i < expectedsize; i++) {
    Assert.assertEquals((byte) i, (byte) in.read());
  }

  in.close();
}
 
Developer: hopshadoop, Project: hops, Lines: 25, Source: TestReadWhileWriting.java

Example 4: getFirstBlock

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the package/class this method depends on
public static ExtendedBlock getFirstBlock(FileSystem fs, Path path) throws IOException {
  HdfsDataInputStream in = (HdfsDataInputStream) fs.open(path);
  try {
    in.readByte();
    return in.getCurrentBlock();
  } finally {
    in.close();
  }
}
 
Developer: naver, Project: hadoop, Lines: 10, Source: DFSTestUtil.java

Example 5: testRamDiskShortCircuitRead

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the package/class this method depends on
/**
 * Read in-memory block with Short Circuit Read
 * Note: the test uses faked RAM_DISK from physical disk.
 */
@Test
public void testRamDiskShortCircuitRead()
    throws IOException, InterruptedException, TimeoutException {
  getClusterBuilder().setUseScr(true).build();
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  final int SEED = 0xFADED;
  Path path = new Path("/" + METHOD_NAME + ".dat");

  // Create a file and wait till it is persisted.
  makeRandomTestFile(path, BLOCK_SIZE, true, SEED);
  ensureFileReplicasOnStorageType(path, RAM_DISK);
  waitForMetric("RamDiskBlocksLazyPersisted", 1);

  HdfsDataInputStream fis = (HdfsDataInputStream) fs.open(path);

  // Verify SCR read counters
  try {
    byte[] buf = new byte[BUFFER_LENGTH];
    fis.read(0, buf, 0, BUFFER_LENGTH);
    Assert.assertEquals(BUFFER_LENGTH,
      fis.getReadStatistics().getTotalBytesRead());
    Assert.assertEquals(BUFFER_LENGTH,
      fis.getReadStatistics().getTotalShortCircuitBytesRead());
  } finally {
    fis.close();
    fis = null;
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 33, Source: TestScrLazyPersistFiles.java

Example 6: checkFileContentDirect

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the package/class this method depends on
/** Check the file content, reading as user {@code readingUser} */
static void checkFileContentDirect(URI uri, Path name, byte[] expected,
    int readOffset, String readingUser, Configuration conf,
    boolean legacyShortCircuitFails)
    throws IOException, InterruptedException {
  // Ensure short circuit is enabled
  DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
  ClientContext clientContext = ClientContext.getFromConf(conf);
  if (legacyShortCircuitFails) {
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }
  
  HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);

  ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);

  IOUtils.skipFully(stm, readOffset);

  actual.limit(3);

  //Read a small number of bytes first.
  int nread = stm.read(actual);
  actual.limit(nread + 2);
  nread += stm.read(actual);

  // Read across chunk boundary
  actual.limit(Math.min(actual.capacity(), nread + 517));
  nread += stm.read(actual);
  checkData(arrayFromByteBuffer(actual), readOffset, expected, nread,
      "A few bytes");
  //Now read rest of it
  actual.limit(actual.capacity());
  while (actual.hasRemaining()) {
    int nbytes = stm.read(actual);

    if (nbytes < 0) {
      throw new EOFException("End of file reached before reading fully.");
    }
    nread += nbytes;
  }
  checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
  if (legacyShortCircuitFails) {
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }
  stm.close();
}
 
Developer: naver, Project: hadoop, Lines: 47, Source: TestShortCircuitLocalRead.java

Example 7: checkFileContentDirect

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the package/class this method depends on
/** Check the file content, reading as user {@code readingUser} */
static void checkFileContentDirect(URI uri, Path name, byte[] expected,
    int readOffset, String readingUser, Configuration conf,
    boolean legacyShortCircuitFails)
    throws IOException, InterruptedException {
  // Ensure short circuit is enabled
  DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
  ClientContext clientContext = ClientContext.getFromConf(conf);
  if (legacyShortCircuitFails) {
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }

  HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);

  ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);

  IOUtils.skipFully(stm, readOffset);

  actual.limit(3);

  //Read a small number of bytes first.
  int nread = stm.read(actual);
  actual.limit(nread + 2);
  nread += stm.read(actual);

  // Read across chunk boundary
  actual.limit(Math.min(actual.capacity(), nread + 517));
  nread += stm.read(actual);
  checkData(arrayFromByteBuffer(actual), readOffset, expected, nread,
      "A few bytes");
  //Now read rest of it
  actual.limit(actual.capacity());
  while (actual.hasRemaining()) {
    int nbytes = stm.read(actual);

    if (nbytes < 0) {
      throw new EOFException("End of file reached before reading fully.");
    }
    nread += nbytes;
  }
  checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
  if (legacyShortCircuitFails) {
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }
  stm.close();
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 47, Source: TestShortCircuitLocalRead.java

Example 8: testExternalBlockReader

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the package/class this method depends on
@Test
public void testExternalBlockReader() throws Exception {
  Configuration conf = new Configuration();
  conf.set(HdfsClientConfigKeys.REPLICA_ACCESSOR_BUILDER_CLASSES_KEY,
      SyntheticReplicaAccessorBuilder.class.getName());
  conf.setLong(HdfsClientConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
  String uuid = UUID.randomUUID().toString();
  conf.set(SYNTHETIC_BLOCK_READER_TEST_UUID_KEY, uuid);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1)
      .build();
  final int TEST_LENGTH = 2047;
  DistributedFileSystem dfs = cluster.getFileSystem();
  try {
    DFSTestUtil.createFile(dfs, new Path("/a"), TEST_LENGTH, (short)1, SEED);
    HdfsDataInputStream stream =
        (HdfsDataInputStream)dfs.open(new Path("/a"));
    byte buf[] = new byte[TEST_LENGTH];
    stream.seek(1000);
    IOUtils.readFully(stream, buf, 1000, TEST_LENGTH - 1000);
    stream.seek(0);
    IOUtils.readFully(stream, buf, 0, 1000);
    byte expected[] = DFSTestUtil.
        calculateFileContentsFromSeed(SEED, TEST_LENGTH);
    ReadStatistics stats = stream.getReadStatistics();
    Assert.assertEquals(1024, stats.getTotalShortCircuitBytesRead());
    Assert.assertEquals(2047, stats.getTotalLocalBytesRead());
    Assert.assertEquals(2047, stats.getTotalBytesRead());
    Assert.assertArrayEquals(expected, buf);
    stream.close();
    ExtendedBlock block = DFSTestUtil.getFirstBlock(dfs, new Path("/a"));
    Assert.assertNotNull(block);
    LinkedList<SyntheticReplicaAccessor> accessorList = accessors.get(uuid);
    Assert.assertNotNull(accessorList);
    Assert.assertEquals(3, accessorList.size());
    SyntheticReplicaAccessor accessor = accessorList.get(0);
    Assert.assertTrue(accessor.builder.allowShortCircuit);
    Assert.assertEquals(block.getBlockPoolId(),
        accessor.builder.blockPoolId);
    Assert.assertEquals(block.getBlockId(),
        accessor.builder.blockId);
    Assert.assertEquals(dfs.getClient().clientName,
        accessor.builder.clientName);
    Assert.assertEquals("/a", accessor.builder.fileName);
    Assert.assertEquals(block.getGenerationStamp(),
        accessor.getGenerationStamp());
    Assert.assertTrue(accessor.builder.verifyChecksum);
    Assert.assertEquals(1024L, accessor.builder.visibleLength);
    Assert.assertEquals(24L, accessor.totalRead);
    Assert.assertEquals("", accessor.getError());
    Assert.assertEquals(1, accessor.numCloses);
    byte[] tempBuf = new byte[5];
    Assert.assertEquals(-1, accessor.read(TEST_LENGTH,
          tempBuf, 0, 0));
    Assert.assertEquals(-1, accessor.read(TEST_LENGTH,
          tempBuf, 0, tempBuf.length));
    accessors.remove(uuid);
  } finally {
    dfs.close();
    cluster.shutdown();
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 64, Source: TestExternalBlockReader.java

Example 9: checkFileContentDirect

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the package/class this method depends on
/** Check the file content, reading as user {@code readingUser} */
static void checkFileContentDirect(URI uri, Path name, byte[] expected,
    int readOffset, String readingUser, Configuration conf,
    boolean legacyShortCircuitFails)
    throws IOException, InterruptedException {
  // Ensure short circuit is enabled
  DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
  if (legacyShortCircuitFails) {
    assertTrue(fs.getClient().useLegacyBlockReaderLocal());
  }
  
  HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);

  ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);

  IOUtils.skipFully(stm, readOffset);

  actual.limit(3);

  //Read a small number of bytes first.
  int nread = stm.read(actual);
  actual.limit(nread + 2);
  nread += stm.read(actual);

  // Read across chunk boundary
  actual.limit(Math.min(actual.capacity(), nread + 517));
  nread += stm.read(actual);
  checkData(arrayFromByteBuffer(actual), readOffset, expected, nread,
      "A few bytes");
  //Now read rest of it
  actual.limit(actual.capacity());
  while (actual.hasRemaining()) {
    int nbytes = stm.read(actual);

    if (nbytes < 0) {
      throw new EOFException("End of file reached before reading fully.");
    }
    nread += nbytes;
  }
  checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
  if (legacyShortCircuitFails) {
    assertFalse(fs.getClient().useLegacyBlockReaderLocal());
  }
  stm.close();
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 46, Source: TestShortCircuitLocalRead.java


Note: The org.apache.hadoop.hdfs.client.HdfsDataInputStream.close examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. When distributing or using this code, please follow the license of the corresponding project; do not reproduce without permission.