

Java HdfsDataInputStream.read Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.client.HdfsDataInputStream.read, gathered from open-source projects. If you are wondering what HdfsDataInputStream.read does or how to use it, the curated method examples below may help. You can also explore other usage examples of the enclosing class, org.apache.hadoop.hdfs.client.HdfsDataInputStream.


The following shows seven code examples of HdfsDataInputStream.read, sorted by popularity by default.
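Before the per-project examples, here is a minimal, self-contained sketch of HdfsDataInputStream.read in context. It is an illustration under stated assumptions rather than code from any project below: the file path is hypothetical, and the cast to HdfsDataInputStream only succeeds when fs.defaultFS points at an HDFS cluster.

import java.nio.ByteBuffer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

public class HdfsReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(); // assumes fs.defaultFS is an hdfs:// URI
    FileSystem fs = FileSystem.get(conf);
    Path file = new Path("/tmp/example.dat"); // hypothetical path

    // On HDFS, FileSystem.open returns an HdfsDataInputStream.
    try (HdfsDataInputStream in = (HdfsDataInputStream) fs.open(file)) {
      ByteBuffer buf = ByteBuffer.allocateDirect(4096);
      // read(ByteBuffer) fills at most buf.remaining() bytes and may throw
      // UnsupportedOperationException if direct buffer reads are unsupported.
      int n = in.read(buf);
      System.out.println("read " + n + " bytes, total "
          + in.getReadStatistics().getTotalBytesRead());
    }
  }
}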

Example 1: checkUnsupportedMethod

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the class this method depends on
private boolean checkUnsupportedMethod(FileSystem fs, Path file,
    byte[] expected, int readOffset) throws IOException {
  HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(file);
  try {
    ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
    IOUtils.skipFully(stm, readOffset);
    // read(ByteBuffer) throws UnsupportedOperationException when the
    // underlying block reader cannot serve direct buffer reads.
    stm.read(actual);
  } catch (UnsupportedOperationException unex) {
    return true;
  } finally {
    stm.close(); // close the stream in every outcome
  }
  return false;
}
 
Developer: naver | Project: hadoop | Source: TestShortCircuitLocalRead.java
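For context, a hedged sketch of how a test might drive this helper; the setup below (path, size, seed) is hypothetical and not taken from the project above:

// Hypothetical driver for checkUnsupportedMethod: write known random bytes
// to a file, then expect the direct ByteBuffer read path to be reported
// as unsupported under this configuration.
Path file = new Path("/test/unsupported.dat");
byte[] expected = new byte[8192];
new java.util.Random(0xFADED).nextBytes(expected);
try (FSDataOutputStream out = fs.create(file)) {
  out.write(expected);
}
Assert.assertTrue("expected read(ByteBuffer) to be unsupported",
    checkUnsupportedMethod(fs, file, expected, 0));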

Example 2: checkUnsupportedMethod

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the class this method depends on
private boolean checkUnsupportedMethod(FileSystem fs, Path file,
    byte[] expected, int readOffset) throws IOException {
  HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(file);
  try {
    ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
    IOUtils.skipFully(stm, readOffset);
    // read(ByteBuffer) throws UnsupportedOperationException when the
    // underlying block reader cannot serve direct buffer reads.
    stm.read(actual);
  } catch (UnsupportedOperationException unex) {
    return true;
  } finally {
    stm.close(); // close the stream in every outcome
  }
  return false;
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Source: TestShortCircuitLocalRead.java

Example 3: testRamDiskShortCircuitRead

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the class this method depends on
/**
 * Read an in-memory block with Short-Circuit Read.
 * Note: the test uses a faked RAM_DISK backed by physical disk.
 */
@Test
public void testRamDiskShortCircuitRead()
    throws IOException, InterruptedException, TimeoutException {
  getClusterBuilder().setUseScr(true).build();
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  final int SEED = 0xFADED;
  Path path = new Path("/" + METHOD_NAME + ".dat");

  // Create a file and wait till it is persisted.
  makeRandomTestFile(path, BLOCK_SIZE, true, SEED);
  ensureFileReplicasOnStorageType(path, RAM_DISK);
  waitForMetric("RamDiskBlocksLazyPersisted", 1);

  HdfsDataInputStream fis = (HdfsDataInputStream) fs.open(path);

  // Verify SCR read counters
  try {
    byte[] buf = new byte[BUFFER_LENGTH];
    fis.read(0, buf, 0, BUFFER_LENGTH);
    Assert.assertEquals(BUFFER_LENGTH,
      fis.getReadStatistics().getTotalBytesRead());
    Assert.assertEquals(BUFFER_LENGTH,
      fis.getReadStatistics().getTotalShortCircuitBytesRead());
  } finally {
    fis.close();
  }
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Source: TestScrLazyPersistFiles.java
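The four-argument read used above is a positioned read inherited from FSDataInputStream: it reads from an absolute file offset and leaves the stream's own position untouched. A minimal sketch of that contract, assuming an open HdfsDataInputStream named fis:

byte[] buf = new byte[BUFFER_LENGTH];
long posBefore = fis.getPos();
// Positioned read: up to BUFFER_LENGTH bytes starting at absolute offset 0.
int n = fis.read(0, buf, 0, BUFFER_LENGTH);
// Unlike read(buf), a positioned read does not advance the stream position.
Assert.assertEquals(posBefore, fis.getPos());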

Example 4: tesScrDuringEviction

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the class this method depends on
/**
 * Eviction of lazy-persisted blocks while a Short-Circuit Read handle is open.
 * Note: the test uses a faked RAM_DISK backed by physical disk.
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void tesScrDuringEviction()
    throws Exception {
  getClusterBuilder().setUseScr(true).build();
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path1 = new Path("/" + METHOD_NAME + ".01.dat");

  // Create a file and wait till it is persisted.
  makeTestFile(path1, BLOCK_SIZE, true);
  ensureFileReplicasOnStorageType(path1, RAM_DISK);
  waitForMetric("RamDiskBlocksLazyPersisted", 1);

  HdfsDataInputStream fis = (HdfsDataInputStream) fs.open(path1);
  try {
    // Keep an open read handle to path1 while its replica is evicted.
    byte[] buf = new byte[BUFFER_LENGTH];
    fis.read(0, buf, 0, BUFFER_LENGTH);
    triggerEviction(cluster.getDataNodes().get(0));

    // Ensure path1 is still readable from the open SCR handle.
    fis.read(0, buf, 0, BUFFER_LENGTH);
    assertThat(fis.getReadStatistics().getTotalBytesRead(),
        is((long) 2 * BUFFER_LENGTH));
    assertThat(fis.getReadStatistics().getTotalShortCircuitBytesRead(),
        is((long) 2 * BUFFER_LENGTH));
  } finally {
    IOUtils.closeQuietly(fis);
  }
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Source: TestScrLazyPersistFiles.java
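Why the assertions above expect 2 * BUFFER_LENGTH: the ReadStatistics counters are cumulative over the lifetime of the stream, so two positioned reads of the same range each add a full buffer. A short sketch of that behavior, again assuming an open stream fis:

long before = fis.getReadStatistics().getTotalBytesRead();
byte[] buf = new byte[BUFFER_LENGTH];
fis.read(0, buf, 0, BUFFER_LENGTH); // first read of the block
fis.read(0, buf, 0, BUFFER_LENGTH); // re-reading the same range still counts
Assert.assertEquals(2L * BUFFER_LENGTH,
    fis.getReadStatistics().getTotalBytesRead() - before);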

Example 5: checkFileContentDirect

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the class this method depends on
/** Check the file content, reading as user {@code readingUser} */
static void checkFileContentDirect(URI uri, Path name, byte[] expected,
    int readOffset, String readingUser, Configuration conf,
    boolean legacyShortCircuitFails)
    throws IOException, InterruptedException {
  // Ensure short circuit is enabled
  DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
  ClientContext clientContext = ClientContext.getFromConf(conf);
  if (legacyShortCircuitFails) {
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }
  
  HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);

  ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);

  IOUtils.skipFully(stm, readOffset);

  actual.limit(3);

  // Read a small number of bytes first.
  int nread = stm.read(actual);
  actual.limit(nread + 2);
  nread += stm.read(actual);

  // Read across chunk boundary
  actual.limit(Math.min(actual.capacity(), nread + 517));
  nread += stm.read(actual);
  checkData(arrayFromByteBuffer(actual), readOffset, expected, nread,
      "A few bytes");
  // Now read the rest of it
  actual.limit(actual.capacity());
  while (actual.hasRemaining()) {
    int nbytes = stm.read(actual);

    if (nbytes < 0) {
      throw new EOFException("End of file reached before reading fully.");
    }
    nread += nbytes;
  }
  checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
  if (legacyShortCircuitFails) {
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }
  stm.close();
}
 
Developer: naver | Project: hadoop | Source: TestShortCircuitLocalRead.java
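The limit() manipulation above is the key idea in this example: read(ByteBuffer) fills at most remaining() bytes, so shrinking the limit caps each read and raising it afterwards allows the rest. A condensed sketch of the pattern, assuming an open stream stm and a direct ByteBuffer actual:

actual.limit(3);                 // the first read may fill at most 3 bytes
int nread = stm.read(actual);
actual.limit(actual.capacity()); // now allow the remainder of the buffer
while (actual.hasRemaining()) {
  int nbytes = stm.read(actual); // each call appends at the buffer's position
  if (nbytes < 0) {
    throw new EOFException("End of file reached before reading fully.");
  }
  nread += nbytes;
}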

Example 6: checkFileContentDirect

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the class this method depends on
/** Check the file content, reading as user {@code readingUser} */
static void checkFileContentDirect(URI uri, Path name, byte[] expected,
    int readOffset, String readingUser, Configuration conf,
    boolean legacyShortCircuitFails)
    throws IOException, InterruptedException {
  // Ensure short circuit is enabled
  DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
  ClientContext clientContext = ClientContext.getFromConf(conf);
  if (legacyShortCircuitFails) {
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }

  HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);

  ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);

  IOUtils.skipFully(stm, readOffset);

  actual.limit(3);

  // Read a small number of bytes first.
  int nread = stm.read(actual);
  actual.limit(nread + 2);
  nread += stm.read(actual);

  // Read across chunk boundary
  actual.limit(Math.min(actual.capacity(), nread + 517));
  nread += stm.read(actual);
  checkData(arrayFromByteBuffer(actual), readOffset, expected, nread,
      "A few bytes");
  // Now read the rest of it
  actual.limit(actual.capacity());
  while (actual.hasRemaining()) {
    int nbytes = stm.read(actual);

    if (nbytes < 0) {
      throw new EOFException("End of file reached before reading fully.");
    }
    nread += nbytes;
  }
  checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
  if (legacyShortCircuitFails) {
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }
  stm.close();
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Source: TestShortCircuitLocalRead.java

Example 7: checkFileContentDirect

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the class this method depends on
/** Check the file content, reading as user {@code readingUser} */
static void checkFileContentDirect(URI uri, Path name, byte[] expected,
    int readOffset, String readingUser, Configuration conf,
    boolean legacyShortCircuitFails)
    throws IOException, InterruptedException {
  // Ensure short circuit is enabled
  DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
  if (legacyShortCircuitFails) {
    assertTrue(fs.getClient().useLegacyBlockReaderLocal());
  }
  
  HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);

  ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);

  IOUtils.skipFully(stm, readOffset);

  actual.limit(3);

  // Read a small number of bytes first.
  int nread = stm.read(actual);
  actual.limit(nread + 2);
  nread += stm.read(actual);

  // Read across chunk boundary
  actual.limit(Math.min(actual.capacity(), nread + 517));
  nread += stm.read(actual);
  checkData(arrayFromByteBuffer(actual), readOffset, expected, nread,
      "A few bytes");
  // Now read the rest of it
  actual.limit(actual.capacity());
  while (actual.hasRemaining()) {
    int nbytes = stm.read(actual);

    if (nbytes < 0) {
      throw new EOFException("End of file reached before reading fully.");
    }
    nread += nbytes;
  }
  checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
  if (legacyShortCircuitFails) {
    assertFalse(fs.getClient().useLegacyBlockReaderLocal());
  }
  stm.close();
}
 
Developer: ict-carch | Project: hadoop-plus | Source: TestShortCircuitLocalRead.java
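Note the difference from Examples 5 and 6: this older variant consults DFSClient.useLegacyBlockReaderLocal() directly, asserting it is true before the read and false afterwards; the test apparently expects the failed legacy short-circuit read to disable the legacy local block reader on the client.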


Note: the org.apache.hadoop.hdfs.client.HdfsDataInputStream.read examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright in the source code remains with the original authors, and distribution or use should follow each project's License. Do not reproduce without permission.