

Java ClientContext.getFromConf Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.ClientContext.getFromConf, drawn from open-source projects. If you are wondering what ClientContext.getFromConf does, how to call it, or want to see it used in real code, the curated examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hdfs.ClientContext.


The two code examples below show ClientContext.getFromConf in use. Both come from the TestShortCircuitLocalRead test class, which ships in identical form in both the naver/hadoop and aliyun-beta/aliyun-oss-hadoop-fs repositories.
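
Before the test examples, here is a minimal sketch of what calling ClientContext.getFromConf looks like on its own. Note that ClientContext is a private, HDFS-internal class; the context name and the use of the dfs.client.context key below are for illustration only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.ClientContext;

public class ClientContextDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Clients that share a context name also share the caches hanging
    // off that context (e.g. the short-circuit replica cache).
    // "demo-context" is an illustrative name, not a required value.
    conf.set("dfs.client.context", "demo-context");

    // getFromConf looks up (or lazily creates) the named, cached context.
    ClientContext ctx = ClientContext.getFromConf(conf);

    // The flag the test examples below keep checking: whether the legacy
    // (pre-HDFS-347) local block reader has been disabled for this context.
    System.out.println(ctx.getDisableLegacyBlockReaderLocal());
  }
}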

Example 1: checkFileContent

import java.io.EOFException;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.ClientContext; // class providing the method under discussion
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.io.IOUtils;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
/** Check file content, reading as user {@code readingUser} */
static void checkFileContent(URI uri, Path name, byte[] expected,
    int readOffset, String readingUser, Configuration conf,
    boolean legacyShortCircuitFails)
    throws IOException, InterruptedException {
  // Ensure short circuit is enabled
  DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
  ClientContext getClientContext = ClientContext.getFromConf(conf);
  if (legacyShortCircuitFails) {
    // The failing legacy read has not happened yet, so the legacy
    // local block reader must not have been disabled at this point.
    assertFalse(getClientContext.getDisableLegacyBlockReaderLocal());
  }
  
  FSDataInputStream stm = fs.open(name);
  byte[] actual = new byte[expected.length-readOffset];
  stm.readFully(readOffset, actual);
  checkData(actual, readOffset, expected, "Read 2");
  stm.close();
  // Now read using a different API.
  actual = new byte[expected.length-readOffset];
  stm = fs.open(name);
  IOUtils.skipFully(stm, readOffset);
  //Read a small number of bytes first.
  int nread = stm.read(actual, 0, 3);
  nread += stm.read(actual, nread, 2);
  //Read across chunk boundary
  nread += stm.read(actual, nread, 517);
  checkData(actual, readOffset, expected, nread, "A few bytes");
  //Now read rest of it
  while (nread < actual.length) {
    int nbytes = stm.read(actual, nread, actual.length - nread);
    if (nbytes < 0) {
      throw new EOFException("End of file reached before reading fully.");
    }
    nread += nbytes;
  }
  checkData(actual, readOffset, expected, "Read 3");
  
  if (legacyShortCircuitFails) {
    // The reads above are expected to have tripped and disabled the
    // legacy local block reader on this client context.
    assertTrue(getClientContext.getDisableLegacyBlockReaderLocal());
  }
  stm.close();
}
 
Contributor: naver, Project: hadoop, Lines: 43, Source: TestShortCircuitLocalRead.java
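
For context, the following is a hypothetical driver, not part of the original test class, showing how checkFileContent might be invoked against a MiniDFSCluster. It assumes the driver lives in the org.apache.hadoop.hdfs package (the helper is package-private); the file path, data size, and domain socket path are illustrative:

package org.apache.hadoop.hdfs;

import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class ShortCircuitReadDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Short-circuit local reads require a DataNode domain socket.
    conf.setBoolean("dfs.client.read.shortcircuit", true);
    conf.set("dfs.domain.socket.path", "/tmp/dn_socket._PORT"); // illustrative path
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      Path file = new Path("/tmp/file1");
      byte[] fileData = new byte[8193]; // crosses a 512-byte checksum chunk boundary
      new Random(0xBEEF).nextBytes(fileData);
      try (FSDataOutputStream out = fs.create(file)) {
        out.write(fileData);
      }
      TestShortCircuitLocalRead.checkFileContent(cluster.getURI(), file, fileData,
          0 /* readOffset */,
          UserGroupInformation.getCurrentUser().getShortUserName(), conf,
          false /* legacyShortCircuitFails */);
    } finally {
      cluster.shutdown();
    }
  }
}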

Example 2: checkFileContentDirect

import java.io.EOFException;
import java.io.IOException;
import java.net.URI;
import java.nio.ByteBuffer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.ClientContext; // class providing the method under discussion
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.io.IOUtils;
import static org.junit.Assert.assertTrue;
/** Check the file content, reading as user {@code readingUser} */
static void checkFileContentDirect(URI uri, Path name, byte[] expected,
    int readOffset, String readingUser, Configuration conf,
    boolean legacyShortCircuitFails)
    throws IOException, InterruptedException {
  // Ensure short circuit is enabled
  DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
  ClientContext clientContext = ClientContext.getFromConf(conf);
  if (legacyShortCircuitFails) {
    // By the time this helper runs, an earlier failing legacy read
    // (see checkFileContent) is expected to have already disabled the
    // legacy local block reader, hence assertTrue rather than assertFalse.
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }
  
  HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);

  ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);

  IOUtils.skipFully(stm, readOffset);

  actual.limit(3);

  //Read a small number of bytes first.
  int nread = stm.read(actual);
  actual.limit(nread + 2);
  nread += stm.read(actual);

  // Read across chunk boundary
  actual.limit(Math.min(actual.capacity(), nread + 517));
  nread += stm.read(actual);
  checkData(arrayFromByteBuffer(actual), readOffset, expected, nread,
      "A few bytes");
  //Now read rest of it
  actual.limit(actual.capacity());
  while (actual.hasRemaining()) {
    int nbytes = stm.read(actual);

    if (nbytes < 0) {
      throw new EOFException("End of file reached before reading fully.");
    }
    nread += nbytes;
  }
  checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
  if (legacyShortCircuitFails) {
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }
  stm.close();
}
 
Contributor: naver, Project: hadoop, Lines: 47, Source: TestShortCircuitLocalRead.java
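
The arrayFromByteBuffer helper used above is defined elsewhere in the test class. As a rough guide, a plausible (assumed, not the original) implementation copies out everything written into the buffer so far, without disturbing the caller's position and limit:

// Plausible sketch of the helper; the real test class defines its own version.
static byte[] arrayFromByteBuffer(ByteBuffer buf) {
  ByteBuffer dup = buf.duplicate(); // independent position/limit, shared contents
  dup.flip();                       // limit = bytes written so far, position = 0
  byte[] arr = new byte[dup.remaining()];
  dup.get(arr);
  return arr;
}

Note also how the test ratchets actual.limit(...) upward before each read: a channel-style read fills a ByteBuffer only up to its limit, which is what forces the separate 3-byte, 2-byte, and 517-byte reads.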


Note: the org.apache.hadoop.hdfs.ClientContext.getFromConf examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with those authors; consult the corresponding project's License before redistributing or using the code. Do not republish without permission.