This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.client.HdfsDataInputStream.read. If you are wondering what HdfsDataInputStream.read does or how to call it, the curated examples below should help; you can also look further into the usage of the enclosing class, org.apache.hadoop.hdfs.client.HdfsDataInputStream.
The listing below shows code examples of the HdfsDataInputStream.read method, roughly ordered by popularity.
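Before the examples themselves, here is a minimal, self-contained sketch of the typical calling pattern: open an HDFS file, cast the returned FSDataInputStream to HdfsDataInputStream, read into a direct ByteBuffer, and inspect the per-stream read statistics. The path /tmp/example.dat and the 4 KB buffer size are illustrative assumptions only, not taken from the examples below.

import java.nio.ByteBuffer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

public class HdfsReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path file = new Path("/tmp/example.dat"); // illustrative path, assumed to exist
    // FileSystem.open() returns an FSDataInputStream; when the file system is
    // HDFS it can be cast to HdfsDataInputStream to reach HDFS-specific APIs
    // such as getReadStatistics().
    try (HdfsDataInputStream in = (HdfsDataInputStream) fs.open(file)) {
      ByteBuffer buf = ByteBuffer.allocateDirect(4096);
      int nread = in.read(buf); // read into a direct ByteBuffer
      System.out.println("bytes read: " + nread);
      System.out.println("total bytes read on this stream: "
          + in.getReadStatistics().getTotalBytesRead());
    }
  }
}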
Example 1: checkUnsupportedMethod
import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the package/class the method depends on
private boolean checkUnsupportedMethod(FileSystem fs, Path file,
    byte[] expected, int readOffset) throws IOException {
  HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(file);
  ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
  IOUtils.skipFully(stm, readOffset);
  try {
    stm.read(actual);
  } catch(UnsupportedOperationException unex) {
    return true;
  }
  return false;
}
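Example 1 relies on the fact that read(ByteBuffer) may throw UnsupportedOperationException when the underlying stream does not support direct ByteBuffer reads. A caller that wants to tolerate both kinds of streams can fall back to the byte[] overload; the fragment below is a sketch of that idea, and the helper name fallbackRead is ours, not part of the example.

// Hypothetical helper (not part of the example above): try the ByteBuffer
// read first and fall back to read(byte[], int, int) if the stream does not
// support direct buffer reads.
static int fallbackRead(HdfsDataInputStream in, ByteBuffer buf) throws IOException {
  try {
    return in.read(buf); // may throw UnsupportedOperationException
  } catch (UnsupportedOperationException e) {
    byte[] tmp = new byte[buf.remaining()];
    int n = in.read(tmp, 0, tmp.length); // the byte[] overload is always available
    if (n > 0) {
      buf.put(tmp, 0, n); // copy into the caller's buffer
    }
    return n;
  }
}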
Example 2: testRamDiskShortCircuitRead
import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the package/class the method depends on
/**
 * Read in-memory block with Short Circuit Read
 * Note: the test uses faked RAM_DISK from physical disk.
 */
@Test
public void testRamDiskShortCircuitRead()
    throws IOException, InterruptedException, TimeoutException {
  getClusterBuilder().setUseScr(true).build();
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  final int SEED = 0xFADED;
  Path path = new Path("/" + METHOD_NAME + ".dat");
  // Create a file and wait till it is persisted.
  makeRandomTestFile(path, BLOCK_SIZE, true, SEED);
  ensureFileReplicasOnStorageType(path, RAM_DISK);
  waitForMetric("RamDiskBlocksLazyPersisted", 1);
  HdfsDataInputStream fis = (HdfsDataInputStream) fs.open(path);
  // Verify SCR read counters
  try {
    byte[] buf = new byte[BUFFER_LENGTH];
    fis.read(0, buf, 0, BUFFER_LENGTH);
    Assert.assertEquals(BUFFER_LENGTH,
        fis.getReadStatistics().getTotalBytesRead());
    Assert.assertEquals(BUFFER_LENGTH,
        fis.getReadStatistics().getTotalShortCircuitBytesRead());
  } finally {
    fis.close();
    fis = null;
  }
}
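The assertions in Example 2 use HdfsDataInputStream.getReadStatistics() to verify that all bytes were served through the short-circuit path. As a rough sketch of inspecting those counters outside a test, assuming fis is an already opened HdfsDataInputStream (the class returned by getReadStatistics() moved between Hadoop versions, so no explicit type is named here):

// Per-stream read statistics distinguish how the bytes were served.
long total        = fis.getReadStatistics().getTotalBytesRead();
long local        = fis.getReadStatistics().getTotalLocalBytesRead();
long shortCircuit = fis.getReadStatistics().getTotalShortCircuitBytesRead();
long zeroCopy     = fis.getReadStatistics().getTotalZeroCopyBytesRead();
System.out.println("total=" + total + " local=" + local
    + " shortCircuit=" + shortCircuit + " zeroCopy=" + zeroCopy);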
Example 3: tesScrDuringEviction
import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the package/class the method depends on
/**
 * Eviction of lazy persisted blocks with Short Circuit Read handle open
 * Note: the test uses faked RAM_DISK from physical disk.
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void tesScrDuringEviction()
    throws Exception {
  getClusterBuilder().setUseScr(true).build();
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
  // Create a file and wait till it is persisted.
  makeTestFile(path1, BLOCK_SIZE, true);
  ensureFileReplicasOnStorageType(path1, RAM_DISK);
  waitForMetric("RamDiskBlocksLazyPersisted", 1);
  HdfsDataInputStream fis = (HdfsDataInputStream) fs.open(path1);
  try {
    // Keep an open read handle to path1 while triggering eviction.
    byte[] buf = new byte[BUFFER_LENGTH];
    fis.read(0, buf, 0, BUFFER_LENGTH);
    triggerEviction(cluster.getDataNodes().get(0));
    // Ensure path1 is still readable from the open SCR handle.
    fis.read(0, buf, 0, BUFFER_LENGTH);
    assertThat(fis.getReadStatistics().getTotalBytesRead(),
        is((long) 2 * BUFFER_LENGTH));
    assertThat(fis.getReadStatistics().getTotalShortCircuitBytesRead(),
        is((long) 2 * BUFFER_LENGTH));
  } finally {
    IOUtils.closeQuietly(fis);
  }
}
Example 4: checkFileContentDirect
import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the package/class the method depends on
/** Check the file content, reading as user {@code readingUser} */
static void checkFileContentDirect(URI uri, Path name, byte[] expected,
    int readOffset, String readingUser, Configuration conf,
    boolean legacyShortCircuitFails)
    throws IOException, InterruptedException {
  // Ensure short circuit is enabled
  DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
  ClientContext clientContext = ClientContext.getFromConf(conf);
  if (legacyShortCircuitFails) {
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }
  HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);
  ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
  IOUtils.skipFully(stm, readOffset);
  actual.limit(3);
  // Read a small number of bytes first.
  int nread = stm.read(actual);
  actual.limit(nread + 2);
  nread += stm.read(actual);
  // Read across chunk boundary
  actual.limit(Math.min(actual.capacity(), nread + 517));
  nread += stm.read(actual);
  checkData(arrayFromByteBuffer(actual), readOffset, expected, nread,
      "A few bytes");
  // Now read rest of it
  actual.limit(actual.capacity());
  while (actual.hasRemaining()) {
    int nbytes = stm.read(actual);
    if (nbytes < 0) {
      throw new EOFException("End of file reached before reading fully.");
    }
    nread += nbytes;
  }
  checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
  if (legacyShortCircuitFails) {
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }
  stm.close();
}
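checkFileContentDirect in Example 4 calls a helper, arrayFromByteBuffer, that is not shown on this page. A minimal sketch of what such a helper could look like, inferred from how the buffer is used above rather than taken from the original source:

// Sketch of an arrayFromByteBuffer helper: copy whatever has been written
// into the (direct) ByteBuffer so far into a plain byte[] for comparison
// against the expected file contents.
static byte[] arrayFromByteBuffer(ByteBuffer buf) {
  ByteBuffer copy = buf.duplicate(); // leave the caller's position/limit untouched
  copy.flip();                       // make the bytes written so far readable
  byte[] result = new byte[copy.remaining()];
  copy.get(result);
  return result;
}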
Example 5: checkFileContentDirect
import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the package/class the method depends on
/** Check the file content, reading as user {@code readingUser} */
static void checkFileContentDirect(URI uri, Path name, byte[] expected,
    int readOffset, String readingUser, Configuration conf,
    boolean legacyShortCircuitFails)
    throws IOException, InterruptedException {
  // Ensure short circuit is enabled
  DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
  if (legacyShortCircuitFails) {
    assertTrue(fs.getClient().useLegacyBlockReaderLocal());
  }
  HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);
  ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
  IOUtils.skipFully(stm, readOffset);
  actual.limit(3);
  // Read a small number of bytes first.
  int nread = stm.read(actual);
  actual.limit(nread + 2);
  nread += stm.read(actual);
  // Read across chunk boundary
  actual.limit(Math.min(actual.capacity(), nread + 517));
  nread += stm.read(actual);
  checkData(arrayFromByteBuffer(actual), readOffset, expected, nread,
      "A few bytes");
  // Now read rest of it
  actual.limit(actual.capacity());
  while (actual.hasRemaining()) {
    int nbytes = stm.read(actual);
    if (nbytes < 0) {
      throw new EOFException("End of file reached before reading fully.");
    }
    nread += nbytes;
  }
  checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
  if (legacyShortCircuitFails) {
    assertFalse(fs.getClient().useLegacyBlockReaderLocal());
  }
  stm.close();
}
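Note that this last variant differs from Example 4 only in how it checks the legacy short-circuit state: it queries the DFSClient directly via fs.getClient().useLegacyBlockReaderLocal() instead of consulting the shared ClientContext, and after the reads it expects the legacy local block reader to have been disabled (assertFalse) when legacyShortCircuitFails is set.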