This article collects typical usage examples of the Java method org.apache.hadoop.fs.FSDataInputStream.getWrappedStream. If you are wondering what FSDataInputStream.getWrappedStream does, how to use it, or where to find examples of it, the hand-picked code samples below should help; you can also read up on the enclosing class, org.apache.hadoop.fs.FSDataInputStream, for broader context.
Three code examples of FSDataInputStream.getWrappedStream are shown below, sorted by popularity by default.
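Before the examples, here is a minimal, self-contained sketch of the pattern they all share: open a file, call getWrappedStream() to reach the underlying InputStream, and branch on whether it is an HDFS-specific DFSInputStream. The class name WrappedStreamDemo and the path /tmp/demo.txt are placeholders invented for illustration, and the sketch assumes a FileSystem reachable through the default Configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSInputStream;

public class WrappedStreamDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // /tmp/demo.txt is a placeholder path for illustration.
    try (FSDataInputStream in = fs.open(new Path("/tmp/demo.txt"))) {
      // getWrappedStream() exposes the raw stream behind the FSDataInputStream decorator.
      if (in.getWrappedStream() instanceof DFSInputStream) {
        DFSInputStream dfsIn = (DFSInputStream) in.getWrappedStream();
        System.out.println("HDFS file length: " + dfsIn.getFileLength());
      } else {
        System.out.println("Not an HDFS stream: " + in.getWrappedStream().getClass());
      }
    }
  }
}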
Example 1: check
import org.apache.hadoop.fs.FSDataInputStream; // import the dependent package/class for this method
public static void check(FileSystem fs, Path p, long length) throws IOException {
  int i = -1;
  try {
    final FileStatus status = fs.getFileStatus(p);
    FSDataInputStream in = fs.open(p);
    if (in.getWrappedStream() instanceof DFSInputStream) {
      // On HDFS, ask the wrapped DFSInputStream for the length it can actually read.
      long len = ((DFSInputStream) in.getWrappedStream()).getFileLength();
      assertEquals(length, len);
    } else {
      assertEquals(length, status.getLen());
    }
    // Each byte at offset i is expected to hold the value (byte) i.
    for (i++; i < length; i++) {
      assertEquals((byte) i, (byte) in.read());
    }
    i = -(int) length;
    assertEquals(-1, in.read()); // EOF
    in.close();
  } catch (IOException ioe) {
    // i records how far the check got before failing.
    throw new IOException("p=" + p + ", length=" + length + ", i=" + i, ioe);
  }
}
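A note on the branch in Example 1: when the underlying stream is a DFSInputStream, the test asks it for getFileLength(), which reflects the length visible to the HDFS client and, for a file still being written, can be more current than the metadata returned by FileStatus.getLen(); for any other FileSystem the test falls back to the generic status length.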
Example 2: readDiskRanges
import org.apache.hadoop.fs.FSDataInputStream; // import the dependent package/class for this method
static DiskRangeList readDiskRanges(FSDataInputStream file, HadoopShims.ZeroCopyReaderShim zcr,
    long base, DiskRangeList range, boolean doForceDirect) throws IOException {
  if (range == null) {
    return null;
  }
  DiskRangeList prev = range.prev;
  if (prev == null) {
    prev = new DiskRangeList.MutateHelper(range);
  }
  while (range != null) {
    if (range.hasData()) {
      range = range.next;
      continue;
    }
    int len = (int) (range.getEnd() - range.getOffset());
    long off = range.getOffset();
    ByteBuffer bb = null;
    if (file.getWrappedStream() instanceof ADataInputStream) {
      // The wrapped stream already holds the bytes in memory; wrap them without copying.
      ADataInputStream ads = (ADataInputStream) file.getWrappedStream();
      bb = ByteBuffer.wrap(ads.getBuffer(), (int) (base + off), len);
    } else {
      // Don't use HDFS ByteBuffer API because it has no readFully, and is buggy and pointless.
      byte[] buffer = new byte[len];
      file.readFully((base + off), buffer, 0, buffer.length);
      if (doForceDirect) {
        bb = ByteBuffer.allocateDirect(len);
        bb.put(buffer);
        bb.position(0);
        bb.limit(len);
      } else {
        bb = ByteBuffer.wrap(buffer);
      }
    }
    range = range.replaceSelfWith(new BufferChunk(bb, range.getOffset()));
    range = range.next;
  }
  return prev.next;
}
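Two design points worth noting in Example 2: the else branch reads into a heap byte[] with readFully and only then copies into a direct ByteBuffer when doForceDirect is set, because (as the original comment says) the HDFS ByteBuffer read API offers no readFully equivalent; and the zcr (zero-copy shim) parameter, while accepted, goes unused in this variant of the method.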
Example 3: doPread
import org.apache.hadoop.fs.FSDataInputStream; // import the dependent package/class for this method
private void doPread(FSDataInputStream stm, long position, byte[] buffer,
    int offset, int length) throws IOException {
  int nread = 0;
  long totalRead = 0;
  DFSInputStream dfstm = null;
  if (stm.getWrappedStream() instanceof DFSInputStream) {
    // On HDFS, snapshot the read statistics before the pread so the delta can be checked.
    dfstm = (DFSInputStream) (stm.getWrappedStream());
    totalRead = dfstm.getReadStatistics().getTotalBytesRead();
  }
  while (nread < length) {
    int nbytes =
        stm.read(position + nread, buffer, offset + nread, length - nread);
    assertTrue("Error in pread", nbytes > 0);
    nread += nbytes;
  }
  if (dfstm != null) {
    if (isHedgedRead) {
      // Hedged reads may fetch extra bytes from another replica, so only a lower bound holds.
      assertTrue("Expected read statistic to be incremented", length <= dfstm
          .getReadStatistics().getTotalBytesRead() - totalRead);
    } else {
      assertEquals("Expected read statistic to be incremented", length, dfstm
          .getReadStatistics().getTotalBytesRead() - totalRead);
    }
  }
}
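The asymmetric assertions at the end of Example 3 are deliberate: with hedged reads enabled, HDFS may issue speculative requests to additional replicas in parallel, so the total bytes counted by getReadStatistics() can legitimately exceed the length the caller asked for; without hedging, the delta must equal length exactly.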