This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.client.HdfsDataInputStream. If you are wondering what HdfsDataInputStream is for, how to use it, or what real-world code that uses it looks like, the curated class code examples below should help.
HdfsDataInputStream belongs to the org.apache.hadoop.hdfs.client package. The sections below show 14 code examples of the class, ordered roughly by popularity.
Example 1: createWrappedInputStream

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the required package/class
/**
 * Wraps the stream in a CryptoInputStream if the underlying file is
 * encrypted.
 */
public HdfsDataInputStream createWrappedInputStream(DFSInputStream dfsis)
    throws IOException {
  final FileEncryptionInfo feInfo = dfsis.getFileEncryptionInfo();
  if (feInfo != null) {
    // File is encrypted, wrap the stream in a crypto stream.
    // Currently only one version, so no special logic based on the version #
    getCryptoProtocolVersion(feInfo);
    final CryptoCodec codec = getCryptoCodec(conf, feInfo);
    final KeyVersion decrypted = decryptEncryptedDataEncryptionKey(feInfo);
    final CryptoInputStream cryptoIn =
        new CryptoInputStream(dfsis, codec, decrypted.getMaterial(),
            feInfo.getIV());
    return new HdfsDataInputStream(cryptoIn);
  } else {
    // No FileEncryptionInfo so no encryption.
    return new HdfsDataInputStream(dfsis);
  }
}
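For context, a minimal sketch of how such a wrapped stream is typically consumed. The path is hypothetical, and it assumes fs.defaultFS points at HDFS so that fs.open() returns an HdfsDataInputStream; any decryption happens transparently inside the stream built above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

public class ReadPossiblyEncryptedFile {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);                    // assumes fs.defaultFS is an HDFS URI
    try (FSDataInputStream in = fs.open(new Path("/example/data.txt"))) {  // hypothetical path
      HdfsDataInputStream hin = (HdfsDataInputStream) in;    // concrete type when backed by HDFS
      byte[] buf = new byte[4096];
      int n = hin.read(buf);                                 // plaintext even inside an encryption zone
      System.out.println("read " + n + " bytes, visible length = "
          + hin.getVisibleLength());
    }
  }
}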
Example 2: checkFile

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the required package/class
static void checkFile(Path p, int expectedsize, final Configuration conf)
    throws IOException, InterruptedException {
  // open the file with another user account
  final String username = UserGroupInformation.getCurrentUser().getShortUserName()
      + "_" + ++userCount;
  UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username,
      new String[] {"supergroup"});
  final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf);
  final HdfsDataInputStream in = (HdfsDataInputStream) fs.open(p);

  // Check visible length
  Assert.assertTrue(in.getVisibleLength() >= expectedsize);

  // Able to read?
  for (int i = 0; i < expectedsize; i++) {
    Assert.assertEquals((byte) i, (byte) in.read());
  }
  in.close();
}
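A hedged sketch of how this helper might be driven from a test. The file name and length are invented, and a running MiniDFSCluster named cluster (with its Configuration conf) is assumed; the writer produces the sequential byte pattern that checkFile verifies.

// Illustrative only: write LEN sequential bytes, then verify them via checkFile.
final int LEN = 1024;
final Path p = new Path("/checkFileDemo.dat");          // hypothetical test path
final FileSystem fs = cluster.getFileSystem();          // assumes a running MiniDFSCluster `cluster`
try (FSDataOutputStream out = fs.create(p)) {
  for (int i = 0; i < LEN; i++) {
    out.write(i);                                       // checkFile expects byte value i at offset i
  }
  out.hflush();
}
checkFile(p, LEN, conf);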
Example 3: zeroCopyRead

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the required package/class
void zeroCopyRead(FileSystem fs, String path,
    int readSize, int nloop) throws IOException {
  long start_ts, end_ts, len = 0;
  ByteBuffer bb = ByteBuffer.allocate(readSize);
  HdfsDataInputStream fsis = (HdfsDataInputStream) fs.open(new Path(path));
  for (int i = 0; i < nloop; i++) {
    fsis.seek(0);
    len = 0;
    start_ts = System.nanoTime();
    while (true) {
      bb = fsis.rdmaRead(readSize);
      if (bb == null) {
        break;
      }
      len += bb.remaining();
    }
    end_ts = System.nanoTime();
    System.out.println(((double) len / (end_ts - start_ts)) + " GB/s");
  }
  fsis.close();
}
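Worth noting: rdmaRead(int) is not part of the stock Apache Hadoop HdfsDataInputStream API; it appears to come from an RDMA-enabled HDFS fork, so this benchmark only compiles against that fork. A purely hypothetical driver (path, read size, and loop count are made up):

// Hypothetical invocation of the benchmark above; requires the RDMA-enabled fork.
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
zeroCopyRead(fs, "/bench/large.dat", 1 << 20, 10);  // 1 MiB reads, 10 passes
fs.close();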
Example 4: checkFile

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the required package/class
static void checkFile(Path p, int expectedsize, final Configuration conf)
    throws IOException, InterruptedException {
  // open the file with another user account
  final String username =
      UserGroupInformation.getCurrentUser().getShortUserName() + "_" +
      ++userCount;
  UserGroupInformation ugi = UserGroupInformation
      .createUserForTesting(username, new String[]{"supergroup"});
  final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf);
  final HdfsDataInputStream in = (HdfsDataInputStream) fs.open(p);

  // Check visible length
  Assert.assertTrue(in.getVisibleLength() >= expectedsize);

  // Able to read?
  for (int i = 0; i < expectedsize; i++) {
    Assert.assertEquals((byte) i, (byte) in.read());
  }
  in.close();
}
Example 5: open

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the required package/class
@SuppressWarnings("deprecation")
@Override
public HdfsDataInputStream open(Path f, int bufferSize)
    throws IOException, UnresolvedLinkException {
  final DFSInputStream dfsis = dfs.open(getUriPath(f),
      bufferSize, verifyChecksum);
  return dfs.createWrappedInputStream(dfsis);
}
Example 6: getFirstBlock

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the required package/class
public static ExtendedBlock getFirstBlock(FileSystem fs, Path path) throws IOException {
  HdfsDataInputStream in = (HdfsDataInputStream) fs.open(path);
  try {
    in.readByte();
    return in.getCurrentBlock();
  } finally {
    in.close();
  }
}
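A short sketch of how the returned ExtendedBlock is typically used in tests, for instance to locate a block replica or corrupt it deliberately; fs and path are assumed to refer to a file with at least one byte of data.

ExtendedBlock blk = getFirstBlock(fs, path);
System.out.println("block pool = " + blk.getBlockPoolId()
    + ", block id = " + blk.getBlockId()
    + ", bytes = " + blk.getNumBytes());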
Example 7: readData

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the required package/class
/**
 * Open the file to read from begin to end. Then close the file.
 * Return number of bytes read.
 * Support both sequential read and position read.
 */
private long readData(String fname, byte[] buffer, long byteExpected, long beginPosition)
    throws IOException {
  long totalByteRead = 0;
  Path path = getFullyQualifiedPath(fname);
  FSDataInputStream in = null;
  try {
    in = openInputStream(path);

    long visibleLenFromReadStream = ((HdfsDataInputStream) in).getVisibleLength();
    if (visibleLenFromReadStream < byteExpected) {
      throw new IOException(visibleLenFromReadStream
          + " = visibleLenFromReadStream < bytesExpected= "
          + byteExpected);
    }

    totalByteRead = readUntilEnd(in, buffer, buffer.length, fname,
        beginPosition, visibleLenFromReadStream, positionReadOption);
    in.close();

    // reading more data than the visible length is OK, but not less
    if (totalByteRead + beginPosition < byteExpected) {
      throw new IOException("readData mismatch in byte read: expected="
          + byteExpected + " ; got " + (totalByteRead + beginPosition));
    }
    return totalByteRead + beginPosition;
  } catch (IOException e) {
    throw new IOException("##### Caught Exception in readData. "
        + "Total Byte Read so far = " + totalByteRead + " beginPosition = "
        + beginPosition, e);
  } finally {
    if (in != null) {
      in.close();
    }
  }
}
Example 8: checkUnsupportedMethod

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the required package/class
private boolean checkUnsupportedMethod(FileSystem fs, Path file,
    byte[] expected, int readOffset) throws IOException {
  HdfsDataInputStream stm = (HdfsDataInputStream) fs.open(file);
  ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
  IOUtils.skipFully(stm, readOffset);
  try {
    stm.read(actual);
  } catch (UnsupportedOperationException unex) {
    return true;
  }
  return false;
}
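The test above exercises the caller-side pattern sketched below: attempt a direct ByteBuffer read and fall back to an ordinary byte[] read when the underlying stream throws UnsupportedOperationException. The buffer size is arbitrary, and stm is assumed to be the HdfsDataInputStream opened as in the example.

ByteBuffer bb = ByteBuffer.allocateDirect(8192);
int n;
try {
  n = stm.read(bb);                      // ByteBufferReadable path; may be unsupported
} catch (UnsupportedOperationException e) {
  byte[] tmp = new byte[8192];           // fall back to the classic byte[] read
  n = stm.read(tmp);
  bb.clear();
  if (n > 0) {
    bb.put(tmp, 0, n);
  }
  bb.flip();
}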
Example 9: testRamDiskShortCircuitRead

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the required package/class
/**
 * Read in-memory block with Short Circuit Read
 * Note: the test uses faked RAM_DISK from physical disk.
 */
@Test
public void testRamDiskShortCircuitRead()
    throws IOException, InterruptedException {
  startUpCluster(REPL_FACTOR,
      new StorageType[]{RAM_DISK, DEFAULT},
      2 * BLOCK_SIZE - 1, true); // 1 replica + delta, SCR read
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  final int SEED = 0xFADED;
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeRandomTestFile(path, BLOCK_SIZE, true, SEED);
  ensureFileReplicasOnStorageType(path, RAM_DISK);

  // Sleep for a short time to allow the lazy writer thread to do its job
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);
  //assertThat(verifyReadRandomFile(path, BLOCK_SIZE, SEED), is(true));

  // Open the file once and verify SCR read counters
  FSDataInputStream fis = fs.open(path);
  try {
    byte[] buf = new byte[BUFFER_LENGTH];
    fis.read(0, buf, 0, BUFFER_LENGTH);
    HdfsDataInputStream dfsis = (HdfsDataInputStream) fis;
    Assert.assertEquals(BUFFER_LENGTH,
        dfsis.getReadStatistics().getTotalBytesRead());
    Assert.assertEquals(BUFFER_LENGTH,
        dfsis.getReadStatistics().getTotalShortCircuitBytesRead());
  } finally {
    fis.close();
    fis = null;
  }
}
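Beyond the two counters asserted above, getReadStatistics() exposes further counters that help verify which read path was taken. A small sketch, assuming dfsis is an HdfsDataInputStream that has already been read from:

System.out.println("total bytes read:    " + dfsis.getReadStatistics().getTotalBytesRead());
System.out.println("local bytes read:    " + dfsis.getReadStatistics().getTotalLocalBytesRead());
System.out.println("short-circuit bytes: " + dfsis.getReadStatistics().getTotalShortCircuitBytesRead());
System.out.println("zero-copy bytes:     " + dfsis.getReadStatistics().getTotalZeroCopyBytesRead());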
Example 10: checkUnsupportedMethod

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the required package/class
private boolean checkUnsupportedMethod(FileSystem fs, Path file,
    byte[] expected, int readOffset) throws IOException {
  HdfsDataInputStream stm = (HdfsDataInputStream) fs.open(file);
  ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
  IOUtils.skipFully(stm, readOffset);
  try {
    stm.read(actual);
  } catch (UnsupportedOperationException unex) {
    return true;
  }
  return false;
}
Example 11: testRamDiskShortCircuitRead

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the required package/class
/**
 * Read in-memory block with Short Circuit Read
 * Note: the test uses faked RAM_DISK from physical disk.
 */
@Test
public void testRamDiskShortCircuitRead()
    throws IOException, InterruptedException, TimeoutException {
  getClusterBuilder().setUseScr(true).build();
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  final int SEED = 0xFADED;
  Path path = new Path("/" + METHOD_NAME + ".dat");

  // Create a file and wait till it is persisted.
  makeRandomTestFile(path, BLOCK_SIZE, true, SEED);
  ensureFileReplicasOnStorageType(path, RAM_DISK);
  waitForMetric("RamDiskBlocksLazyPersisted", 1);

  HdfsDataInputStream fis = (HdfsDataInputStream) fs.open(path);
  // Verify SCR read counters
  try {
    byte[] buf = new byte[BUFFER_LENGTH];
    fis.read(0, buf, 0, BUFFER_LENGTH);
    Assert.assertEquals(BUFFER_LENGTH,
        fis.getReadStatistics().getTotalBytesRead());
    Assert.assertEquals(BUFFER_LENGTH,
        fis.getReadStatistics().getTotalShortCircuitBytesRead());
  } finally {
    fis.close();
    fis = null;
  }
}
Example 12: tesScrDuringEviction

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the required package/class
/**
 * Eviction of lazy persisted blocks with Short Circuit Read handle open
 * Note: the test uses faked RAM_DISK from physical disk.
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void tesScrDuringEviction()
    throws Exception {
  getClusterBuilder().setUseScr(true).build();
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path1 = new Path("/" + METHOD_NAME + ".01.dat");

  // Create a file and wait till it is persisted.
  makeTestFile(path1, BLOCK_SIZE, true);
  ensureFileReplicasOnStorageType(path1, RAM_DISK);
  waitForMetric("RamDiskBlocksLazyPersisted", 1);

  HdfsDataInputStream fis = (HdfsDataInputStream) fs.open(path1);
  try {
    // Keep an open read handle to path1 while creating path2
    byte[] buf = new byte[BUFFER_LENGTH];
    fis.read(0, buf, 0, BUFFER_LENGTH);
    triggerEviction(cluster.getDataNodes().get(0));

    // Ensure path1 is still readable from the open SCR handle.
    fis.read(0, buf, 0, BUFFER_LENGTH);
    assertThat(fis.getReadStatistics().getTotalBytesRead(),
        is((long) 2 * BUFFER_LENGTH));
    assertThat(fis.getReadStatistics().getTotalShortCircuitBytesRead(),
        is((long) 2 * BUFFER_LENGTH));
  } finally {
    IOUtils.closeQuietly(fis);
  }
}
Example 13: OpenEntity

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the required package/class
OpenEntity(final HdfsDataInputStream in, final long length,
    final int outBufferSize, final DFSClient dfsclient) {
  this.in = in;
  this.length = length;
  this.outBufferSize = outBufferSize;
  this.dfsclient = dfsclient;
}
Example 14: open

import org.apache.hadoop.hdfs.client.HdfsDataInputStream; // import the required package/class
@SuppressWarnings("deprecation")
@Override
public HdfsDataInputStream open(Path f, int bufferSize)
    throws IOException, UnresolvedLinkException {
  return new DFSClient.DFSDataInputStream(dfs.open(getUriPath(f),
      bufferSize, verifyChecksum));
}