This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.DFSInputStream. If you are wondering what DFSInputStream is for, how to use it, or simply want to see it in real code, the selected examples below may help.
The DFSInputStream class belongs to the org.apache.hadoop.hdfs package. A total of 15 code examples of DFSInputStream are shown below, sorted by popularity by default.
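Before the individual examples, here is a minimal sketch of how a DFSInputStream is typically obtained and used. It assumes a reachable HDFS cluster configured via fs.defaultFS and a hypothetical file /tmp/example.txt; in most application code you would go through FileSystem.open() rather than DFSClient directly, as several of the examples below do.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInputStream;

public class DfsInputStreamSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Talk to the NameNode configured via fs.defaultFS (e.g. hdfs://namenode:8020).
    DFSClient client = new DFSClient(FileSystem.getDefaultUri(conf), conf);
    DFSInputStream in = null;
    try {
      // DFSClient.open() returns a DFSInputStream positioned at offset 0.
      in = client.open("/tmp/example.txt"); // hypothetical path
      byte[] buf = new byte[4096];
      int n = in.read(buf, 0, buf.length); // read up to one buffer's worth
      System.out.println("read " + n + " bytes of " + in.getFileLength());
    } finally {
      if (in != null) {
        in.close();
      }
      client.close();
    }
  }
}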
Example 1: createShortCircuitConf
import org.apache.hadoop.hdfs.DFSInputStream; // import the required package/class
private static Configuration createShortCircuitConf(String testName,
    TemporarySocketDirectory sockDir) {
  Configuration conf = new Configuration();
  conf.set(DFS_CLIENT_CONTEXT, testName);
  conf.setLong(DFS_BLOCK_SIZE_KEY, 4096);
  conf.set(DFS_DOMAIN_SOCKET_PATH_KEY, new File(sockDir.getDir(),
      testName).getAbsolutePath());
  conf.setBoolean(DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.setBoolean(DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,
      false);
  conf.setBoolean(DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false);
  DFSInputStream.tcpReadsDisabledForTesting = true;
  DomainSocket.disableBindPathValidation();
  Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
  return conf;
}
Example 2: createShortCircuitConf
import org.apache.hadoop.hdfs.DFSInputStream; // import the required package/class
private static Configuration createShortCircuitConf(String testName,
    TemporarySocketDirectory sockDir) {
  Configuration conf = new Configuration();
  conf.set(DFS_CLIENT_CONTEXT, testName);
  conf.setLong(DFS_BLOCK_SIZE_KEY, 4096);
  conf.set(DFS_DOMAIN_SOCKET_PATH_KEY, new File(sockDir.getDir(),
      testName).getAbsolutePath());
  conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
  conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
      false);
  conf.setBoolean(DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false);
  DFSInputStream.tcpReadsDisabledForTesting = true;
  DomainSocket.disableBindPathValidation();
  Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
  return conf;
}
Example 3: tryGetLocalFile
import org.apache.hadoop.hdfs.DFSInputStream; // import the required package/class
private void tryGetLocalFile() {
  if (tryGetLocalFileTimes >= TRY_GET_LOCAL_FILE_LIMIT) {
    return;
  }
  if (isSingleBlock && HDFS_READ_HACK_ENABLE) {
    try {
      InputStream is = input.getWrappedStream();
      if (is instanceof DFSInputStream) {
        BlockReader blockReader = MemoryUtil.getDFSInputStream_blockReader(is);
        if (blockReader != null && blockReader.isShortCircuit()) {
          localFile = MemoryUtil.getBlockReaderLocal_dataIn(blockReader);
        }
      }
    } catch (Throwable e) {
      logger.debug("HDFS READ HACK failed.", e);
    }
  }
  tryGetLocalFileTimes++;
}
Example 4: readDFSPaths
import org.apache.hadoop.hdfs.DFSInputStream; // import the required package/class
/**
 * Lists, and then reads the first byte of every file in
 * this hostname/process' namespace.
 * @return -1 on error, 0 on success
 */
private static int readDFSPaths() {
  if (listDFSPaths() != 0) {
    return -1;
  }
  try {
    for (Map.Entry<String, OutputStream> file : files_.entrySet()) {
      long startTime = System.nanoTime();
      DFSInputStream in = dfsClient_.open(file.getKey());
      timingOpen_.add(new Double((System.nanoTime() - startTime) / (1E9)));
      in.read();
      in.close();
    }
  } catch (IOException e) {
    e.printStackTrace();
    return -1; // match the documented contract: -1 on error
  }
  return 0;
}
Example 5: addDirToMaps
import org.apache.hadoop.hdfs.DFSInputStream; // import the required package/class
private void addDirToMaps(Path dir, DFSClient client) throws IOException {
  FileStatus[] children = dfs.listStatus(dir);
  if (children == null) return;
  for (FileStatus child : children) {
    if (!child.isDir()) { // get block ids for file
      Path path = child.getPath(); // paths will be unique
      fileMap.put(path, new ArrayList<Long>());
      DFSInputStream stm = client.open(child.getPath().toUri().getPath());
      LocatedBlocks blocks = stm.fetchLocatedBlocks();
      stm.close();
      for (int i = 0; i < blocks.locatedBlockCount(); i++) {
        Long blockId = blocks.get(i).getBlock().getBlockId();
        fileMap.get(path).add(blockId); // add to file block list
        blockRefMap.put(blockId, null); // mark as unreferenced
      }
    } else {
      // If child is a directory, recurse on it
      addDirToMaps(child.getPath(), client);
    }
  }
}
Example 6: inputStreamLoader
import org.apache.hadoop.hdfs.DFSInputStream; // import the required package/class
private CacheLoader<DFSInputStreamCaheKey, FSDataInputStream> inputStreamLoader() {
  return new CacheLoader<DFSInputStreamCaheKey, FSDataInputStream>() {
    @Override
    public FSDataInputStream load(DFSInputStreamCaheKey key) throws Exception {
      DFSClient client = getDfsClient(key.userId);
      DFSInputStream dis = client.open(key.inodePath);
      return client.createWrappedInputStream(dis);
    }
  };
}
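The loader above is meant to be handed to a Guava cache so that streams are opened lazily on the first access for a given key. A minimal sketch of the wiring, assuming Guava's CacheBuilder/LoadingCache are on the classpath and that the size bound and idle timeout shown here are made-up values:
// Assumed imports: com.google.common.cache.CacheBuilder, com.google.common.cache.LoadingCache,
// java.util.concurrent.TimeUnit
private final LoadingCache<DFSInputStreamCaheKey, FSDataInputStream> inputStreamCache =
    CacheBuilder.newBuilder()
        .maximumSize(1024)                       // assumed bound on cached streams
        .expireAfterAccess(10, TimeUnit.MINUTES) // assumed idle timeout
        .build(inputStreamLoader());

// A cache miss calls load(), which opens the inode path through the per-user DFSClient:
// FSDataInputStream in = inputStreamCache.get(key);
Note that evicted streams would still need to be closed, for example via a removal listener (not shown here).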
Example 7: open
import org.apache.hadoop.hdfs.DFSInputStream; // import the required package/class
@SuppressWarnings("deprecation")
@Override
public HdfsDataInputStream open(Path f, int bufferSize)
throws IOException, UnresolvedLinkException {
final DFSInputStream dfsis = dfs.open(getUriPath(f),
bufferSize, verifyChecksum);
return dfs.createWrappedInputStream(dfsis);
}
Example 8: cacheInitialContents
import org.apache.hadoop.hdfs.DFSInputStream; // import the required package/class
private byte[] cacheInitialContents() throws IOException {
  HdfsFileStatus status = dfsClient.getFileInfo(name);
  byte[] content = new byte[(int) status.getLen()];
  DFSInputStream in = null;
  try {
    in = dfsClient.open(name);
    IOUtils.readFully(in, content, 0, content.length);
  } finally {
    if (in != null) { // open() may have thrown before 'in' was assigned
      in.close();
    }
  }
  return content;
}
Example 9: checkSalvagedRemains
import org.apache.hadoop.hdfs.DFSInputStream; // import the required package/class
public void checkSalvagedRemains() throws IOException {
  int chainIdx = 0;
  HdfsFileStatus status = dfsClient.getFileInfo(name);
  long length = status.getLen();
  int numBlocks = (int) ((length + blockSize - 1) / blockSize);
  DFSInputStream in = null;
  byte[] blockBuffer = new byte[blockSize];
  try {
    for (int blockIdx = 0; blockIdx < numBlocks; blockIdx++) {
      if (blocksToCorrupt.contains(blockIdx)) {
        if (in != null) {
          in.close();
          in = null;
        }
        continue;
      }
      if (in == null) {
        in = dfsClient.open("/lost+found" + name + "/" + chainIdx);
        chainIdx++;
      }
      int len = blockBuffer.length;
      if (blockIdx == (numBlocks - 1)) {
        // The last block might not be full-length
        len = (int) (in.getFileLength() % blockSize);
        if (len == 0) len = blockBuffer.length;
      }
      IOUtils.readFully(in, blockBuffer, 0, len);
      int startIdx = blockIdx * blockSize;
      for (int i = 0; i < len; i++) {
        if (initialContents[startIdx + i] != blockBuffer[i]) {
          throw new IOException("salvaged file " + name + " differed " +
              "from what we expected on block " + blockIdx);
        }
      }
    }
  } finally {
    IOUtils.cleanup(null, in);
  }
}
Example 10: testDoGetShouldCloseTheDFSInputStreamIfResponseGetOutPutStreamThrowsAnyException
import org.apache.hadoop.hdfs.DFSInputStream; // import the required package/class
@Test
public void testDoGetShouldCloseTheDFSInputStreamIfResponseGetOutPutStreamThrowsAnyException()
    throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(1)
      .build();
  try {
    Path testFile = createFile();
    setUpForDoGetTest(cluster, testFile);
    Mockito.doThrow(new IOException()).when(mockHttpServletResponse)
        .getOutputStream();
    DFSInputStream fsMock = Mockito.mock(DFSInputStream.class);
    Mockito.doReturn(fsMock).when(clientMock).open(testFile.toString());
    Mockito.doReturn(Long.valueOf(4)).when(fsMock).getFileLength();
    try {
      sfile.doGet(mockHttpServletRequest, mockHttpServletResponse);
      fail("Not throwing the IOException");
    } catch (IOException e) {
      Mockito.verify(clientMock, Mockito.atLeastOnce()).close();
    }
  } finally {
    cluster.shutdown();
  }
}
Example 11: open
import org.apache.hadoop.hdfs.DFSInputStream; // import the required package/class
public static ByteBufferReader open(org.apache.hadoop.fs.FileSystem fileSystem,
    org.apache.hadoop.fs.Path path,
    long size,
    int blockCount,
    long readBase) throws IOException {
  FSDataInputStream stream = fileSystem.open(path);
  if (HDFS_READ_HACK_ENABLE) {
    if (IS_SHORT_CIRCUIT_LOCAL_READ_ENABLE == null) {
      IS_SHORT_CIRCUIT_LOCAL_READ_ENABLE = Boolean.parseBoolean(
          fileSystem.getConf().get("dfs.client.read.shortcircuit", "false"));
    }
    if (IS_SHORT_CIRCUIT_LOCAL_READ_ENABLE) {
      InputStream is = stream.getWrappedStream();
      if (is instanceof DFSInputStream) {
        // Disable checksum verification when short-circuit local reads are enabled.
        MemoryUtil.setDFSInputStream_verifyChecksum(is, false);
        logger.debug("disable read check sum for: {}", path);
      }
    }
  }
  return new DFSByteBufferReader(
      path.toString(),
      stream,
      size,
      readBase,
      stream,
      blockCount);
}
Example 12: findDFSClientInputStream
import org.apache.hadoop.hdfs.DFSInputStream; // import the required package/class
public static DFSInputStream findDFSClientInputStream(FSDataInputStream in)
    throws SecurityException, NoSuchFieldException, IllegalArgumentException,
    IllegalAccessException {
  Field inField = FilterInputStream.class.getDeclaredField("in");
  inField.setAccessible(true);
  return (DFSInputStream) inField.get(in);
}
Example 13: getDeadNodes
import org.apache.hadoop.hdfs.DFSInputStream; // import the required package/class
@SuppressWarnings("unchecked")
public static ConcurrentHashMap<DatanodeInfo, DatanodeInfo> getDeadNodes(
DFSInputStream in) throws SecurityException, IllegalArgumentException,
NoSuchFieldException, IllegalAccessException {
Field deadNodesField = DFSInputStream.class.getDeclaredField("deadNodes");
deadNodesField.setAccessible(true);
return (ConcurrentHashMap<DatanodeInfo, DatanodeInfo>) deadNodesField
.get(in);
}
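Examples 12 and 13 are reflective test helpers, so they depend on internal details of a particular Hadoop version (the FilterInputStream "in" field actually holding a DFSInputStream, and DFSInputStream having a private "deadNodes" field). A minimal usage sketch under those assumptions, where the class name ReflectionHelpers and the path /tmp/example.txt are made up for illustration:
public static void printDeadNodes(Configuration conf) throws Exception {
  FileSystem fs = FileSystem.get(conf);
  try (FSDataInputStream fsIn = fs.open(new Path("/tmp/example.txt"))) { // hypothetical path
    // Unwrap the client-side stream via reflection (Example 12).
    DFSInputStream dfsIn = ReflectionHelpers.findDFSClientInputStream(fsIn);
    dfsIn.read(); // trigger at least one block read so dead-node tracking is meaningful
    // Inspect the datanodes this stream has given up on (Example 13).
    ConcurrentHashMap<DatanodeInfo, DatanodeInfo> dead = ReflectionHelpers.getDeadNodes(dfsIn);
    System.out.println("datanodes marked dead for this stream: " + dead.size());
  }
}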
Example 14: open
import org.apache.hadoop.hdfs.DFSInputStream; // import the required package/class
public DFSInputStream open(String snapshotId, String src) throws IOException {
  LocatedBlocksWithMetaInfo[] blocks = getLocatedBlocks(snapshotId, src);
  // Not strictly correct: blocks.length == 1 could also mean a directory with
  // one file. Might want to add a file-specific API.
  if (blocks == null || blocks.length != 1) {
    throw new IOException("File at " + src + " doesn't exist in snapshot");
  }
  return client.open(blocks[0]);
}
Example 15: checkSalvagedRemains
import org.apache.hadoop.hdfs.DFSInputStream; // import the required package/class
public void checkSalvagedRemains() throws IOException {
  int chainIdx = 0;
  HdfsFileStatus status = dfsClient.getFileInfo(name);
  long length = status.getLen();
  int numBlocks = (int) ((length + blockSize - 1) / blockSize);
  DFSInputStream in = null;
  byte[] blockBuffer = new byte[blockSize];
  try {
    for (int blockIdx = 0; blockIdx < numBlocks; blockIdx++) {
      if (blocksToCorrupt.contains(blockIdx)) {
        if (in != null) {
          in.close();
          in = null;
        }
        continue;
      }
      if (in == null) {
        in = dfsClient.open("/lost+found" + name + "/" + chainIdx);
        chainIdx++;
      }
      int len = blockBuffer.length;
      if (blockIdx == (numBlocks - 1)) {
        // The last block might not be full-length
        len = (int) (in.getFileLength() % blockSize);
        if (len == 0) len = blockBuffer.length;
      }
      IOUtils.readFully(in, blockBuffer, 0, len);
      int startIdx = blockIdx * blockSize;
      for (int i = 0; i < len; i++) {
        if (initialContents[startIdx + i] != blockBuffer[i]) {
          throw new IOException("salvaged file " + name + " differed " +
              "from what we expected on block " + blockIdx);
        }
      }
    }
  } finally {
    IOUtils.cleanup(null, in);
  }
}