This article collects typical Java usage examples of the org.apache.hadoop.fs.BufferedFSInputStream class. If you are wondering what BufferedFSInputStream is for or how to use it, the curated class examples below may help.
The BufferedFSInputStream class belongs to the org.apache.hadoop.fs package. A total of 11 code examples of the class are shown below, sorted by popularity by default; upvoting the examples you find useful helps surface better Java code examples.
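Every example below follows the same wrapping pattern: a store-specific FSInputStream (Azure, S3, SFTP, OSS, TAR, local, ...) is wrapped in a BufferedFSInputStream, which adds client-side buffering while keeping seek and positioned-read semantics, and the result is handed to the FSDataInputStream returned by open(). A minimal, self-contained sketch of that pattern is shown first; the InMemoryFsInputStream class and the wrap() method are hypothetical stand-ins for illustration only, not part of any of the projects quoted below.
import java.io.IOException;
import org.apache.hadoop.fs.BufferedFSInputStream;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSInputStream;

// Hypothetical in-memory stream standing in for NativeAzureFsInputStream,
// NativeS3FsInputStream, etc. FSInputStream only requires seek/getPos/
// seekToNewSource plus the usual InputStream read().
class InMemoryFsInputStream extends FSInputStream {
  private final byte[] data;
  private int pos;

  InMemoryFsInputStream(byte[] data) {
    this.data = data;
  }

  @Override
  public void seek(long newPos) throws IOException {
    if (newPos < 0 || newPos > data.length) {
      throw new IOException("Cannot seek to " + newPos);
    }
    pos = (int) newPos;
  }

  @Override
  public long getPos() {
    return pos;
  }

  @Override
  public boolean seekToNewSource(long targetPos) {
    return false; // only one copy of the data exists
  }

  @Override
  public int read() {
    return pos < data.length ? (data[pos++] & 0xff) : -1;
  }
}

class WrapDemo {
  // The pattern every open() below uses: buffer the raw stream, then expose
  // it to Hadoop clients as an FSDataInputStream.
  static FSDataInputStream wrap(byte[] data, int bufferSize) throws IOException {
    return new FSDataInputStream(
        new BufferedFSInputStream(new InMemoryFsInputStream(data), bufferSize));
  }
}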
Example 1: open
import org.apache.hadoop.fs.BufferedFSInputStream; // import the required package/class
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Opening file: " + f.toString());
  }
  Path absolutePath = makeAbsolute(f);
  String key = pathToKey(absolutePath);
  FileMetadata meta = store.retrieveMetadata(key);
  if (meta == null) {
    throw new FileNotFoundException(f.toString());
  }
  if (meta.isDir()) {
    throw new FileNotFoundException(f.toString()
        + " is a directory not a file.");
  }
  return new FSDataInputStream(new BufferedFSInputStream(
      new NativeAzureFsInputStream(store.retrieve(key), key, meta.getLength()), bufferSize));
}
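For comparison, callers never construct these wrappers themselves; they go through FileSystem.open() and read from the returned FSDataInputStream. A hedged caller-side sketch follows; the path, buffer size, and class name are arbitrary illustrations, and FileSystem.get() resolves to whichever file system the configuration points at.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReadExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf); // e.g. one of the file systems shown in these examples
    try (FSDataInputStream in = fs.open(new Path("/tmp/example.txt"), 4096)) {
      byte[] buf = new byte[4096];
      int n = in.read(buf); // reads are served through the BufferedFSInputStream built in open()
      System.out.println("Read " + n + " bytes");
    }
  }
}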
Example 2: open
import org.apache.hadoop.fs.BufferedFSInputStream; // import the required package/class
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
  LOG.debug("Opening file: {}", f.toString());
  Path absolutePath = makeAbsolute(f);
  String key = pathToKey(absolutePath);
  FileMetadata meta = store.retrieveMetadata(key);
  if (meta == null) {
    throw new FileNotFoundException(f.toString());
  }
  if (meta.isDir()) {
    throw new FileNotFoundException(f.toString()
        + " is a directory not a file.");
  }
  return new FSDataInputStream(new BufferedFSInputStream(
      new NativeAzureFsInputStream(store.retrieve(key), key, meta.getLength()), bufferSize));
}
Example 3: open
import org.apache.hadoop.fs.BufferedFSInputStream; // import the required package/class
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
  FileStatus fs = getFileStatus(f); // will throw if the file doesn't exist
  if (fs.isDirectory()) {
    throw new FileNotFoundException("'" + f + "' is a directory");
  }
  LOG.info("Opening '" + f + "' for reading");
  Path absolutePath = makeAbsolute(f);
  String key = pathToKey(absolutePath);
  return new FSDataInputStream(new BufferedFSInputStream(
      new NativeS3FsInputStream(store, statistics, store.retrieve(key), key), bufferSize));
}
Example 4: open
import org.apache.hadoop.fs.BufferedFSInputStream; // import the required package/class
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
  if (!exists(f)) {
    throw new FileNotFoundException(f.toString());
  }
  Path absolutePath = makeAbsolute(f);
  String key = pathToKey(absolutePath);
  return new FSDataInputStream(new BufferedFSInputStream(
      new NativeS3FsInputStream(store.retrieve(key), key), bufferSize));
}
Example 5: open
import org.apache.hadoop.fs.BufferedFSInputStream; // import the required package/class
@Override
public FSDataInputStream open(Path path, int bufferSize) throws IOException {
  SftpGetMonitor monitor = new SftpGetMonitor();
  try {
    ChannelSftp channelSftp = fsHelper.getSftpChannel();
    InputStream is = channelSftp.get(path.toString(), monitor);
    return new FSDataInputStream(new BufferedFSInputStream(new SftpFsFileInputStream(is, channelSftp), bufferSize));
  } catch (SftpException e) {
    throw new IOException(e);
  }
}
Example 6: open
import org.apache.hadoop.fs.BufferedFSInputStream; // import the required package/class
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
  FileStatus fs = getFileStatus(f); // will throw if the file doesn't exist
  if (fs.isDirectory()) {
    throw new IOException("'" + f + "' is a directory");
  }
  LOG.info("Opening '" + f + "' for reading");
  Path absolutePath = makeAbsolute(f);
  String key = pathToKey(absolutePath);
  return new FSDataInputStream(new BufferedFSInputStream(
      new NativeS3FsInputStream(store, statistics, store.retrieve(key), key), bufferSize));
}
Example 7: open
import org.apache.hadoop.fs.BufferedFSInputStream; // import the required package/class
@Override
public FSDataInputStream open(Path path, int bufferSize)
    throws IOException
{
  return new FSDataInputStream(
      new BufferedFSInputStream(
          new PrestoS3InputStream(s3, uri.getHost(), path, maxAttempts, maxBackoffTime, maxRetryTime),
          bufferSize));
}
Example 8: open
import org.apache.hadoop.fs.BufferedFSInputStream; // import the required package/class
@Override
public FSDataInputStream open(Path path, int bufferSize) throws IOException {
  SftpGetMonitor monitor = new SftpGetMonitor();
  try {
    ChannelSftp channelSftp = this.fsHelper.getSftpChannel();
    InputStream is = channelSftp.get(HadoopUtils.toUriPath(path), monitor);
    return new FSDataInputStream(new BufferedFSInputStream(new SftpFsHelper.SftpFsFileInputStream(is, channelSftp), bufferSize));
  } catch (SftpException e) {
    throw new IOException(e);
  }
}
Example 9: open
import org.apache.hadoop.fs.BufferedFSInputStream; // import the required package/class
@Override
@Nonnull
public FSDataInputStream open(Path path, int bufferSize) throws IOException {
  path = checkNotNull(path);
  FileStatus fs = getFileStatus(path); // will throw if the file doesn't exist
  if (fs.isDirectory()) throw new FileNotFoundException("'" + path + "' is a directory");
  LOG.info("Opening '{}' for reading", path);
  Path absolutePath = makeAbsolute(path);
  String key = pathToKey(absolutePath);
  return new FSDataInputStream(new BufferedFSInputStream(new OSSFileInputStream(store, key, of(statistics)), bufferSize));
}
Example 10: open
import org.apache.hadoop.fs.BufferedFSInputStream; // import the required package/class
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
  Path baseTarPath = getBaseTarPath(f);
  String inFile = getFileInArchive(f);
  if (inFile == null)
    throw new IOException("TAR FileSystem: Can not open the whole TAR");
  // adjust for the header
  long offset = index.getOffset(inFile);
  long size = index.getSize(inFile);
  FSDataInputStream in = underlyingFS.open(baseTarPath);
  in.seek(offset - 512);
  TarArchiveEntry entry = readHeaderEntry(in);
  if (!entry.getName().equals(inFile)) {
    LOG.fatal(
        "Index file is corrupt. " +
        "Requested filename is present in index " +
        "but absent in TAR.");
    throw new IOException("Requested filename does not match");
  }
  return new FSDataInputStream(
      new BufferedFSInputStream(
          new SeekableTarInputStream(in, size, offset),
          bufferSize));
}
Example 11: open
import org.apache.hadoop.fs.BufferedFSInputStream; // import the required package/class
@Override
public FSDataInputStream open(Path path, int bufferSize) throws IOException {
  if (!exists(path)) {
    throw new FileNotFoundException(path.toString());
  }
  return new FSDataInputStream(new BufferedFSInputStream(
      new LFSInputStream(path), bufferSize));
}