This article collects typical usage examples of the Java class org.apache.hadoop.fs.FSDataInputStream. If you are unsure what FSDataInputStream does or how to use it, the curated examples below should help.
The FSDataInputStream class belongs to the org.apache.hadoop.fs package. Fifteen code examples are listed below, sorted by popularity by default; you can upvote the ones you find useful, and your ratings help surface better Java examples.
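Before the individual examples, here is a minimal, self-contained sketch of the typical FSDataInputStream workflow: obtain a FileSystem, open a path, seek, and read. The path, offset, and buffer sizes are illustrative assumptions only and are not taken from the examples below.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FsDataInputStreamSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // hypothetical path, assumed to hold at least 128 bytes
    Path path = new Path("/tmp/example.txt");
    try (FSDataInputStream in = fs.open(path)) {
      in.seek(64);                 // Seekable: move the stream to an absolute offset
      byte[] buf = new byte[64];
      in.readFully(buf);           // DataInput: fill the buffer from the current position
      byte[] head = new byte[16];
      in.readFully(0, head);       // PositionedReadable: read at an offset without moving the stream
      System.out.println("current position = " + in.getPos());
    }
  }
}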
Example 1: download
import org.apache.hadoop.fs.FSDataInputStream; // import the required package/class
/**
 * Download a file from HDFS to the local filesystem.
 *
 * @param taskName    name of the sqoop task whose configuration holds the HDFS address and model path
 * @param filePath    local destination path
 * @param existDelete whether to overwrite the local file if it already exists
 */
public static void download(String taskName, String filePath, boolean existDelete) {
  File file = new File(filePath);
  if (file.exists()) {
    if (existDelete) {
      // the local copy is overwritten below; deleteOnExit only removes it at JVM shutdown
      file.deleteOnExit();
    } else {
      return;
    }
  }
  String hadoopAddress = propertyConfig.getProperty("sqoop.task." + taskName + ".tolink.linkConfig.uri");
  String itemmodels = propertyConfig.getProperty("sqoop.task." + taskName + ".recommend.itemmodels");
  try {
    DistributedFileSystem distributedFileSystem = distributedFileSystem(hadoopAddress);
    FSDataInputStream fsDataInputStream = distributedFileSystem.open(new Path(itemmodels));
    // available() is used here to size the buffer; for very large files a streaming copy would be safer.
    // readFully (rather than a bare read) guarantees the whole buffer is filled.
    byte[] bs = new byte[fsDataInputStream.available()];
    fsDataInputStream.readFully(bs);
    log.info(new String(bs));
    FileOutputStream fileOutputStream = new FileOutputStream(new File(filePath));
    IOUtils.write(bs, fileOutputStream);
    IOUtils.closeQuietly(fileOutputStream);
    IOUtils.closeQuietly(fsDataInputStream);
  } catch (IOException e) {
    log.error(e);
  }
}
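A hedged usage sketch of the method above — the task name and local path are hypothetical placeholders, and the corresponding sqoop.task.* properties are assumed to be present in the loaded configuration:
// downloads the item-model file for the hypothetical task "demo",
// overwriting /tmp/itemmodels.txt if a copy is already present
download("demo", "/tmp/itemmodels.txt", true);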
Example 2: open
import org.apache.hadoop.fs.FSDataInputStream; // import the required package/class
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
  Path absolutePath = toAbsolutePath(f);
  checkPath(absolutePath);
  // Handle root
  if (absolutePath.isRoot()) {
    throw new AccessControlException("Cannot open " + f);
  }
  try {
    RemotePath remotePath = getRemotePath(absolutePath);
    FileSystem delegate = getDelegateFileSystem(remotePath.address);
    return delegate.open(remotePath.path, bufferSize);
  } catch (IllegalArgumentException e) {
    throw (FileNotFoundException) (new FileNotFoundException("No file " + absolutePath).initCause(e));
  }
}
Example 3: loadSparseDoublePartition
import org.apache.hadoop.fs.FSDataInputStream; // import the required package/class
private static void loadSparseDoublePartition(SparseDoubleModel model, FSDataInputStream input,
    ModelPartitionMeta partMeta) throws IOException {
  int rowNum = input.readInt();
  int rowId = 0;
  int nnz = 0;
  int totalNNZ = 0;
  Int2DoubleOpenHashMap row = null;
  for (int i = 0; i < rowNum; i++) {
    rowId = input.readInt();
    nnz = input.readInt();
    totalNNZ = (int) (nnz * (model.col) / (partMeta.getEndCol() - partMeta.getStartCol()));
    row = model.getRow(rowId, partMeta.getPartId(), totalNNZ);
    for (int j = 0; j < nnz; j++) {
      row.put(input.readInt(), input.readDouble());
    }
  }
}
Example 4: readOnDiskMapOutput
import org.apache.hadoop.fs.FSDataInputStream; // import the required package/class
private void readOnDiskMapOutput(Configuration conf, FileSystem fs, Path path,
    List<String> keys, List<String> values) throws IOException {
  FSDataInputStream in = CryptoUtils.wrapIfNecessary(conf, fs.open(path));
  IFile.Reader<Text, Text> reader = new IFile.Reader<Text, Text>(conf, in,
      fs.getFileStatus(path).getLen(), null, null);
  DataInputBuffer keyBuff = new DataInputBuffer();
  DataInputBuffer valueBuff = new DataInputBuffer();
  Text key = new Text();
  Text value = new Text();
  while (reader.nextRawKey(keyBuff)) {
    key.readFields(keyBuff);
    keys.add(key.toString());
    reader.nextRawValue(valueBuff);
    value.readFields(valueBuff);
    values.add(value.toString());
  }
}
Example 5: testOnMessageEOF
import org.apache.hadoop.fs.FSDataInputStream; // import the required package/class
@Test
public void testOnMessageEOF() throws IOException {
  // FSDataInputStream requires the wrapped stream to implement Seekable and
  // PositionedReadable, hence the extra interfaces on the mock
  InputStream mis = mock(InputStream.class, withSettings().extraInterfaces(Seekable.class, PositionedReadable.class));
  doReturn(-1).when(mis).read(any(byte[].class), anyInt(), anyInt());
  FSDataInputStream fdis = new FSDataInputStream(mis);
  Response response = getResponse(7L, 4096, fdis);
  InOrder inOrder = Mockito.inOrder(mis);
  inOrder.verify((Seekable) mis).seek(7);
  inOrder.verify(mis).read(any(byte[].class), anyInt(), anyInt());
  assertEquals(-1, ((DFS.GetFileDataResponse) response.pBody).getRead());
  assertEquals(0, response.dBodies.length);
}
Example 6: testBasicReadWriteIO
import org.apache.hadoop.fs.FSDataInputStream; // import the required package/class
public void testBasicReadWriteIO() throws IOException {
  FSDataOutputStream writeStream = fs.create(new Path(TEST_PATH));
  writeStream.write(TEST_DATA.getBytes());
  writeStream.flush();
  writeStream.close();
  FSDataInputStream readStream = fs.open(new Path(TEST_PATH));
  BufferedReader br = new BufferedReader(new InputStreamReader(readStream));
  String line = "";
  StringBuffer stringBuffer = new StringBuffer();
  while ((line = br.readLine()) != null) {
    stringBuffer.append(line);
  }
  br.close();
  assert (TEST_DATA.equals(stringBuffer.toString()));
}
Example 7: getPreviousJobHistoryFileStream
import org.apache.hadoop.fs.FSDataInputStream; // import the required package/class
public static FSDataInputStream getPreviousJobHistoryFileStream(
    Configuration conf, ApplicationAttemptId applicationAttemptId)
    throws IOException {
  FSDataInputStream in = null;
  Path historyFile = null;
  String jobId =
      TypeConverter.fromYarn(applicationAttemptId.getApplicationId())
          .toString();
  String jobhistoryDir =
      JobHistoryUtils.getConfiguredHistoryStagingDirPrefix(conf, jobId);
  Path histDirPath =
      FileContext.getFileContext(conf).makeQualified(new Path(jobhistoryDir));
  FileContext fc = FileContext.getFileContext(histDirPath.toUri(), conf);
  // read the previous history file
  historyFile =
      fc.makeQualified(JobHistoryUtils.getStagingJobHistoryFile(histDirPath,
          jobId, (applicationAttemptId.getAttemptId() - 1)));
  LOG.info("History file is at " + historyFile);
  in = fc.open(historyFile);
  return in;
}
Example 8: verifySeek
import org.apache.hadoop.fs.FSDataInputStream; // import the required package/class
/** test seek */
static void verifySeek(FileSystem fs, Path p, long offset, long length,
    byte[] buf, byte[] expected) throws IOException {
  long remaining = length - offset;
  long checked = 0;
  LOG.info("XXX SEEK: offset=" + offset + ", remaining=" + remaining);
  final Ticker t = new Ticker("SEEK", "offset=%d, remaining=%d",
      offset, remaining);
  final FSDataInputStream in = fs.open(p, 64 << 10);
  in.seek(offset);
  for (; remaining > 0; ) {
    t.tick(checked, "offset=%d, remaining=%d", offset, remaining);
    final int n = (int) Math.min(remaining, buf.length);
    in.readFully(buf, 0, n);
    checkData(offset, remaining, n, buf, expected);
    offset += n;
    remaining -= n;
    checked += n;
  }
  in.close();
  t.end(checked);
}
Example 9: testPositionalReadPrematureEOF
import org.apache.hadoop.fs.FSDataInputStream; // import the required package/class
@Test
public void testPositionalReadPrematureEOF() throws IOException {
  long position = 0;
  int bufOffset = 0;
  int necessaryLen = 10;
  int extraLen = 0;
  int totalLen = necessaryLen + extraLen;
  byte[] buf = new byte[totalLen];
  FSDataInputStream in = mock(FSDataInputStream.class);
  when(in.read(position, buf, bufOffset, totalLen)).thenReturn(9);
  when(in.read(position, buf, bufOffset, totalLen)).thenReturn(-1);
  exception.expect(IOException.class);
  exception.expectMessage("EOF");
  HFileBlock.positionalReadWithExtra(in, position, buf, bufOffset,
      necessaryLen, extraLen);
}
Example 10: checkFile
import org.apache.hadoop.fs.FSDataInputStream; // import the required package/class
private void checkFile(FileSystem fileSys, Path name) throws IOException {
  BlockLocation[] locations = fileSys.getFileBlockLocations(
      fileSys.getFileStatus(name), 0, fileSize);
  assertEquals("Number of blocks", fileSize, locations.length);
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[fileSize];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; ++i) {
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[fileSize];
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  stm.close();
}
Example 11: Reader
import org.apache.hadoop.fs.FSDataInputStream; // import the required package/class
/**
 * Constructor
 *
 * @param fsdis
 *          FS input stream of the TFile.
 * @param fileLength
 *          The length of TFile. This is required because we have no easy
 *          way of knowing the actual size of the input file through the
 *          File input stream.
 * @param conf
 * @throws IOException
 */
public Reader(FSDataInputStream fsdis, long fileLength, Configuration conf)
    throws IOException {
  readerBCF = new BCFile.Reader(fsdis, fileLength, conf);
  // first, read TFile meta
  BlockReader brMeta = readerBCF.getMetaBlock(TFileMeta.BLOCK_NAME);
  try {
    tfileMeta = new TFileMeta(brMeta);
  } finally {
    brMeta.close();
  }
  comparator = tfileMeta.getComparator();
  // Set begin and end locations.
  begin = new Location(0, 0);
  end = new Location(readerBCF.getBlockCount(), 0);
}
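The constructor above needs the TFile's length up front, which a caller would typically obtain from FileSystem#getFileStatus before opening the stream. A minimal, hedged sketch of that pattern (the path is a hypothetical placeholder):
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
Path path = new Path("/tmp/data.tfile"); // hypothetical TFile location
FSDataInputStream fsdis = fs.open(path);
long fileLength = fs.getFileStatus(path).getLen();
TFile.Reader reader = new TFile.Reader(fsdis, fileLength, conf);
try {
  // e.g. iterate over the TFile with reader.createScanner()
} finally {
  reader.close();
  fsdis.close();
}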
Example 12: forceSecureOpenFSDataInputStream
import org.apache.hadoop.fs.FSDataInputStream; // import the required package/class
/**
 * Same as openFSDataInputStream except that it will run even if security is
 * off. This is used by unit tests.
 */
@VisibleForTesting
protected static FSDataInputStream forceSecureOpenFSDataInputStream(
    File file,
    String expectedOwner, String expectedGroup) throws IOException {
  final FSDataInputStream in =
      rawFilesystem.open(new Path(file.getAbsolutePath()));
  boolean success = false;
  try {
    Stat stat = NativeIO.POSIX.getFstat(in.getFileDescriptor());
    checkStat(file, stat.getOwner(), stat.getGroup(), expectedOwner,
        expectedGroup);
    success = true;
    return in;
  } finally {
    if (!success) {
      in.close();
    }
  }
}
Example 13: testReadFile
import org.apache.hadoop.fs.FSDataInputStream; // import the required package/class
/**
 * Test writing to a file and reading its value.
 *
 * @throws Exception
 */
@Test
public void testReadFile() throws Exception {
  byte[] data = "yaks".getBytes();
  Path file = touch(localFs, name.getMethodName().toLowerCase(), data);
  FSDataInputStream is = null;
  try {
    is = sftpFs.open(file);
    byte[] b = new byte[data.length];
    is.read(b);
    assertArrayEquals(data, b);
  } finally {
    if (is != null) {
      is.close();
    }
  }
  assertTrue(sftpFs.delete(file, false));
}
Example 14: readDataset
import org.apache.hadoop.fs.FSDataInputStream; // import the required package/class
/**
 * Read the file and convert to a byte dataset.
 * This implements readFully internally, so that it will read
 * in the file without ever having to seek().
 * @param fs filesystem
 * @param path path to read from
 * @param len length of data to read
 * @return the bytes
 * @throws IOException IO problems
 */
public static byte[] readDataset(FileSystem fs, Path path, int len)
    throws IOException {
  FSDataInputStream in = fs.open(path);
  byte[] dest = new byte[len];
  int offset = 0;
  int nread = 0;
  try {
    while (nread < len) {
      int nbytes = in.read(dest, offset + nread, len - nread);
      if (nbytes < 0) {
        throw new EOFException("End of file reached before reading fully.");
      }
      nread += nbytes;
    }
  } finally {
    in.close();
  }
  return dest;
}
Example 15: testNegativeSeek
import org.apache.hadoop.fs.FSDataInputStream; // import the required package/class
/**
 * Test (expected to throw IOE) for negative
 * <code>FSDataInputStream#seek</code> argument
 */
@Test(expected = IOException.class)
public void testNegativeSeek() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    Path seekFile = new Path("seekboundaries.dat");
    DFSTestUtil.createFile(
        fs,
        seekFile,
        ONEMB,
        fs.getDefaultReplication(seekFile),
        seed);
    FSDataInputStream stream = fs.open(seekFile);
    // Perform "safe seek" (expected to pass)
    stream.seek(65536);
    assertEquals(65536, stream.getPos());
    // expect IOE for this call
    stream.seek(-73);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}