This article collects typical usage examples of the Java method org.apache.hadoop.fs.FSDataOutputStream.close. If you have been wondering what FSDataOutputStream.close does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FSDataOutputStream.
The sections below show 15 code examples of the FSDataOutputStream.close method, sorted by popularity by default.
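Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the usual create/write/close pattern around FSDataOutputStream; the path and the default Configuration are placeholder assumptions. Using try-with-resources, as shown, is equivalent to calling close() explicitly in a finally block, since FSDataOutputStream is a Closeable.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FsCloseSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();        // default configuration (assumption)
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/close-example.txt");  // placeholder path

    // try-with-resources flushes and closes the stream automatically;
    // it is equivalent to calling out.close() in a finally block.
    try (FSDataOutputStream out = fs.create(path, true)) {
      out.writeBytes("hello, FSDataOutputStream.close()\n");
    }
  }
}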
Example 1: unsortedWithSomeCodec
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
void unsortedWithSomeCodec(String codec) throws IOException {
  Path uTfile = new Path(ROOT, "unsorted.tfile");
  FSDataOutputStream fout = createFSOutput(uTfile);
  Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
  writeRecords(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(uTfile);
  Reader reader =
      new Reader(fs.open(uTfile), fs.getFileStatus(uTfile).getLen(), conf);
  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(uTfile, true);
}
Example 2: testVLongRandom
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
public void testVLongRandom() throws IOException {
  int count = 1024 * 1024;
  long data[] = new long[count];
  Random rng = new Random();
  for (int i = 0; i < data.length; ++i) {
    int shift = rng.nextInt(Long.SIZE) + 1;
    long mask = (1L << shift) - 1;
    long a = ((long) rng.nextInt()) << 32;
    long b = ((long) rng.nextInt()) & 0xffffffffL;
    data[i] = (a + b) & mask;
  }
  FSDataOutputStream out = fs.create(path);
  for (int i = 0; i < data.length; ++i) {
    Utils.writeVLong(out, data[i]);
  }
  out.close();
  FSDataInputStream in = fs.open(path);
  for (int i = 0; i < data.length; ++i) {
    Assert.assertEquals(Utils.readVLong(in), data[i]);
  }
  in.close();
  fs.delete(path, false);
}
Example 3: copyPartitions
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private void copyPartitions(Path mapOutputPath, Path indexPath)
    throws IOException {
  FileSystem localFs = FileSystem.getLocal(jobConf);
  FileSystem rfs = ((LocalFileSystem)localFs).getRaw();
  FSDataOutputStream rawOutput = rfs.create(mapOutputPath, true, BUF_SIZE);
  SpillRecord spillRecord = new SpillRecord(numberOfPartitions);
  IndexRecord indexRecord = new IndexRecord();
  for (int i = 0; i < numberOfPartitions; i++) {
    indexRecord.startOffset = rawOutput.getPos();
    byte buffer[] = outStreams[i].toByteArray();
    IFileOutputStream checksumOutput = new IFileOutputStream(rawOutput);
    checksumOutput.write(buffer);
    // Write checksum.
    checksumOutput.finish();
    // Write index record
    indexRecord.rawLength = (long)buffer.length;
    indexRecord.partLength = rawOutput.getPos() - indexRecord.startOffset;
    spillRecord.putIndex(indexRecord, i);
    reporter.progress();
  }
  rawOutput.close();
  spillRecord.writeToFile(indexPath, jobConf);
}
Example 4: testFavoredNodesEndToEndForAppend
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
@Test(timeout = 180000)
public void testFavoredNodesEndToEndForAppend() throws Exception {
  // create 10 files with random preferred nodes
  for (int i = 0; i < NUM_FILES; i++) {
    Random rand = new Random(System.currentTimeMillis() + i);
    // pass a new created rand so as to get a uniform distribution each time
    // without too much collisions (look at the do-while loop in getDatanodes)
    InetSocketAddress datanode[] = getDatanodes(rand);
    Path p = new Path("/filename" + i);
    // create and close the file.
    dfs.create(p, FsPermission.getDefault(), true, 4096, (short) 3, 4096L,
        null, null).close();
    // re-open for append
    FSDataOutputStream out = dfs.append(p, EnumSet.of(CreateFlag.APPEND),
        4096, null, datanode);
    out.write(SOME_BYTES);
    out.close();
    BlockLocation[] locations = getBlockLocations(p);
    // verify the files got created in the right nodes
    for (BlockLocation loc : locations) {
      String[] hosts = loc.getNames();
      String[] hosts1 = getStringForInetSocketAddrs(datanode);
      assertTrue(compareNodes(hosts, hosts1));
    }
  }
}
Example 5: writeSnapshotInfo
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * Write the snapshot description into the working directory of a snapshot
 * @param snapshot description of the snapshot being taken
 * @param workingDir working directory of the snapshot
 * @param fs {@link FileSystem} on which the snapshot should be taken
 * @throws IOException if we can't reach the filesystem and the file cannot be cleaned up on
 *           failure
 */
public static void writeSnapshotInfo(SnapshotDescription snapshot, Path workingDir, FileSystem fs)
    throws IOException {
  FsPermission perms = FSUtils.getFilePermissions(fs, fs.getConf(),
      HConstants.DATA_FILE_UMASK_KEY);
  Path snapshotInfo = new Path(workingDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE);
  try {
    FSDataOutputStream out = FSUtils.create(fs, snapshotInfo, perms, true);
    try {
      snapshot.writeTo(out);
    } finally {
      out.close();
    }
  } catch (IOException e) {
    // if we get an exception, try to remove the snapshot info
    if (!fs.delete(snapshotInfo, false)) {
      String msg = "Couldn't delete snapshot info file: " + snapshotInfo;
      LOG.error(msg);
      throw new IOException(msg);
    }
  }
}
Example 6: testMultiChunkFile
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
@Test
public void testMultiChunkFile() throws Exception {
  Path testPath = new Path(TEST_ROOT_DIR, "testMultiChunk");
  FSDataOutputStream fout = localFs.create(testPath);
  for (int i = 0; i < 1000; i++) {
    fout.write(("testing" + i).getBytes());
  }
  fout.close();
  // Exercise some boundary cases - a divisor of the chunk size
  // the chunk size, 2x chunk size, and +/-1 around these.
  readFile(localFs, testPath, 128);
  readFile(localFs, testPath, 511);
  readFile(localFs, testPath, 512);
  readFile(localFs, testPath, 513);
  readFile(localFs, testPath, 1023);
  readFile(localFs, testPath, 1024);
  readFile(localFs, testPath, 1025);
}
Example 7: testManyPartitionedFile
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * Test sticks up a very large partitioned file and verifies that
 * it comes back unchanged.
 * @throws Throwable
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testManyPartitionedFile() throws Throwable {
  final Path path = new Path("/test/testManyPartitionedFile");
  int len = PART_SIZE_BYTES * 15;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path,
      false,
      getBufferSize(),
      (short) 1,
      BLOCK_SIZE);
  out.write(src, 0, src.length);
  int expected =
      getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
  out.close();
  assertPartitionsWritten("write completed", out, expected);
  assertEquals("too few bytes written", len,
      SwiftNativeFileSystem.getBytesWritten(out));
  assertEquals("too few bytes uploaded", len,
      SwiftNativeFileSystem.getBytesUploaded(out));
  //now we verify that the data comes back. If it
  //doesn't, it means that the ordering of the partitions
  //isn't right
  byte[] dest = readDataset(fs, path, len);
  //compare data
  SwiftTestUtils.compareByteArrays(src, dest, len);
  //finally, check the data
  FileStatus[] stats = fs.listStatus(path);
  assertEquals("wrong entry count in "
      + SwiftTestUtils.dumpStats(path.toString(), stats),
      expected, stats.length);
}
Example 8: createInputFile
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private void createInputFile(Path path, int rowCount)
    throws IOException {
  if (fs.exists(path)) {
    fs.delete(path, true);
  }
  FSDataOutputStream os = fs.create(path);
  for (int i = 0; i < rowCount; i++) {
    String s = i + "\n";
    os.writeChars(s);
  }
  os.close();
}
Example 9: testMetaBlocks
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
@Test
public void testMetaBlocks() throws IOException {
  Path mFile = new Path(ROOT, "meta.tfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  Writer writer = new Writer(fout, minBlockSize, "none", null, conf);
  someTestingWithMetaBlock(writer, "none");
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader =
      new Reader(fin, fs.getFileStatus(mFile).getLen(), conf);
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
Example 10: testReadClosedStream
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
@Test
public void testReadClosedStream() throws IOException {
  final Path testFile = new Path("/testfile+2");
  FSDataOutputStream os = hdfs.create(testFile, true);
  os.writeBytes("0123456789");
  os.close();
  // ByteRangeInputStream delays opens until reads. Make sure it doesn't
  // open a closed stream that has never been opened
  FSDataInputStream in = hftpFs.open(testFile);
  in.close();
  checkClosedStream(in);
  checkClosedStream(in.getWrappedStream());
  // force the stream to connect and then close it
  in = hftpFs.open(testFile);
  int ch = in.read();
  assertEquals('0', ch);
  in.close();
  checkClosedStream(in);
  checkClosedStream(in.getWrappedStream());
  // make sure seeking doesn't automagically reopen the stream
  in.seek(4);
  checkClosedStream(in);
  checkClosedStream(in.getWrappedStream());
}
Example 11: WriteDataToHDFS
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private void WriteDataToHDFS(FileSystem fs, Path file, int dataSize)
    throws Exception {
  FSDataOutputStream out = fs.create(file);
  byte [] data = new byte[dataSize];
  out.write(data, 0, dataSize);
  out.close();
}
Example 12: testForNonDefaultFileSystem
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * Test when input files are from non-default file systems
 */
@Test
public void testForNonDefaultFileSystem() throws Throwable {
  Configuration conf = new Configuration();
  // use a fake file system scheme as default
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, DUMMY_FS_URI);
  // default fs path
  assertEquals(DUMMY_FS_URI, FileSystem.getDefaultUri(conf).toString());
  // add a local file
  Path localPath = new Path("testFile1");
  FileSystem lfs = FileSystem.getLocal(conf);
  FSDataOutputStream dos = lfs.create(localPath);
  dos.writeChars("Local file for CFIF");
  dos.close();
  Job job = Job.getInstance(conf);
  FileInputFormat.setInputPaths(job, lfs.makeQualified(localPath));
  DummyInputFormat inFormat = new DummyInputFormat();
  List<InputSplit> splits = inFormat.getSplits(job);
  assertTrue(splits.size() > 0);
  for (InputSplit s : splits) {
    CombineFileSplit cfs = (CombineFileSplit)s;
    for (Path p : cfs.getPaths()) {
      assertEquals(p.toUri().getScheme(), "file");
    }
  }
}
Example 13: testDeleteOnExit
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * Test deleteOnExit
 */
@Test
public void testDeleteOnExit() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  FileSystem localfs = FileSystem.getLocal(conf);
  try {
    // Creates files in HDFS and local file system.
    //
    Path file1 = new Path("filestatus.dat");
    Path file2 = new Path("filestatus2.dat");
    Path file3 = new Path("filestatus3.dat");
    FSDataOutputStream stm1 = createFile(fs, file1, 1);
    FSDataOutputStream stm2 = createFile(fs, file2, 1);
    FSDataOutputStream stm3 = createFile(localfs, file3, 1);
    System.out.println("DeleteOnExit: Created files.");
    // write to files and close. Purposely, do not close file2.
    writeFile(stm1);
    writeFile(stm3);
    stm1.close();
    stm2.close();
    stm3.close();
    // set delete on exit flag on files.
    fs.deleteOnExit(file1);
    fs.deleteOnExit(file2);
    localfs.deleteOnExit(file3);
    // close the file system. This should make the above files
    // disappear.
    fs.close();
    localfs.close();
    fs = null;
    localfs = null;
    // reopen file system and verify that file does not exist.
    fs = cluster.getFileSystem();
    localfs = FileSystem.getLocal(conf);
    assertTrue(file1 + " still exists inspite of deletOnExit set.",
        !fs.exists(file1));
    assertTrue(file2 + " still exists inspite of deletOnExit set.",
        !fs.exists(file2));
    assertTrue(file3 + " still exists inspite of deletOnExit set.",
        !localfs.exists(file3));
    System.out.println("DeleteOnExit successful.");
  } finally {
    IOUtils.closeStream(fs);
    IOUtils.closeStream(localfs);
    cluster.shutdown();
  }
}
Example 14: testVerifyChecksum
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
@Test
public void testVerifyChecksum() throws Exception {
  Path testPath = new Path(TEST_ROOT_DIR, "testPath");
  Path testPath11 = new Path(TEST_ROOT_DIR, "testPath11");
  FSDataOutputStream fout = localFs.create(testPath);
  fout.write("testing".getBytes());
  fout.close();
  fout = localFs.create(testPath11);
  fout.write("testing you".getBytes());
  fout.close();
  // Exercise some boundary cases - a divisor of the chunk size
  // the chunk size, 2x chunk size, and +/-1 around these.
  readFile(localFs, testPath, 128);
  readFile(localFs, testPath, 511);
  readFile(localFs, testPath, 512);
  readFile(localFs, testPath, 513);
  readFile(localFs, testPath, 1023);
  readFile(localFs, testPath, 1024);
  readFile(localFs, testPath, 1025);
  localFs.delete(localFs.getChecksumFile(testPath), true);
  assertTrue("checksum deleted", !localFs.exists(localFs.getChecksumFile(testPath)));
  //copying the wrong checksum file
  FileUtil.copy(localFs, localFs.getChecksumFile(testPath11), localFs,
      localFs.getChecksumFile(testPath), false, true, localFs.getConf());
  assertTrue("checksum exists", localFs.exists(localFs.getChecksumFile(testPath)));
  boolean errorRead = false;
  try {
    readFile(localFs, testPath, 1024);
  } catch (ChecksumException ie) {
    errorRead = true;
  }
  assertTrue("error reading", errorRead);
  //now setting verify false, the read should succeed
  localFs.setVerifyChecksum(false);
  String str = readFile(localFs, testPath, 1024).toString();
  assertTrue("read", "testing".equals(str));
}
Example 15: doWriteOverFailoverTest
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private void doWriteOverFailoverTest(TestScenario scenario,
    MethodToTestIdempotence methodToTest) throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  // Don't check replication periodically.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
  FSDataOutputStream stm = null;
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(3)
      .build();
  try {
    int sizeWritten = 0;
    cluster.waitActive();
    cluster.transitionToActive(0);
    Thread.sleep(500);
    LOG.info("Starting with NN 0 active");
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    stm = fs.create(TEST_PATH);
    // write a block and a half
    AppendTestUtil.write(stm, 0, BLOCK_AND_A_HALF);
    sizeWritten += BLOCK_AND_A_HALF;
    // Make sure all of the blocks are written out before failover.
    stm.hflush();
    LOG.info("Failing over to NN 1");
    scenario.run(cluster);
    // NOTE: explicitly do *not* make any further metadata calls
    // to the NN here. The next IPC call should be to allocate the next
    // block. Any other call would notice the failover and not test
    // idempotence of the operation (HDFS-3031)
    FSNamesystem ns1 = cluster.getNameNode(1).getNamesystem();
    BlockManagerTestUtil.updateState(ns1.getBlockManager());
    assertEquals(0, ns1.getPendingReplicationBlocks());
    assertEquals(0, ns1.getCorruptReplicaBlocks());
    assertEquals(0, ns1.getMissingBlocksCount());
    // If we're testing allocateBlock()'s idempotence, write another
    // block and a half, so we have to allocate a new block.
    // Otherwise, don't write anything, so our next RPC will be
    // completeFile() if we're testing idempotence of that operation.
    if (methodToTest == MethodToTestIdempotence.ALLOCATE_BLOCK) {
      // write another block and a half
      AppendTestUtil.write(stm, sizeWritten, BLOCK_AND_A_HALF);
      sizeWritten += BLOCK_AND_A_HALF;
    }
    stm.close();
    stm = null;
    AppendTestUtil.check(fs, TEST_PATH, sizeWritten);
  } finally {
    IOUtils.closeStream(stm);
    cluster.shutdown();
  }
}