This page collects typical usage examples of the Java method org.apache.hadoop.io.IOUtils.closeStream. If you have been wondering what IOUtils.closeStream does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also read further about its enclosing class, org.apache.hadoop.io.IOUtils.
The 15 code examples of IOUtils.closeStream shown below are ordered by popularity.
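As background for the examples: IOUtils.closeStream(stream) is a null-safe close that swallows any IOException, which is why it appears almost exclusively in finally blocks. A minimal sketch of the pattern, assuming a made-up HDFS path /tmp/example.txt:

import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class CloseStreamDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    InputStream in = null;
    try {
      in = fs.open(new Path("/tmp/example.txt")); // hypothetical path
      IOUtils.copyBytes(in, System.out, 4096, false); // false: do not close System.out
    } finally {
      IOUtils.closeStream(in); // no-op if in is null; any IOException is swallowed
    }
  }
}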
Example 1: main
import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
public static void main(String[] args) throws IOException {
  Configuration conf = new Configuration();
  conf.addResource("core-site.xml");
  conf.addResource("hdfs-site.xml");
  conf.addResource("yarn-site.xml");
  // If Kerberos is not enabled, comment out the following two lines.
  UserGroupInformation.setConfiguration(conf);
  UserGroupInformation.loginUserFromKeytab("[email protected]", "E:\\星环\\任务\\2016年11月28日\\hdfs.keytab");
  String localFile = "E:\\星环\\yarn-site.xml";
  InputStream in = new BufferedInputStream(new FileInputStream(localFile));
  Path p = new Path("/tmp/yarn-site.xml");
  FileSystem fs = p.getFileSystem(conf);
  OutputStream out = fs.create(p);
  // copyBytes(in, out, conf) closes both streams itself when it finishes,
  // so the closeStream(in) below is only a defensive no-op.
  IOUtils.copyBytes(in, out, conf);
  fs.close();
  IOUtils.closeStream(in);
}
Example 2: testOfflineImageViewerHelpMessage
import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
@Test
public void testOfflineImageViewerHelpMessage() throws Throwable {
  final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  final PrintStream out = new PrintStream(bytes);
  final PrintStream oldOut = System.out;
  try {
    System.setOut(out);
    int status = OfflineImageViewerPB.run(new String[] { "-h" });
    assertTrue("Exit code returned for help option is incorrect", status == 0);
    Assert.assertFalse(
        "Invalid Command error displayed when help option is passed.",
        bytes.toString().contains("Error parsing command-line options"));
    status = OfflineImageViewerPB.run(new String[] { "-h", "-i",
        originalFsimage.getAbsolutePath(), "-o", "-", "-p",
        "FileDistribution", "-maxSize", "512", "-step", "8" });
    Assert.assertTrue(
        "Exit code returned for help with other option is incorrect",
        status == -1);
  } finally {
    System.setOut(oldOut);
    IOUtils.closeStream(out);
  }
}
Example 3: testRmWithNonexistentGlob
import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
@Test(timeout = 30000)
public void testRmWithNonexistentGlob() throws Exception {
  Configuration conf = new Configuration();
  FsShell shell = new FsShell();
  shell.setConf(conf);
  final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  final PrintStream err = new PrintStream(bytes);
  final PrintStream oldErr = System.err;
  System.setErr(err);
  final String results;
  try {
    int exit = shell.run(new String[] { "-rm", "nomatch*" });
    assertEquals(1, exit);
    results = bytes.toString();
    assertTrue(results.contains("rm: `nomatch*': No such file or directory"));
  } finally {
    IOUtils.closeStream(err);
    System.setErr(oldErr);
  }
}
Example 4: getPersistedPaxosData
import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
/**
* Retrieve the persisted data for recovering the given segment from disk.
*/
private PersistedRecoveryPaxosData getPersistedPaxosData(long segmentTxId)
    throws IOException {
  File f = storage.getPaxosFile(segmentTxId);
  if (!f.exists()) {
    // Default instance has no fields filled in (they're optional)
    return null;
  }
  InputStream in = new FileInputStream(f);
  try {
    PersistedRecoveryPaxosData ret =
        PersistedRecoveryPaxosData.parseDelimitedFrom(in);
    Preconditions.checkState(ret != null &&
        ret.getSegmentState().getStartTxId() == segmentTxId,
        "Bad persisted data for segment %s: %s",
        segmentTxId, ret);
    return ret;
  } finally {
    IOUtils.closeStream(in);
  }
}
Example 5: writeStreamToFile
import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
void writeStreamToFile(InputStream in, PathData target,
    boolean lazyPersist) throws IOException {
  FSDataOutputStream out = null;
  try {
    out = create(target, lazyPersist);
    IOUtils.copyBytes(in, out, getConf(), true);
  } finally {
    IOUtils.closeStream(out); // just in case copyBytes didn't
  }
}
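A note on the design here: copyBytes(in, out, getConf(), true) closes both streams itself on the normal path, so the closeStream(out) in the finally block only does real work when copyBytes throws before reaching its own close. Because closeStream ignores null and swallows the secondary IOException, the original exception from copyBytes is not masked.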
Example 6: testMultiByteCharacters
import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
/**
* Tests use of multi-byte characters in property names and values. This test
* round-trips multi-byte string literals through saving and loading of config
* and asserts that the same values were read.
*/
public void testMultiByteCharacters() throws IOException {
  String priorDefaultEncoding = System.getProperty("file.encoding");
  try {
    System.setProperty("file.encoding", "US-ASCII");
    String name = "multi_byte_\u611b_name";
    String value = "multi_byte_\u0641_value";
    out = new BufferedWriter(new OutputStreamWriter(
        new FileOutputStream(CONFIG_MULTI_BYTE), "UTF-8"));
    startConfig();
    declareProperty(name, value, value);
    endConfig();
    Configuration conf = new Configuration(false);
    conf.addResource(new Path(CONFIG_MULTI_BYTE));
    assertEquals(value, conf.get(name));
    FileOutputStream fos = new FileOutputStream(CONFIG_MULTI_BYTE_SAVED);
    try {
      conf.writeXml(fos);
    } finally {
      IOUtils.closeStream(fos);
    }
    conf = new Configuration(false);
    conf.addResource(new Path(CONFIG_MULTI_BYTE_SAVED));
    assertEquals(value, conf.get(name));
  } finally {
    System.setProperty("file.encoding", priorDefaultEncoding);
  }
}
Example 7: close
import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
private void close() throws IOException {
  for (int i = 0; i < inReaders.length; i++) {
    IOUtils.closeStream(inReaders[i]);
    inReaders[i] = null;
  }
  if (outWriter != null) {
    outWriter.close();
    outWriter = null;
  }
}
Example 8: determineMaxIpcNumber
import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
/**
* Run through the creation of a log without any faults injected,
* and count how many RPCs are made to each node. This sets the
* bounds for the other test cases, so they can exhaustively explore
* the space of potential failures.
*/
private static long determineMaxIpcNumber() throws Exception {
  Configuration conf = new Configuration();
  MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf).build();
  QuorumJournalManager qjm = null;
  long ret;
  try {
    qjm = createInjectableQJM(cluster);
    qjm.format(FAKE_NSINFO);
    doWorkload(cluster, qjm);
    SortedSet<Integer> ipcCounts = Sets.newTreeSet();
    for (AsyncLogger l : qjm.getLoggerSetForTests().getLoggersForTests()) {
      InvocationCountingChannel ch = (InvocationCountingChannel) l;
      ch.waitForAllPendingCalls();
      ipcCounts.add(ch.getRpcCount());
    }
    // All of the loggers should have sent the same number of RPCs, since
    // there were no failures.
    assertEquals(1, ipcCounts.size());
    ret = ipcCounts.first();
    LOG.info("Max IPC count = " + ret);
  } finally {
    IOUtils.closeStream(qjm);
    cluster.shutdown();
  }
  return ret;
}
Example 9: testAppendWithPipelineRecovery
import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
/**
 * Test appending to a file when one of the datanodes in the existing
 * pipeline is down.
 */
@Test
public void testAppendWithPipelineRecovery() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  FSDataOutputStream out = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).manageDataDfsDirs(true)
        .manageNameDfsDirs(true).numDataNodes(4)
        .racks(new String[] { "/rack1", "/rack1", "/rack2", "/rack2" })
        .build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    Path path = new Path("/test1");
    out = fs.create(path, true, BLOCK_SIZE, (short) 3, BLOCK_SIZE);
    AppendTestUtil.write(out, 0, 1024);
    out.close();
    cluster.stopDataNode(3);
    out = fs.append(path);
    AppendTestUtil.write(out, 1024, 1024);
    out.close();
    cluster.restartNameNode(true);
    AppendTestUtil.check(fs, path, 2048);
  } finally {
    IOUtils.closeStream(out);
    if (null != cluster) {
      cluster.shutdown();
    }
  }
}
Example 10: calcPartialBlockChecksum
import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
private MD5Hash calcPartialBlockChecksum(ExtendedBlock block,
    long requestLength, DataChecksum checksum, DataInputStream checksumIn)
    throws IOException {
  final int bytesPerCRC = checksum.getBytesPerChecksum();
  final int csize = checksum.getChecksumSize();
  final byte[] buffer = new byte[4 * 1024];
  MessageDigest digester = MD5Hash.getDigester();
  long remaining = requestLength / bytesPerCRC * csize;
  for (int toDigest = 0; remaining > 0; remaining -= toDigest) {
    toDigest = checksumIn.read(buffer, 0,
        (int) Math.min(remaining, buffer.length));
    if (toDigest < 0) {
      break;
    }
    digester.update(buffer, 0, toDigest);
  }
  int partialLength = (int) (requestLength % bytesPerCRC);
  if (partialLength > 0) {
    byte[] buf = new byte[partialLength];
    final InputStream blockIn = datanode.data.getBlockInputStream(block,
        requestLength - partialLength);
    try {
      // Get the CRC of the partialLength.
      IOUtils.readFully(blockIn, buf, 0, partialLength);
    } finally {
      IOUtils.closeStream(blockIn);
    }
    checksum.update(buf, 0, partialLength);
    byte[] partialCrc = new byte[csize];
    checksum.writeValue(partialCrc, 0, true);
    digester.update(partialCrc);
  }
  return new MD5Hash(digester.digest());
}
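To make the arithmetic concrete: with bytesPerCRC = 512 and csize = 4 (a 4-byte CRC), a requestLength of 1300 bytes gives remaining = (1300 / 512) * 4 = 8 bytes of whole-chunk checksums to digest from checksumIn, and partialLength = 1300 % 512 = 276 trailing data bytes, whose CRC is computed directly from the block and then folded into the MD5 digest.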
Example 11: testFileIdMismatch
import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
/**
 * Test complete(..) - verifies that the fileId in the request
 * matches that of the Inode.
 * This test checks that a LeaseExpiredException is thrown when
 * the fileId does not match (see the catch block below).
 */
@Test
public void testFileIdMismatch() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  DistributedFileSystem dfs = null;
  try {
    cluster.waitActive();
    dfs = cluster.getFileSystem();
    DFSClient client = dfs.dfs;
    final Path f = new Path("/testFileIdMismatch.txt");
    createFile(dfs, f, 3);
    long someOtherFileId = -1;
    try {
      cluster.getNameNodeRpc()
          .complete(f.toString(), client.clientName, null, someOtherFileId);
      fail();
    } catch (LeaseExpiredException e) {
      FileSystem.LOG.info("Caught Expected LeaseExpiredException: ", e);
    }
  } finally {
    IOUtils.closeStream(dfs);
    cluster.shutdown();
  }
}
Example 12: testMultiTableImport
import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
public void testMultiTableImport() throws IOException {
  String[] argv = getArgv(null, null);
  runImport(new ImportAllTablesTool(), argv);
  Path warehousePath = new Path(this.getWarehouseDir());
  int i = 0;
  for (String tableName : this.tableNames) {
    Path tablePath = new Path(warehousePath, tableName);
    Path filePath = new Path(tablePath, "part-m-00000");
    // Dequeue the expected value for this table. This
    // list has the same order as the tableNames list.
    String expectedVal = Integer.toString(i++) + ","
        + this.expectedStrings.get(0);
    this.expectedStrings.remove(0);
    BufferedReader reader = null;
    if (!isOnPhysicalCluster()) {
      reader = new BufferedReader(
          new InputStreamReader(new FileInputStream(
              new File(filePath.toString()))));
    } else {
      FileSystem dfs = FileSystem.get(getConf());
      FSDataInputStream dis = dfs.open(filePath);
      reader = new BufferedReader(new InputStreamReader(dis));
    }
    try {
      String line = reader.readLine();
      assertEquals("Table " + tableName + " expected a different string",
          expectedVal, line);
    } finally {
      IOUtils.closeStream(reader);
    }
  }
}
Example 13: close
import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
@Override
public void close() {
  keyManager.close();
  // close the output file
  IOUtils.closeStream(out);
  if (fs != null) {
    try {
      fs.delete(idPath, true);
    } catch (IOException ioe) {
      LOG.warn("Failed to delete " + idPath, ioe);
    }
  }
}
Example 14: close
import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
/** Close the connection. */
protected synchronized void close() {
  if (!shouldCloseConnection.get()) {
    LOG.error(getName() + ": the connection is not in the closed state");
    return;
  }
  // release the resources
  // first thing to do: take the connection out of the connection list
  synchronized (connections) {
    connections.removeValue(remoteId, this);
  }
  // close the streams and therefore the socket
  synchronized (this.outLock) {
    if (this.out != null) {
      IOUtils.closeStream(out);
      this.out = null;
    }
  }
  IOUtils.closeStream(in);
  this.in = null;
  if (this.socket != null) {
    try {
      this.socket.close();
      this.socket = null;
    } catch (IOException e) {
      LOG.error("Error while closing socket", e);
    }
  }
  disposeSasl();
  // log the info
  if (LOG.isTraceEnabled()) {
    LOG.trace(getName() + ": closing ipc connection to " + server);
  }
  cleanupCalls(true);
  if (LOG.isTraceEnabled()) {
    LOG.trace(getName() + ": ipc connection to " + server + " closed");
  }
}
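Worth noting in this teardown sequence: the output stream is closed under outLock so that a concurrent writer cannot race the close, and both stream closes go through closeStream so that an IOException from one resource cannot abort the rest of the cleanup; the socket close, disposeSasl(), and cleanupCalls(true) still run.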
Example 15: copy
import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
/**
 * Copy from src to dst, optionally deleting src and overwriting dst.
 * @param src the source path
 * @param dst the destination path
 * @param deleteSource delete src if true
 * @param overwrite overwrite dst if true; throw IOException if dst exists
 * and overwrite is false.
 *
 * @return true if copy is successful
 *
 * @throws AccessControlException If access is denied
 * @throws FileAlreadyExistsException If <code>dst</code> already exists
 * @throws FileNotFoundException If <code>src</code> does not exist
 * @throws ParentNotDirectoryException If parent of <code>dst</code> is not
 * a directory
 * @throws UnsupportedFileSystemException If file system for
 * <code>src</code> or <code>dst</code> is not supported
 * @throws IOException If an I/O error occurred
 *
 * Exceptions applicable to file systems accessed over RPC:
 * @throws RpcClientException If an exception occurred in the RPC client
 * @throws RpcServerException If an exception occurred in the RPC server
 * @throws UnexpectedServerException If server implementation throws
 * undeclared exception to RPC server
 *
 * RuntimeExceptions:
 * @throws InvalidPathException If path <code>dst</code> is invalid
 */
public boolean copy(final Path src, final Path dst, boolean deleteSource,
    boolean overwrite) throws AccessControlException,
    FileAlreadyExistsException, FileNotFoundException,
    ParentNotDirectoryException, UnsupportedFileSystemException,
    IOException {
  src.checkNotSchemeWithRelative();
  dst.checkNotSchemeWithRelative();
  Path qSrc = makeQualified(src);
  Path qDst = makeQualified(dst);
  checkDest(qSrc.getName(), qDst, overwrite);
  FileStatus fs = FileContext.this.getFileStatus(qSrc);
  if (fs.isDirectory()) {
    checkDependencies(qSrc, qDst);
    mkdir(qDst, FsPermission.getDirDefault(), true);
    FileStatus[] contents = listStatus(qSrc);
    for (FileStatus content : contents) {
      copy(makeQualified(content.getPath()), makeQualified(new Path(qDst,
          content.getPath().getName())), deleteSource, overwrite);
    }
  } else {
    InputStream in = null;
    OutputStream out = null;
    try {
      in = open(qSrc);
      EnumSet<CreateFlag> createFlag = overwrite ? EnumSet.of(
          CreateFlag.CREATE, CreateFlag.OVERWRITE) :
          EnumSet.of(CreateFlag.CREATE);
      out = create(qDst, createFlag);
      IOUtils.copyBytes(in, out, conf, true);
    } finally {
      IOUtils.closeStream(out);
      IOUtils.closeStream(in);
    }
  }
  if (deleteSource) {
    return delete(qSrc, true);
  } else {
    return true;
  }
}
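This copy(..) lives on FileContext's nested utility class, so a caller would typically reach it through fc.util(). A hedged usage sketch, where /src/data and /dst/data are made-up paths:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class CopyDemo {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext(new Configuration());
    // deleteSource = false keeps the source; overwrite = true replaces dst
    boolean ok = fc.util().copy(new Path("/src/data"), new Path("/dst/data"),
        false, true);
    System.out.println("copy succeeded: " + ok);
  }
}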