This article collects typical usage examples of the Java method org.apache.hadoop.io.IOUtils.cleanup. If you are wondering what IOUtils.cleanup does, how to call it, or where to find usage examples, the curated snippets below should help. You can also read more about its enclosing class, org.apache.hadoop.io.IOUtils.
The sections that follow present 15 code examples of IOUtils.cleanup, ordered roughly by popularity.
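Before the examples, here is a minimal, self-contained sketch of the canonical pattern (the class name CleanupSketch and the file paths are placeholders, not taken from Hadoop itself): the copy work happens inside try, and IOUtils.cleanup is called in the finally block to close whatever is still open, logging rather than rethrowing any exception raised by close(). Passing null instead of a Log suppresses that logging. Newer Hadoop releases deprecate this overload in favor of IOUtils.cleanupWithLogger, but the pattern is the same.

import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.IOUtils;

public class CleanupSketch {
  private static final Log LOG = LogFactory.getLog(CleanupSketch.class);

  // Copy src to dst. IOUtils.copyBytes(in, out, bufferSize) does not close
  // the streams, so IOUtils.cleanup closes both in the finally block and
  // logs (but does not rethrow) any failure from close().
  public static void copy(String src, String dst) throws IOException {
    FileInputStream in = null;
    FileOutputStream out = null;
    try {
      in = new FileInputStream(src);
      out = new FileOutputStream(dst);
      IOUtils.copyBytes(in, out, 4096);
    } finally {
      IOUtils.cleanup(LOG, in, out); // pass null instead of LOG to stay silent
    }
  }
}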
Example 1: testMidKey
import org.apache.hadoop.io.IOUtils; // import required by this example
@Test
@SuppressWarnings("deprecation")
public void testMidKey() throws Exception {
  // Write a mapfile of simple data: keys are
  Path dirName = new Path(TEST_DIR, "testMidKey.mapfile");
  FileSystem fs = FileSystem.getLocal(conf);
  Path qualifiedDirName = fs.makeQualified(dirName);
  MapFile.Writer writer = null;
  MapFile.Reader reader = null;
  try {
    writer = new MapFile.Writer(conf, fs, qualifiedDirName.toString(),
        IntWritable.class, IntWritable.class);
    writer.append(new IntWritable(1), new IntWritable(1));
    writer.close();
    // Now do getClosest on created mapfile.
    reader = new MapFile.Reader(qualifiedDirName, conf);
    assertEquals(new IntWritable(1), reader.midKey());
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}
Example 2: testDeleteFile
import org.apache.hadoop.io.IOUtils; // import required by this example
/**
 * test {@code BloomMapFile.delete()} method
 */
@Test
public void testDeleteFile() {
  BloomMapFile.Writer writer = null;
  try {
    FileSystem fs = FileSystem.getLocal(conf);
    writer = new BloomMapFile.Writer(conf, TEST_FILE,
        MapFile.Writer.keyClass(IntWritable.class),
        MapFile.Writer.valueClass(Text.class));
    assertNotNull("testDeleteFile error !!!", writer);
    writer.close();
    BloomMapFile.delete(fs, TEST_FILE.toString());
  } catch (Exception ex) {
    fail("unexpect ex in testDeleteFile !!!");
  } finally {
    IOUtils.cleanup(null, writer);
  }
}
Example 3: testOnFinalKey
import org.apache.hadoop.io.IOUtils; // import required by this example
/**
 * test {@code MapFile.Reader.finalKey()} method
 */
@Test
public void testOnFinalKey() {
  final String TEST_METHOD_KEY = "testOnFinalKey.mapfile";
  int SIZE = 10;
  MapFile.Writer writer = null;
  MapFile.Reader reader = null;
  try {
    writer = createWriter(TEST_METHOD_KEY, IntWritable.class,
        IntWritable.class);
    for (int i = 0; i < SIZE; i++)
      writer.append(new IntWritable(i), new IntWritable(i));
    writer.close();
    reader = createReader(TEST_METHOD_KEY, IntWritable.class);
    IntWritable expectedKey = new IntWritable(0);
    reader.finalKey(expectedKey);
    assertEquals("testOnFinalKey not same !!!", expectedKey,
        new IntWritable(9));
  } catch (IOException ex) {
    fail("testOnFinalKey error !!!");
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}
Example 4: LocalFSFileOutputStream
import org.apache.hadoop.io.IOUtils; // import required by this example
private LocalFSFileOutputStream(Path f, boolean append,
    FsPermission permission) throws IOException {
  File file = pathToFile(f);
  if (!append && permission == null) {
    permission = FsPermission.getFileDefault();
  }
  if (permission == null) {
    this.fos = new FileOutputStream(file, append);
  } else {
    permission = permission.applyUMask(umask);
    if (Shell.WINDOWS && NativeIO.isAvailable()) {
      this.fos = NativeIO.Windows.createFileOutputStreamWithMode(file,
          append, permission.toShort());
    } else {
      this.fos = new FileOutputStream(file, append);
      boolean success = false;
      try {
        setPermission(f, permission);
        success = true;
      } finally {
        if (!success) {
          IOUtils.cleanup(LOG, this.fos);
        }
      }
    }
  }
}
Example 5: loadTokenMasterKey
import org.apache.hadoop.io.IOUtils; // import required by this example
private void loadTokenMasterKey(HistoryServerState state, Path keyFile,
    long numKeyFileBytes) throws IOException {
  DelegationKey key = new DelegationKey();
  byte[] keyData = readFile(keyFile, numKeyFileBytes);
  DataInputStream in =
      new DataInputStream(new ByteArrayInputStream(keyData));
  try {
    key.readFields(in);
  } finally {
    IOUtils.cleanup(LOG, in);
  }
  state.tokenMasterKeyState.add(key);
}
Example 6: loadFile
import org.apache.hadoop.io.IOUtils; // import required by this example
/**
 * Load file into byte[]
 */
public static byte[] loadFile(String filename) throws IOException {
  File file = new File(filename);
  DataInputStream in = new DataInputStream(new FileInputStream(file));
  byte[] content = new byte[(int) file.length()];
  try {
    in.readFully(content);
  } finally {
    IOUtils.cleanup(LOG, in);
  }
  return content;
}
Example 7: shutdown
import org.apache.hadoop.io.IOUtils; // import required by this example
@After
public void shutdown() {
  IOUtils.cleanup(null, fs);
  if (cluster != null) {
    cluster.shutdown();
  }
}
Example 8: testKeyLessWriterCreation
import org.apache.hadoop.io.IOUtils; // import required by this example
/**
 * test {@code MapFile.Writer} constructor
 * with IllegalArgumentException
 */
@Test
public void testKeyLessWriterCreation() {
  MapFile.Writer writer = null;
  try {
    writer = new MapFile.Writer(conf, TEST_DIR);
    fail("fail in testKeyLessWriterCreation !!!");
  } catch (IllegalArgumentException ex) {
    // expected: the writer was created without key/value class options
  } catch (Exception e) {
    fail("fail in testKeyLessWriterCreation. Other ex !!!");
  } finally {
    IOUtils.cleanup(null, writer);
  }
}
Example 9: copyBytes
import org.apache.hadoop.io.IOUtils; // import required by this example
@VisibleForTesting
long copyBytes(FileStatus sourceFileStatus, long sourceOffset,
    OutputStream outStream, int bufferSize, Mapper.Context context)
    throws IOException {
  Path source = sourceFileStatus.getPath();
  byte buf[] = new byte[bufferSize];
  ThrottledInputStream inStream = null;
  long totalBytesRead = 0;
  try {
    inStream = getInputStream(source, context.getConfiguration());
    int bytesRead = readBytes(inStream, buf, sourceOffset);
    while (bytesRead >= 0) {
      totalBytesRead += bytesRead;
      if (action == FileAction.APPEND) {
        sourceOffset += bytesRead;
      }
      outStream.write(buf, 0, bytesRead);
      updateContextStatus(totalBytesRead, context, sourceFileStatus);
      bytesRead = readBytes(inStream, buf, sourceOffset);
    }
    outStream.close();
    outStream = null;
  } finally {
    IOUtils.cleanup(LOG, outStream, inStream);
  }
  return totalBytesRead;
}
Example 10: testRenameWithException
import org.apache.hadoop.io.IOUtils; // import required by this example
/**
 * test {@code MapFile.rename()}
 * method with throwing {@code IOException}
 */
@Test
public void testRenameWithException() {
  final String ERROR_MESSAGE = "Can't rename file";
  final String NEW_FILE_NAME = "test-new.mapfile";
  final String OLD_FILE_NAME = "test-old.mapfile";
  MapFile.Writer writer = null;
  try {
    FileSystem fs = FileSystem.getLocal(conf);
    FileSystem spyFs = spy(fs);
    writer = createWriter(OLD_FILE_NAME, IntWritable.class, IntWritable.class);
    writer.close();
    Path oldDir = new Path(TEST_DIR, OLD_FILE_NAME);
    Path newDir = new Path(TEST_DIR, NEW_FILE_NAME);
    when(spyFs.rename(oldDir, newDir)).thenThrow(
        new IOException(ERROR_MESSAGE));
    MapFile.rename(spyFs, oldDir.toString(), newDir.toString());
    fail("testRenameWithException no exception error !!!");
  } catch (IOException ex) {
    assertEquals("testRenameWithException invalid IOExceptionMessage !!!",
        ex.getMessage(), ERROR_MESSAGE);
  } finally {
    IOUtils.cleanup(null, writer);
  }
}
Example 11: tearDown
import org.apache.hadoop.io.IOUtils; // import required by this example
@After
public void tearDown() throws Exception {
  IOUtils.cleanup(LOG, clients.toArray(new SocketChannel[clients.size()]));
  IOUtils.cleanup(LOG, fs);
  if (serverSocket != null) {
    try {
      serverSocket.close();
    } catch (IOException e) {
      LOG.debug("Exception in closing " + serverSocket, e);
    }
  }
  if (serverThread != null) {
    serverThread.join();
  }
}
Example 12: testGetBloomMapFile
import org.apache.hadoop.io.IOUtils; // import required by this example
/**
 * test {@link BloomMapFile.Reader#get(WritableComparable, Writable)} method
 */
@Test
public void testGetBloomMapFile() {
  int SIZE = 10;
  BloomMapFile.Reader reader = null;
  BloomMapFile.Writer writer = null;
  try {
    writer = new BloomMapFile.Writer(conf, TEST_FILE,
        MapFile.Writer.keyClass(IntWritable.class),
        MapFile.Writer.valueClass(Text.class));
    for (int i = 0; i < SIZE; i++) {
      writer.append(new IntWritable(i), new Text());
    }
    writer.close();
    reader = new BloomMapFile.Reader(TEST_FILE, conf,
        MapFile.Reader.comparator(new WritableComparator(IntWritable.class)));
    for (int i = 0; i < SIZE; i++) {
      assertNotNull("testGetBloomMapFile error !!!",
          reader.get(new IntWritable(i), new Text()));
    }
    assertNull("testGetBloomMapFile error !!!",
        reader.get(new IntWritable(SIZE + 5), new Text()));
  } catch (Exception ex) {
    fail("unexpect ex in testGetBloomMapFile !!!");
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}
Example 13: testManyClosedSocketsInCache
import org.apache.hadoop.io.IOUtils; // import required by this example
@Test(timeout=30000)
public void testManyClosedSocketsInCache() throws Exception {
  // Make a small file
  Configuration clientConf = new Configuration(conf);
  clientConf.set(DFS_CLIENT_CONTEXT, "testManyClosedSocketsInCache");
  DistributedFileSystem fs =
      (DistributedFileSystem) FileSystem.get(cluster.getURI(), clientConf);
  PeerCache peerCache = ClientContext.getFromConf(clientConf).getPeerCache();
  DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short) 1, 0L);
  // Insert a bunch of dead sockets in the cache, by opening
  // many streams concurrently, reading all of the data,
  // and then closing them.
  InputStream[] stms = new InputStream[5];
  try {
    for (int i = 0; i < stms.length; i++) {
      stms[i] = fs.open(TEST_FILE);
    }
    for (InputStream stm : stms) {
      IOUtils.copyBytes(stm, new IOUtils.NullOutputStream(), 1024);
    }
  } finally {
    IOUtils.cleanup(null, stms);
  }
  assertEquals(5, peerCache.size());
  // Let all the xceivers timeout
  Thread.sleep(1500);
  assertXceiverCount(0);
  // Client side still has the sockets cached
  assertEquals(5, peerCache.size());
  // Reading should not throw an exception.
  DFSTestUtil.readFile(fs, TEST_FILE);
}
Example 14: createRbw
import org.apache.hadoop.io.IOUtils; // import required by this example
@Override // FsDatasetSpi
public synchronized ReplicaHandler createRbw(
    StorageType storageType, ExtendedBlock b, boolean allowLazyPersist)
    throws IOException {
  ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(),
      b.getBlockId());
  if (replicaInfo != null) {
    throw new ReplicaAlreadyExistsException("Block " + b +
        " already exists in state " + replicaInfo.getState() +
        " and thus cannot be created.");
  }
  // create a new block
  FsVolumeReference ref;
  while (true) {
    try {
      if (allowLazyPersist) {
        // First try to place the block on a transient volume.
        ref = volumes.getNextTransientVolume(b.getNumBytes());
        datanode.getMetrics().incrRamDiskBlocksWrite();
      } else {
        ref = volumes.getNextVolume(storageType, b.getNumBytes());
      }
    } catch (DiskOutOfSpaceException de) {
      if (allowLazyPersist) {
        datanode.getMetrics().incrRamDiskBlocksWriteFallback();
        allowLazyPersist = false;
        continue;
      }
      throw de;
    }
    break;
  }
  FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
  // create an rbw file to hold block in the designated volume
  File f;
  try {
    f = v.createRbwFile(b.getBlockPoolId(), b.getLocalBlock());
  } catch (IOException e) {
    IOUtils.cleanup(null, ref);
    throw e;
  }
  ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(b.getBlockId(),
      b.getGenerationStamp(), v, f.getParentFile(), b.getNumBytes());
  volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
  return new ReplicaHandler(newReplicaInfo, ref);
}
Example 15: serviceInit
import org.apache.hadoop.io.IOUtils; // import required by this example
@Override
@SuppressWarnings("unchecked")
protected void serviceInit(Configuration conf) throws Exception {
  Preconditions.checkArgument(conf.getLong(
      YarnConfiguration.TIMELINE_SERVICE_TTL_MS,
      YarnConfiguration.DEFAULT_TIMELINE_SERVICE_TTL_MS) > 0,
      "%s property value should be greater than zero",
      YarnConfiguration.TIMELINE_SERVICE_TTL_MS);
  Preconditions.checkArgument(conf.getLong(
      YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS,
      YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS) > 0,
      "%s property value should be greater than zero",
      YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS);
  Preconditions.checkArgument(conf.getLong(
      YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE,
      YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE) >= 0,
      "%s property value should be greater than or equal to zero",
      YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE);
  Preconditions.checkArgument(conf.getLong(
      YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE,
      YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE) > 0,
      "%s property value should be greater than zero",
      YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE);
  Preconditions.checkArgument(conf.getLong(
      YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE,
      YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE) > 0,
      "%s property value should be greater than zero",
      YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE);
  Options options = new Options();
  options.createIfMissing(true);
  options.cacheSize(conf.getLong(
      YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE,
      YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE));
  JniDBFactory factory = new JniDBFactory();
  Path dbPath = new Path(
      conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH), FILENAME);
  FileSystem localFS = null;
  try {
    localFS = FileSystem.getLocal(conf);
    if (!localFS.exists(dbPath)) {
      if (!localFS.mkdirs(dbPath)) {
        throw new IOException("Couldn't create directory for leveldb " +
            "timeline store " + dbPath);
      }
      localFS.setPermission(dbPath, LEVELDB_DIR_UMASK);
    }
  } finally {
    IOUtils.cleanup(LOG, localFS);
  }
  LOG.info("Using leveldb path " + dbPath);
  db = factory.open(new File(dbPath.toString()), options);
  checkVersion();
  startTimeWriteCache = Collections.synchronizedMap(
      new LRUMap(getStartTimeWriteCacheSize(conf)));
  startTimeReadCache = Collections.synchronizedMap(
      new LRUMap(getStartTimeReadCacheSize(conf)));
  if (conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_TTL_ENABLE, true)) {
    deletionThread = new EntityDeletionThread(conf);
    deletionThread.start();
  }
  super.serviceInit(conf);
}