This article compiles typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.closeAllForUGI. If you are wondering what FileSystem.closeAllForUGI does, how to call it, or where to find examples of it in practice, the curated code samples below may help. You can also look further into the enclosing class, org.apache.hadoop.fs.FileSystem, for more usage examples.
The following shows 5 code examples of the FileSystem.closeAllForUGI method, sorted by popularity by default.
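Before the examples, here is a minimal, self-contained sketch of the basic pattern (not taken from the article's examples; the class name and the "alice" user are illustrative assumptions): FileSystem instances obtained inside ugi.doAs(...) are cached per user, and a single call to FileSystem.closeAllForUGI(ugi) closes and evicts every cached instance belonging to that UserGroupInformation.

import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class CloseAllForUgiSketch {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();
    // Illustrative remote user; any UserGroupInformation works the same way.
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("alice");
    FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
      @Override
      public FileSystem run() throws IOException {
        // FileSystem.get caches one instance per (scheme, authority, UGI).
        return FileSystem.get(conf);
      }
    });
    fs.exists(new Path("/")); // use the filesystem as this user
    // Close and evict every FileSystem cached for this UGI in one call.
    FileSystem.closeAllForUGI(ugi);
  }
}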
Example 1: testCloseAllForUGI
import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@Test
public void testCloseAllForUGI() throws Exception {
  final Configuration conf = new Configuration();
  conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
  UserGroupInformation ugiA = UserGroupInformation.createRemoteUser("foo");
  FileSystem fsA = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(new URI("cachedfile://a"), conf);
    }
  });
  // Now we should get the cached filesystem
  FileSystem fsA1 = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(new URI("cachedfile://a"), conf);
    }
  });
  assertSame(fsA, fsA1);
  FileSystem.closeAllForUGI(ugiA);
  // Now we should get a different (newly created) filesystem
  fsA1 = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(new URI("cachedfile://a"), conf);
    }
  });
  assertNotSame(fsA, fsA1);
}
Example 2: closeFileSystems
import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
protected void closeFileSystems(UserGroupInformation ugi) {
  try {
    FileSystem.closeAllForUGI(ugi);
  } catch (IOException e) {
    LOG.warn("Failed to close filesystems: ", e);
  }
}
Example 3: closeFileSystems
import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
protected void closeFileSystems(final UserGroupInformation userUgi) {
  try {
    FileSystem.closeAllForUGI(userUgi);
  } catch (IOException e) {
    LOG.warn("Failed to close filesystems: ", e);
  }
}
Example 4: testCloseWithFailingFlush
import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@Test (timeout=60000)
public void testCloseWithFailingFlush() throws Exception {
  final Configuration conf = HBaseConfiguration.create(CONF);
  // Only retry once.
  conf.setInt("hbase.hstore.flush.retries.number", 1);
  final User user =
      User.createUserForTesting(conf, this.name.getMethodName(), new String[]{"foo"});
  // Inject our faulty LocalFileSystem
  conf.setClass("fs.file.impl", FaultyFileSystem.class, FileSystem.class);
  user.runAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      // Make sure it worked (above is sensitive to caching details in hadoop core)
      FileSystem fs = FileSystem.get(conf);
      Assert.assertEquals(FaultyFileSystem.class, fs.getClass());
      FaultyFileSystem ffs = (FaultyFileSystem)fs;
      HRegion region = null;
      try {
        // Initialize region
        region = initHRegion(tableName, name.getMethodName(), conf, COLUMN_FAMILY_BYTES);
        long size = region.getMemstoreSize();
        Assert.assertEquals(0, size);
        // Put one item into memstore. Measure the size of one item in memstore.
        Put p1 = new Put(row);
        p1.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual1, 1, (byte[])null));
        region.put(p1);
        // Manufacture an outstanding snapshot -- fake a failed flush by doing prepare step only.
        Store store = region.getStore(COLUMN_FAMILY_BYTES);
        StoreFlushContext storeFlushCtx = store.createFlushContext(12345);
        storeFlushCtx.prepare();
        // Now add two entries to the foreground memstore.
        Put p2 = new Put(row);
        p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual2, 2, (byte[])null));
        p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual3, 3, (byte[])null));
        region.put(p2);
        // Now try close on top of a failing flush.
        region.close();
        fail();
      } catch (IOException dse) {
        // Expected
        LOG.info("Expected DroppedSnapshotException");
      } finally {
        // Make it so all writes succeed from here on out so can close clean
        ffs.fault.set(false);
        region.getWAL().rollWriter(true);
        HRegion.closeHRegion(region);
      }
      return null;
    }
  });
  FileSystem.closeAllForUGI(user.getUGI());
}
Example 5: testHandleErrorsInFlush
import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@Test
public void testHandleErrorsInFlush() throws Exception {
  LOG.info("Setting up a faulty file system that cannot write");
  final Configuration conf = HBaseConfiguration.create();
  User user = User.createUserForTesting(conf,
      "testhandleerrorsinflush", new String[]{"foo"});
  // Inject our faulty LocalFileSystem
  conf.setClass("fs.file.impl", FaultyFileSystem.class,
      FileSystem.class);
  user.runAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      // Make sure it worked (above is sensitive to caching details in hadoop core)
      FileSystem fs = FileSystem.get(conf);
      Assert.assertEquals(FaultyFileSystem.class, fs.getClass());
      // Initialize region
      init(name.getMethodName(), conf);
      LOG.info("Adding some data");
      store.add(new KeyValue(row, family, qf1, 1, (byte[])null));
      store.add(new KeyValue(row, family, qf2, 1, (byte[])null));
      store.add(new KeyValue(row, family, qf3, 1, (byte[])null));
      LOG.info("Before flush, we should have no files");
      Collection<StoreFileInfo> files =
          store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName());
      Assert.assertEquals(0, files != null ? files.size() : 0);
      // flush
      try {
        LOG.info("Flushing");
        flush(1);
        Assert.fail("Didn't bubble up IOE!");
      } catch (IOException ioe) {
        Assert.assertTrue(ioe.getMessage().contains("Fault injected"));
      }
      LOG.info("After failed flush, we should still have no files!");
      files = store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName());
      Assert.assertEquals(0, files != null ? files.size() : 0);
      store.getHRegion().getWAL().close();
      return null;
    }
  });
  FileSystem.closeAllForUGI(user.getUGI());
}