本文整理汇总了Java中org.apache.hadoop.fs.FileSystem.initialize方法的典型用法代码示例。如果您正苦于以下问题:Java FileSystem.initialize方法的具体用法?Java FileSystem.initialize怎么用?Java FileSystem.initialize使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.fs.FileSystem
的用法示例。
在下文中一共展示了FileSystem.initialize方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: initAndStartStore
import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Initializes the supplied file system against the root URI, wipes the
 * working directory under "target", and starts a
 * {@link FileSystemApplicationHistoryStore} that is pinned to that exact
 * file system instance (bypassing the store's own file-system lookup).
 *
 * @param fs the file system the store must use; initialized here
 * @throws IOException if the file system or store fails to start
 * @throws URISyntaxException if the root URI cannot be parsed
 */
private void initAndStartStore(final FileSystem fs) throws IOException,
    URISyntaxException {
  Configuration configuration = new Configuration();
  fs.initialize(new URI("/"), configuration);
  fsWorkingPath = new Path(
      "target", TestFileSystemApplicationHistoryStore.class.getSimpleName());
  // Start from a clean working directory on every run.
  fs.delete(fsWorkingPath, true);
  configuration.set(YarnConfiguration.FS_APPLICATION_HISTORY_STORE_URI,
      fsWorkingPath.toString());
  // Override getFileSystem so the store always returns the instance under test.
  store = new FileSystemApplicationHistoryStore() {
    @Override
    protected FileSystem getFileSystem(Path path, Configuration unused) {
      return fs;
    }
  };
  store.init(configuration);
  store.start();
}
示例2: testTracingGlobber
import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Test tracing the globber. This is a regression test for HDFS-9187.
 *
 * <p>Fix over the original: the file system is now closed in all cases via
 * try-with-resources ({@code FileSystem} implements {@code Closeable});
 * previously a throwing {@code globStatus} would leak the instance.
 */
@Test
public void testTracingGlobber() throws Exception {
  // Bypass the normal FileSystem object creation path by just creating an
  // instance of a subclass.
  try (FileSystem fs = new LocalFileSystem()) {
    fs.initialize(new URI("file:///"), new Configuration());
    fs.globStatus(new Path("/"));
  }
}
示例3: testAccessContainerWithWrongVersion
import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Verifies that initializing/listing a WASB file system against a container
 * stamped with a version from the future fails with an AzureException whose
 * message names the unsupported version.
 */
@Test
public void testAccessContainerWithWrongVersion() throws Exception {
  AzureNativeFileSystemStore nativeStore = new AzureNativeFileSystemStore();
  MockStorageInterface mockLayer = new MockStorageInterface();
  nativeStore.setAzureStorageInteractionLayer(mockLayer);
  FileSystem wasbFs = new NativeAzureFileSystem(nativeStore);
  try {
    Configuration testConf = new Configuration();
    AzureBlobStorageTestAccount.setMockAccountKey(testConf);
    // Pre-create the container carrying a version stamp we do not support.
    HashMap<String, String> containerMetadata =
        new HashMap<String, String>();
    containerMetadata.put(AzureNativeFileSystemStore.VERSION_METADATA_KEY,
        "2090-04-05"); // It's from the future!
    mockLayer.addPreExistingContainer(
        AzureBlobStorageTestAccount.getMockContainerUri(), containerMetadata);

    boolean listingSucceeded = false;
    try {
      wasbFs.initialize(
          new URI(AzureBlobStorageTestAccount.MOCK_WASB_URI), testConf);
      wasbFs.listStatus(new Path("/"));
      listingSucceeded = true;
    } catch (AzureException ex) {
      // The failure must mention the offending version string.
      assertTrue("Unexpected exception message: " + ex,
          ex.getMessage().contains("unsupported version: 2090-04-05."));
    }
    assertFalse("Should've thrown an exception because of the wrong version.",
        listingSucceeded);
  } finally {
    wasbFs.close();
  }
}
示例4: newLocalFileSystem
import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Builds and initializes a local file system.
 *
 * <p>Uses the raw local file system (rather than the checksummed one) so that
 * append is supported; when local access is disallowed a no-op file system is
 * returned instead.
 *
 * @param conf configuration used to initialize the file system
 * @param isLocalAccessAllowed whether real local-disk access is permitted
 * @return an initialized file system, never {@code null}
 * @throws IOException if initialization fails
 */
static FileSystem newLocalFileSystem(Configuration conf, boolean isLocalAccessAllowed) throws IOException {
  final FileSystem localFS;
  if (isLocalAccessAllowed) {
    localFS = new RawLocalFileSystem();
  } else {
    localFS = new NoopFileSystem();
  }
  localFS.initialize(localFS.getUri(), conf);
  return localFS;
}
示例5: testAppendAfterSoftLimit
import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Tests appending after the lease soft-limit expires: a second client must be
 * able to append to a file whose original writer never closed its stream.
 */
@Test
public void testAppendAfterSoftLimit()
    throws IOException, InterruptedException {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
  // Shrink the soft-limit so it expires quickly; keep the hard-limit huge.
  final long softLimit = 1L;
  final long hardLimit = 9999999L;

  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.setLeasePeriod(softLimit, hardLimit);
  cluster.waitActive();

  FileSystem primaryFs = cluster.getFileSystem();
  // A second, independent client instance for the append.
  FileSystem secondFs = new DistributedFileSystem();
  secondFs.initialize(primaryFs.getUri(), conf);

  final Path filePath = new Path("/testAppendAfterSoftLimit");
  final byte[] payload = AppendTestUtil.initBuffer(32);
  // Intentionally leave this stream open so the first client still holds
  // the lease when the second client tries to append.
  FSDataOutputStream pendingOut = primaryFs.create(filePath);
  pendingOut.write(payload);
  // Sleep past the soft-limit so the lease becomes recoverable.
  Thread.sleep(250);
  try {
    FSDataOutputStream appendOut = secondFs.append(filePath);
    appendOut.write(payload);
    appendOut.close();
    // Original bytes plus the appended bytes... the file length reflects
    // only what the NameNode knows, i.e. the appended write.
    assertEquals(payload.length, primaryFs.getFileStatus(filePath).getLen());
  } finally {
    primaryFs.close();
    secondFs.close();
    cluster.shutdown();
  }
}