This article collects typical usage examples of the Java method org.apache.hadoop.fs.LocalFileSystem.get. If you are unsure what LocalFileSystem.get does, how to call it, or what real-world uses look like, the curated samples below should help. You can also read further about the enclosing class, org.apache.hadoop.fs.LocalFileSystem.
Nine code examples of LocalFileSystem.get follow, ordered by popularity by default.
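Before the project examples, here is a minimal, self-contained sketch of the call pattern; the demo class name and the /tmp path are illustrative, not taken from any of the projects below. One point worth knowing: LocalFileSystem declares no static get(Configuration) of its own, so LocalFileSystem.get(conf) resolves to the inherited static FileSystem.get(conf) and returns whichever filesystem fs.defaultFS names, which is the local filesystem only under the default file:/// setting.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class LocalFileSystemGetDemo { // hypothetical demo class, not from the examples below
  public static void main(String[] args) throws Exception {
    // LocalFileSystem has no static get(Configuration) of its own; this call
    // resolves to the inherited FileSystem.get(Configuration), which returns
    // the filesystem for fs.defaultFS (file:/// unless configured otherwise).
    Configuration conf = new Configuration();
    FileSystem fs = LocalFileSystem.get(conf);
    System.out.println("Resolved filesystem: " + fs.getClass().getName());

    // Round-trip a small file to show the handle behaves as a normal FileSystem.
    Path path = new Path("/tmp/localfs-get-demo.txt"); // illustrative path
    try (FSDataOutputStream out = fs.create(path, true)) {
      out.writeUTF("hello");
    }
    System.out.println("Exists after write: " + fs.exists(path));
    fs.delete(path, false);
  }
}

If you need a handle that is guaranteed to be local regardless of fs.defaultFS, FileSystem.getLocal(conf) returns a LocalFileSystem directly; note how several examples below instead set fs.defaultFS to file:/// before calling LocalFileSystem.get.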
Example 1: testRead
import org.apache.hadoop.fs.LocalFileSystem; // import the package/class the method depends on
@Test
@Ignore
public void testRead() throws Exception {
  String file = tmp.newFile().getAbsolutePath();
  JsonFileReader reader = new JsonFileReader(LocalFileSystem.get(new Configuration()), new Path(file), null);
  String jsonStr = "{ \"truckId\" : 10, \"driverId\" : 20 }\n" +
      "{ \"truckId\" : 11, \"driverId\" : 21 }\n" +
      "{ \"truckId\" : 12, \"driverId\" : 22 }\n" +
      "{ \"truckId\" : 13, \"driverId\" : 23 }\n" +
      "{ \"truckId\" : 14, \"driverId\" : 24 }\n" +
      "{ \"truckId\" : 15, \"driverId\" : 25 }\n" +
      "{ \"truckId\" : 16, \"driverId\" : 26 }";
  dumpToFile(jsonStr, file);
  int lineCount = 0;
  List<Object> x = reader.next();
  while (x != null) {
    System.err.println(x.get(0));
    x = reader.next();
    ++lineCount;
  }
  Assert.assertEquals(7, lineCount);
}
Example 2: work
import org.apache.hadoop.fs.LocalFileSystem; // import the package/class the method depends on
private void work(String hadoop_conf_path, String baseDir) throws IOException {
  Configuration conf = getConf(hadoop_conf_path);
  FileSystem localfs = LocalFileSystem.get(conf);
  System.out.println(localfs.getClass().getName());
  if (baseDir == null) baseDir = "/home/winter/temp/data/lccindex";
  traverse(new File(baseDir));
  System.out.println("total hdfs file: " + totalLCCCounter);
}
Example 3: setup
import org.apache.hadoop.fs.LocalFileSystem; // import the package/class the method depends on
@Override
public void setup(Context context) throws IOException {
  Configuration conf = context.getConfiguration();
  Path[] files = context.getLocalCacheFiles();
  Path[] archives = context.getLocalCacheArchives();
  FileSystem fs = LocalFileSystem.get(conf);
  // Check that 2 files and 2 archives are present
  TestCase.assertEquals(2, files.length);
  TestCase.assertEquals(2, archives.length);
  // Check lengths of the files
  TestCase.assertEquals(1, fs.getFileStatus(files[0]).getLen());
  TestCase.assertTrue(fs.getFileStatus(files[1]).getLen() > 1);
  // Check extraction of the archive
  TestCase.assertTrue(fs.exists(new Path(archives[0],
      "distributed.jar.inside3")));
  TestCase.assertTrue(fs.exists(new Path(archives[1],
      "distributed.jar.inside4")));
  // Check the class loaders
  LOG.info("Java Classpath: " + System.getProperty("java.class.path"));
  ClassLoader cl = Thread.currentThread().getContextClassLoader();
  // Both the file and the archive were added to classpath, so both
  // should be reachable via the class loader.
  TestCase.assertNotNull(cl.getResource("distributed.jar.inside2"));
  TestCase.assertNotNull(cl.getResource("distributed.jar.inside3"));
  TestCase.assertNull(cl.getResource("distributed.jar.inside4"));
  // Check that the symlink for the renaming was created in the cwd
  TestCase.assertTrue("symlink distributed.first.symlink doesn't exist",
      symlinkFile.exists());
  TestCase.assertEquals("symlink distributed.first.symlink length not 1", 1,
      symlinkFile.length());
  // This last one is a difference between MRv2 and MRv1
  TestCase.assertTrue("second file should be symlinked too",
      expectedAbsentSymlinkFile.exists());
}
Example 4: storeData
import org.apache.hadoop.fs.LocalFileSystem; // import the package/class the method depends on
@Override
public void storeData(final String subPathStr, final byte[] data) throws IOException {
  final Path path = new Path(baseDir, subPathStr);
  final FileSystem fs = LocalFileSystem.get(new Configuration());
  try (FSDataOutputStream fos = fs.create(path)) {
    fos.write(data);
    fos.close();
  }
  LOG.log(Level.INFO, "Successfully wrote {0} bytes of data to {1}", new Object[] {data.length, path.toString()});
}
Example 5: testDefaultFsIsUsedForHistory
import org.apache.hadoop.fs.LocalFileSystem; // import the package/class the method depends on
@Test(timeout = 50000)
public void testDefaultFsIsUsedForHistory() throws Exception {
  // Create a default configuration pointing to the minicluster
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
      dfsCluster.getURI().toString());
  FileOutputStream os = new FileOutputStream(coreSitePath);
  conf.writeXml(os);
  os.close();
  // Simulate execution under a non-default namenode
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "file:///");
  TestParams t = new TestParams();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, t.dfsWorkDir);
  JHEvenHandlerForTest realJheh =
      new JHEvenHandlerForTest(t.mockAppContext, 0, false);
  JHEvenHandlerForTest jheh = spy(realJheh);
  jheh.init(conf);
  try {
    jheh.start();
    handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(
        t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1)));
    handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
        TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(),
        new Counters(), new Counters())));
    // If we got here, the event handler worked, but we don't know on which
    // file system. Check that the history files were written to the minicluster.
    FileSystem dfsFileSystem = dfsCluster.getFileSystem();
    assertTrue("Minicluster contains some history files",
        dfsFileSystem.globStatus(new Path(t.dfsWorkDir + "/*")).length != 0);
    FileSystem localFileSystem = LocalFileSystem.get(conf);
    assertFalse("No history directory on non-default file system",
        localFileSystem.exists(new Path(t.dfsWorkDir)));
  } finally {
    jheh.stop();
  }
}
Example 6: setup
import org.apache.hadoop.fs.LocalFileSystem; // import the package/class the method depends on
public void setup(TaskInputOutputContext<?, ?, ?, ?> context)
    throws IOException {
  Configuration conf = context.getConfiguration();
  Path[] localFiles = context.getLocalCacheFiles();
  URI[] files = context.getCacheFiles();
  Path[] localArchives = context.getLocalCacheArchives();
  URI[] archives = context.getCacheArchives();
  FileSystem fs = LocalFileSystem.get(conf);
  // Check that 2 files and 2 archives are present
  TestCase.assertEquals(2, localFiles.length);
  TestCase.assertEquals(2, localArchives.length);
  TestCase.assertEquals(2, files.length);
  TestCase.assertEquals(2, archives.length);
  // Check the file names
  TestCase.assertTrue(files[0].getPath().endsWith("distributed.first"));
  TestCase.assertTrue(files[1].getPath().endsWith("distributed.second.jar"));
  // Check lengths of the files
  TestCase.assertEquals(1, fs.getFileStatus(localFiles[0]).getLen());
  TestCase.assertTrue(fs.getFileStatus(localFiles[1]).getLen() > 1);
  // Check extraction of the archive
  TestCase.assertTrue(fs.exists(new Path(localArchives[0],
      "distributed.jar.inside3")));
  TestCase.assertTrue(fs.exists(new Path(localArchives[1],
      "distributed.jar.inside4")));
  // Check the class loaders
  LOG.info("Java Classpath: " + System.getProperty("java.class.path"));
  ClassLoader cl = Thread.currentThread().getContextClassLoader();
  // Both the file and the archive were added to classpath, so both
  // should be reachable via the class loader.
  TestCase.assertNotNull(cl.getResource("distributed.jar.inside2"));
  TestCase.assertNotNull(cl.getResource("distributed.jar.inside3"));
  TestCase.assertNull(cl.getResource("distributed.jar.inside4"));
  // Check that the symlink for the renaming was created in the cwd
  TestCase.assertTrue("symlink distributed.first.symlink doesn't exist",
      symlinkFile.exists());
  TestCase.assertEquals("symlink distributed.first.symlink length not 1", 1,
      symlinkFile.length());
  // This last one is a difference between MRv2 and MRv1
  TestCase.assertTrue("second file should be symlinked too",
      expectedAbsentSymlinkFile.exists());
}
Example 7: testDefaultFsIsUsedForHistory
import org.apache.hadoop.fs.LocalFileSystem; // import the package/class the method depends on
@Test(timeout = 50000)
public void testDefaultFsIsUsedForHistory() throws Exception {
  // Create a default configuration pointing to the minicluster
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
      dfsCluster.getURI().toString());
  FileOutputStream os = new FileOutputStream(coreSitePath);
  conf.writeXml(os);
  os.close();
  // Simulate execution under a non-default namenode
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "file:///");
  TestParams t = new TestParams();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, t.dfsWorkDir);
  JHEvenHandlerForTest realJheh =
      new JHEvenHandlerForTest(t.mockAppContext, 0, false);
  JHEvenHandlerForTest jheh = spy(realJheh);
  jheh.init(conf);
  try {
    jheh.start();
    handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(
        t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000)));
    handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
        TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(),
        new Counters(), new Counters())));
    // If we got here, the event handler worked, but we don't know on which
    // file system. Check that the history files were written to the minicluster.
    FileSystem dfsFileSystem = dfsCluster.getFileSystem();
    assertTrue("Minicluster contains some history files",
        dfsFileSystem.globStatus(new Path(t.dfsWorkDir + "/*")).length != 0);
    FileSystem localFileSystem = LocalFileSystem.get(conf);
    assertFalse("No history directory on non-default file system",
        localFileSystem.exists(new Path(t.dfsWorkDir)));
  } finally {
    jheh.stop();
  }
}
Example 8: setup
import org.apache.hadoop.fs.LocalFileSystem; // import the package/class the method depends on
@Override
public void setup(Context context) throws IOException {
  Configuration conf = context.getConfiguration();
  Path[] files = DistributedCache.getLocalCacheFiles(conf);
  Path[] archives = DistributedCache.getLocalCacheArchives(conf);
  FileSystem fs = LocalFileSystem.get(conf);
  // Check that 2 files and 2 archives are present
  TestCase.assertEquals(2, files.length);
  TestCase.assertEquals(2, archives.length);
  // Check lengths of the files
  TestCase.assertEquals(1, fs.getFileStatus(files[0]).getLen());
  TestCase.assertTrue(fs.getFileStatus(files[1]).getLen() > 1);
  // Check extraction of the archive
  TestCase.assertTrue(fs.exists(new Path(archives[0],
      "distributed.jar.inside3")));
  TestCase.assertTrue(fs.exists(new Path(archives[1],
      "distributed.jar.inside4")));
  // Check the class loaders
  LOG.info("Java Classpath: " + System.getProperty("java.class.path"));
  ClassLoader cl = Thread.currentThread().getContextClassLoader();
  // Both the file and the archive were added to classpath, so both
  // should be reachable via the class loader.
  TestCase.assertNotNull(cl.getResource("distributed.jar.inside2"));
  TestCase.assertNotNull(cl.getResource("distributed.jar.inside3"));
  TestCase.assertNull(cl.getResource("distributed.jar.inside4"));
  // Check that the symlink for the renaming was created in the cwd.
  // This only happens for real for non-local jobtrackers.
  // (The symlinks exist in "localRunner/" for local jobtrackers,
  // but the user has no way to get at them.)
  if (!"local".equals(
      context.getConfiguration().get("mapred.job.tracker"))) {
    File symlinkFile = new File("distributed.first.symlink");
    TestCase.assertTrue(symlinkFile.exists());
    TestCase.assertEquals(1, symlinkFile.length());
  }
}
Example 9: testDefaultFsIsUsedForHistory
import org.apache.hadoop.fs.LocalFileSystem; // import the package/class the method depends on
@Test(timeout = 50000)
public void testDefaultFsIsUsedForHistory() throws Exception {
  // Create a default configuration pointing to the minicluster
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
      dfsCluster.getURI().toString());
  FileOutputStream os = new FileOutputStream(coreSitePath);
  conf.writeXml(os);
  os.close();
  // Simulate execution under a non-default namenode
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "file:///");
  TestParams t = new TestParams();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, t.dfsWorkDir);
  JHEvenHandlerForTest realJheh =
      new JHEvenHandlerForTest(t.mockAppContext, 0, false);
  JHEvenHandlerForTest jheh = spy(realJheh);
  jheh.init(conf);
  try {
    jheh.start();
    handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(
        t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1)));
    handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
        TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(),
        new Counters(), new Counters())));
    // If we got here, the event handler worked, but we don't know on which
    // file system. Check that the history files were written to the minicluster.
    FileSystem dfsFileSystem = dfsCluster.getFileSystem();
    assertTrue("Minicluster contains some history files",
        dfsFileSystem.globStatus(new Path(t.dfsWorkDir + "/*")).length != 0);
    FileSystem localFileSystem = LocalFileSystem.get(conf);
    assertFalse("No history directory on non-default file system",
        localFileSystem.exists(new Path(t.dfsWorkDir)));
  } finally {
    jheh.stop();
    purgeHdfsHistoryIntermediateDoneDirectory(conf);
  }
}