This article compiles typical usage examples of the Java method org.apache.hadoop.fs.LocalDirAllocator.ifExists. If you are wondering what LocalDirAllocator.ifExists does, how it is used, or where to find examples of it, the curated method examples below may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.fs.LocalDirAllocator.
The following presents 2 code examples of the LocalDirAllocator.ifExists method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
Example 1: getJsonMeta
import org.apache.hadoop.fs.LocalDirAllocator; // import the package/class required by this method
/**
 * Retrieve the meta information of the file chunks which correspond to the requested URI.
 * Only meta information for file chunks with a non-zero length is retrieved.
 *
 * @param conf Tajo configuration
 * @param lDirAlloc allocator that locates shuffle output files on the configured local directories
 * @param localFS local file system used to qualify the resolved paths
 * @param params pull server request parameters (query id, execution block id, task attempt ids, key range)
 * @param gson Gson instance used to serialize each FileChunkMeta to JSON
 * @param indexReaderCache cache of BSTIndexReader instances keyed by IndexCacheKey
 * @param lowCacheHitCheckThreshold threshold passed to searchFileChunkMeta for detecting a low cache hit rate
 * @return list of JSON-serialized FileChunkMeta entries, one per non-empty chunk
 * @throws IOException if a local file cannot be resolved or read
 * @throws ExecutionException if loading an index reader from the cache fails
 */
public static List<String> getJsonMeta(final TajoConf conf,
final LocalDirAllocator lDirAlloc,
final FileSystem localFS,
final PullServerParams params,
final Gson gson,
final LoadingCache<IndexCacheKey, BSTIndexReader> indexReaderCache,
final int lowCacheHitCheckThreshold)
throws IOException, ExecutionException {
final List<String> taskIds = PullServerUtil.splitMaps(params.taskAttemptIds());
final Path queryBaseDir = PullServerUtil.getBaseOutputDir(params.queryId(), params.ebId());
final List<String> jsonMetas = new ArrayList<>();
for (String eachTaskId : taskIds) {
Path outputPath = StorageUtil.concatPath(queryBaseDir, eachTaskId, "output");
if (!lDirAlloc.ifExists(outputPath.toString(), conf)) {
LOG.warn("Range shuffle - file not exist. " + outputPath);
continue;
}
Path path = localFS.makeQualified(lDirAlloc.getLocalPathToRead(outputPath.toString(), conf));
FileChunkMeta meta = PullServerUtil.searchFileChunkMeta(params.queryId(), params.ebId(), eachTaskId, path,
params.startKey(), params.endKey(), params.last(), indexReaderCache, lowCacheHitCheckThreshold);
if (meta != null && meta.getLength() > 0) {
String jsonStr = gson.toJson(meta, FileChunkMeta.class);
jsonMetas.add(jsonStr);
}
}
return jsonMetas;
}
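
The core pattern in Example 1 is the ifExists guard: the output path is relative to the directories managed by the allocator, and ifExists only reports whether any of those directories contains it, so a missing shuffle file can be logged and skipped instead of surfacing as an exception from getLocalPathToRead. Below is a minimal, standalone sketch of that guard; the configuration key ("hadoop.tmp.dir") and the relative path are illustrative assumptions, not taken from the Tajo code above.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;

public class IfExistsGuardSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Relative paths are resolved against the directories listed under the given
    // configuration key; "hadoop.tmp.dir" is used here only because it has a default
    // value, while Example 1 receives an already-constructed allocator.
    LocalDirAllocator lDirAlloc = new LocalDirAllocator("hadoop.tmp.dir");
    FileSystem localFS = FileSystem.getLocal(conf);

    String outputPath = "shuffle/task_0001/output"; // hypothetical relative path
    if (!lDirAlloc.ifExists(outputPath, conf)) {
      // Same decision as Example 1: log and skip rather than fail.
      System.out.println("file does not exist under any local dir: " + outputPath);
      return;
    }
    // Safe to resolve now that some configured local directory holds the file.
    Path path = localFS.makeQualified(lDirAlloc.getLocalPathToRead(outputPath, conf));
    System.out.println("reading from " + path);
  }
}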
Example 2: run
import org.apache.hadoop.fs.LocalDirAllocator; // import the package/class required by this method
/**
 * Run a single map task in isolation, configured from the given job.xml
 * and an optional user name.
 */
boolean run(String[] args)
throws ClassNotFoundException, IOException, InterruptedException {
if (args.length < 1) {
System.out.println("Usage: IsolationRunner <path>/job.xml " +
"<optional-user-name>");
return false;
}
File jobFilename = new File(args[0]);
if (!jobFilename.exists() || !jobFilename.isFile()) {
System.out.println(jobFilename + " is not a valid job file.");
return false;
}
String user;
if (args.length > 1) {
user = args[1];
} else {
user = UserGroupInformation.getCurrentUser().getShortUserName();
}
JobConf conf = new JobConf(new Path(jobFilename.toString()));
conf.setUser(user);
TaskAttemptID taskId = TaskAttemptID.forName(conf.get("mapred.task.id"));
if (taskId == null) {
  System.out.println("mapred.task.id not found in configuration;" +
      " job.xml is not a task config");
  return false;
}
boolean isMap = conf.getBoolean("mapred.task.is.map", true);
if (!isMap) {
System.out.println("Only map tasks are supported.");
return false;
}
int partition = conf.getInt("mapred.task.partition", 0);
// setup the local and user working directories
FileSystem local = FileSystem.getLocal(conf);
LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir");
Path workDirName;
boolean workDirExists = lDirAlloc.ifExists(MRConstants.WORKDIR, conf);
if (workDirExists) {
workDirName = TaskRunner.formWorkDir(lDirAlloc, conf);
} else {
workDirName = lDirAlloc.getLocalPathForWrite(MRConstants.WORKDIR,
conf);
}
local.setWorkingDirectory(new Path(workDirName.toString()));
FileSystem.get(conf).setWorkingDirectory(conf.getWorkingDirectory());
// set up a classloader with the right classpath
ClassLoader classLoader =
makeClassLoader(conf, new File(workDirName.toString()));
Thread.currentThread().setContextClassLoader(classLoader);
conf.setClassLoader(classLoader);
// split.dta file is used only by IsolationRunner. The file can now be in
// any of the configured local disks, so use LocalDirAllocator to find out
// where it is.
Path localMetaSplit =
new LocalDirAllocator("mapred.local.dir").getLocalPathToRead(
TaskTracker.getLocalSplitFile(conf.getUser(), taskId.getJobID()
.toString(), taskId.toString()), conf);
DataInputStream splitFile = FileSystem.getLocal(conf).open(localMetaSplit);
TaskSplitIndex splitIndex = new TaskSplitIndex();
splitIndex.readFields(splitFile);
splitFile.close();
Task task =
new MapTask(jobFilename.toString(), taskId, partition, splitIndex, 1);
task.setConf(conf);
task.run(conf, new FakeUmbilical());
return true;
}
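
Example 2 illustrates the other common use of ifExists: checking whether a relative working directory already exists under one of the configured local directories, and only asking the allocator for a new writable location when it does not. The sketch below distills that create-if-missing decision; it substitutes getLocalPathToRead for TaskRunner.formWorkDir to stay self-contained, and the configuration key and directory name ("work") are illustrative assumptions.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;

public class WorkDirSketch {
  // Reuse the directory if any configured local dir already holds it,
  // otherwise let the allocator pick a writable local dir for it.
  static Path resolveWorkDir(LocalDirAllocator lDirAlloc, String relativeDir, Configuration conf)
      throws IOException {
    if (lDirAlloc.ifExists(relativeDir, conf)) {
      return lDirAlloc.getLocalPathToRead(relativeDir, conf);
    }
    return lDirAlloc.getLocalPathForWrite(relativeDir, conf);
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // "hadoop.tmp.dir" has a default value; Example 2 uses "mapred.local.dir" instead.
    LocalDirAllocator lDirAlloc = new LocalDirAllocator("hadoop.tmp.dir");
    Path workDir = resolveWorkDir(lDirAlloc, "work", conf); // "work" is an illustrative name
    System.out.println("working directory: " + workDir);
  }
}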