This article collects typical usages of the Java method org.apache.hadoop.filecache.DistributedCache.addFileToClassPath. If you are wondering what DistributedCache.addFileToClassPath does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also explore the wider usage of its containing class, org.apache.hadoop.filecache.DistributedCache.
The following shows 6 code examples of DistributedCache.addFileToClassPath, sorted by popularity by default.
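Before diving into the examples, here is a minimal sketch of the basic call, assuming the jar already resides on HDFS; the path /user/hadoop/lib/my-udfs.jar is hypothetical:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AddJarToClasspath {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Ships the HDFS-resident jar via the DistributedCache and appends it
    // to the classpath of every map/reduce task. The jar path is made up.
    DistributedCache.addFileToClassPath(new Path("/user/hadoop/lib/my-udfs.jar"), conf, fs);
  }
}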
Example 1: addJars
import org.apache.hadoop.filecache.DistributedCache; // import the package/class the method depends on
/**
* Add framework or job-specific jars to the classpath through DistributedCache
* so the mappers can use them.
*/
private void addJars(Path jarFileDir, String jarFileList) throws IOException {
  LocalFileSystem lfs = FileSystem.getLocal(this.conf);

  for (String jarFile : SPLITTER.split(jarFileList)) {
    Path srcJarFile = new Path(jarFile);
    FileStatus[] fileStatusList = lfs.globStatus(srcJarFile);
    for (FileStatus status : fileStatusList) {
      // DistributedCache requires absolute path, so we need to use makeQualified.
      Path destJarFile = new Path(this.fs.makeQualified(jarFileDir), status.getPath().getName());
      // Copy the jar file from local file system to HDFS
      this.fs.copyFromLocalFile(status.getPath(), destJarFile);
      // Then add the jar file on HDFS to the classpath
      LOG.info(String.format("Adding %s to classpath", destJarFile));
      DistributedCache.addFileToClassPath(destJarFile, this.conf, this.fs);
    }
  }
}
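This copy-then-register pattern exists because the DistributedCache ships files from HDFS, not from the client's local disk. Once the job runs, classes from the shipped jars are simply on the task classpath; a minimal mapper-side sketch, with a made-up class name com.example.MyUdf:

try {
  // Classes from jars registered via addFileToClassPath load like any other class.
  Class<?> udfClass = Class.forName("com.example.MyUdf"); // hypothetical class
  Object udf = udfClass.getDeclaredConstructor().newInstance();
  // ... use the instance ...
} catch (ReflectiveOperationException e) {
  throw new RuntimeException("Shipped jar missing from task classpath", e);
}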
Example 2: configure
import org.apache.hadoop.filecache.DistributedCache; // import the package/class the method depends on
@Override
public void configure(Job job) throws IOException {
  for (Path p : getLocalPaths()) {
    Configuration conf = job.getConfiguration();
    FileSystem jobFS = FileSystem.get(conf);
    FileSystem localFS = FileSystem.getLocal(conf);
    Path stagedPath = uploadFileIfNecessary(localFS, p, jobFS);
    DistributedCache.addFileToClassPath(stagedPath, conf, jobFS);
  }
  // We don't really need to set a mapred job jar here,
  // but doing so suppresses a warning
  String mj = getMapredJar();
  if (null != mj) {
    job.getConfiguration().set(Hadoop1Compat.CFG_JOB_JAR, mj);
  }
}
Example 3: provisionQueries
import org.apache.hadoop.filecache.DistributedCache; // import the package/class the method depends on
@SuppressWarnings("deprecation")
public static <T extends Configuration> T provisionQueries(T cfg) {
  if (HadoopCfgUtils.isLocal(cfg)) {
    return cfg;
  }
  try {
    DistributedCache.addFileToClassPath(new Path(TestUtils.sampleQueryDsl()), cfg);
    DistributedCache.addFileToClassPath(new Path(TestUtils.sampleQueryUri()), cfg);
  } catch (IOException ex) {
    // deliberately swallowed in the original test helper
  }
  return cfg;
}
Example 4: addJars
import org.apache.hadoop.filecache.DistributedCache; // import the package/class the method depends on
private void addJars(Configuration conf) throws IOException {
  if (!this.dataset.jobProps().contains(MRCompactor.COMPACTION_JARS)) {
    return;
  }
  Path jarFileDir = new Path(this.dataset.jobProps().getProp(MRCompactor.COMPACTION_JARS));
  for (FileStatus status : this.fs.listStatus(jarFileDir)) {
    DistributedCache.addFileToClassPath(status.getPath(), conf, this.fs);
  }
}
Example 5: run
import org.apache.hadoop.filecache.DistributedCache; // import the package/class the method depends on
@Override
public int run(String[] args) throws Exception {
  final CmdLineParser parser = new CmdLineParser(this);
  try {
    parser.parseArgument(args);
  } catch (final CmdLineException e) {
    System.err.println(e.getMessage());
    System.err.println("Usage: hadoop jar HadoopImageIndexer.jar [options]");
    parser.printUsage(System.err);
    return -1;
  }

  final Path[] paths = SequenceFileUtility.getFilePaths(input, "part");
  final Path outputPath = new Path(output);
  if (outputPath.getFileSystem(this.getConf()).exists(outputPath) && replace) {
    outputPath.getFileSystem(this.getConf()).delete(outputPath, true);
  }

  final Job job = TextBytesJobUtil.createJob(paths, outputPath, null, this.getConf());
  job.setJarByClass(this.getClass());
  job.setMapperClass(PcaVladMapper.class);
  job.setNumReduceTasks(0);

  DistributedCache.addFileToClassPath(new Path(indexerData), job.getConfiguration());
  job.getConfiguration().set(VLAD_INDEXER_DATA_PATH_KEY, new Path(indexerData).getName());

  SequenceFileOutputFormat.setCompressOutput(job, !dontcompress);

  job.waitForCompletion(true);
  return 0;
}
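The mapper side is not shown above; here is a sketch of how PcaVladMapper might locate the cached file in its setup method. The method body is an assumption for illustration; only the VLAD_INDEXER_DATA_PATH_KEY lookup is grounded in the driver code:

@Override
protected void setup(Context context) throws IOException, InterruptedException {
  // The driver stored only the file's base name under this key.
  String fileName = context.getConfiguration().get(VLAD_INDEXER_DATA_PATH_KEY);
  // Files registered with addFileToClassPath are also localized as cache files,
  // so they can be found among the local cache paths.
  for (Path localized : DistributedCache.getLocalCacheFiles(context.getConfiguration())) {
    if (localized.getName().equals(fileName)) {
      // ... load the indexer data from 'localized' ...
    }
  }
}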
Example 6: addFileToClasspath
import org.apache.hadoop.filecache.DistributedCache; // import the package/class the method depends on
/**
 * Adds a file in HDFS to the classpath for Hadoop nodes (via the
 * DistributedCache)
 *
 * @param hdfsConfig the HDFSConfig object with host and port set
 * @param conf the Configuration object that will be changed by this operation
 * @param path the path to the file (in HDFS) to be added to the classpath for
 *          Hadoop nodes
 * @param env any environment variables
 * @throws IOException if a problem occurs
 */
public static void addFileToClasspath(HDFSConfig hdfsConfig,
  Configuration conf, String path, Environment env) throws IOException {

  // conf.set(HDFSConfig.FS_DEFAULT_NAME,
  // HDFSConfig.constructHostURL(hdfsConfig, env));
  hdfsConfig.configureForHadoop(conf, env);

  FileSystem fs = FileSystem.get(conf);

  if (path.startsWith("hdfs://")) {
    throw new IOException("Path should not include 'hdfs://host:port'");
  }

  if (env != null) {
    try {
      path = env.substitute(path);
    } catch (Exception ex) {
      // ignore substitution failures and use the path as supplied
    }
  }

  // if (!path.startsWith("/")) {
  // path = "/" + path;
  // }

  // We know that all job-specific jars are installed in the user's home
  // directory
  Path destPath = new Path(path);
  String userHome = fs.getHomeDirectory().toString();
  String absolutePath = userHome + "/" + destPath.toString();

  if (absolutePath.startsWith("hdfs://")) {
    // strip this off - for some reason under CDH4
    // DistributedCache.addFileToClassPath() keeps the hdfs:// part
    // of the URL in the classpath spec! Apache does not do this.
    absolutePath = absolutePath.replace("hdfs://", "");
    absolutePath = absolutePath.substring(absolutePath.indexOf("/"),
      absolutePath.length());
  }

  destPath = new Path(absolutePath);

  DistributedCache.addFileToClassPath(destPath, conf, fs);
  checkForWindowsAccessingHadoopOnLinux(conf);
}
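A hypothetical invocation of this helper, with made-up host, port, and jar name; the HDFSConfig setter names are assumed from the surrounding class, not confirmed by this snippet:

HDFSConfig hdfsConfig = new HDFSConfig();
hdfsConfig.setHDFSHost("namenode.example.com"); // assumed setter name
hdfsConfig.setHDFSPort("8020"); // assumed setter name
Configuration conf = new Configuration();
// Resolves "lib/weka.jar" against the user's HDFS home directory and adds
// the result to the classpath of the Hadoop tasks.
addFileToClasspath(hdfsConfig, conf, "lib/weka.jar", null);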