当前位置: 首页>>代码示例>>Java>>正文


Java RunJar.unJar方法代码示例

本文整理汇总了Java中org.apache.hadoop.util.RunJar.unJar方法的典型用法代码示例。如果您正苦于以下问题:Java RunJar.unJar方法的具体用法?Java RunJar.unJar怎么用?Java RunJar.unJar使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.util.RunJar的用法示例。


在下文中一共展示了RunJar.unJar方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: localizeJobJarFile

import org.apache.hadoop.util.RunJar; //导入方法依赖的package包/类
/**
 * Copy the job jar from the job's filesystem to local disk, expand the
 * required pieces beside the copy, and rewrite the configuration so it
 * points at the localized jar.
 *
 * @param localJobConf job configuration; its jar entry is read and then
 *                     replaced with the local path
 * @throws IOException on any filesystem failure during copy or unpack
 */
private void localizeJobJarFile(JobConf localJobConf) throws IOException {
  String jarFile = localJobConf.getJar();
  if (jarFile == null) {
    return; // no job jar configured; nothing to localize
  }
  Path jarFilePath = new Path(jarFile);
  FileSystem userFs = jarFilePath.getFileSystem(localJobConf);
  long jarFileSize;
  try {
    jarFileSize = userFs.getFileStatus(jarFilePath).getLen();
  } catch (FileNotFoundException fe) {
    jarFileSize = -1; // size unknown; let the copy below surface the error
  }
  // Reserve five times the jar size so both the jar and its unpacked
  // contents fit in the allocated local directory.
  Path localJarFile =
    lDirAlloc.getLocalPathForWrite(JARDST, 5 * jarFileSize, ttConf);

  // Download job.jar to local disk and record its new location.
  userFs.copyToLocalFile(jarFilePath, localJarFile);
  localJobConf.setJar(localJarFile.toString());
  // Unpack only the parts of job.jar that must land on the classpath,
  // plus anything the user's unpack pattern explicitly requests.
  RunJar.unJar(
    new File(localJarFile.toString()),
    new File(localJarFile.getParent().toString()),
    localJobConf.getJarUnpackPattern());
  FileUtil.chmod(localJarFile.getParent().toString(), "ugo+rx", true);
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:39,代码来源:JobLocalizer.java

示例2: localizeJobJarFile

import org.apache.hadoop.util.RunJar; //导入方法依赖的package包/类
/**
 * Localize the job jar: download it from the job's filesystem, unpack the
 * whole archive next to the local copy, and point the configuration at
 * the localized jar.
 *
 * @param localJobConf configuration whose jar entry is rewritten to the
 *                     localized path
 * @throws IOException if the copy or unpack fails
 */
private void localizeJobJarFile(JobConf localJobConf) throws IOException {
  final String jarFile = localJobConf.getJar();
  if (jarFile != null) {
    final Path jarFilePath = new Path(jarFile);
    final FileSystem userFs = jarFilePath.getFileSystem(localJobConf);
    long size;
    try {
      size = userFs.getFileStatus(jarFilePath).getLen();
    } catch (FileNotFoundException fe) {
      size = -1; // unknown; the copy below will fail if it's truly missing
    }
    // Request 5x the jar size so the expanded contents fit alongside it.
    Path localJarFile =
      lDirAlloc.getLocalPathForWrite(JARDST, 5 * size, ttConf);

    // Download job.jar and record where it landed.
    userFs.copyToLocalFile(jarFilePath, localJarFile);
    localJobConf.setJar(localJarFile.toString());
    // Unpack the entire jar so classes under sub-directories such as
    // lib/ and classes/ become visible on the class path.
    File jarOnDisk = new File(localJarFile.toString());
    File unpackDir = new File(localJarFile.getParent().toString());
    RunJar.unJar(jarOnDisk, unpackDir);
    FileUtil.chmod(localJarFile.getParent().toString(), "ugo+rx", true);
  }
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:37,代码来源:JobLocalizer.java

示例3: localizeJobJarFile

import org.apache.hadoop.util.RunJar; //导入方法依赖的package包/类
/**
 * Download the job jar to the local filesystem, unpack the classpath
 * portions beside it, and update the configuration with the local path.
 *
 * @param localJobConf configuration providing the jar location; updated
 *                     in place to reference the localized jar
 * @throws IOException on filesystem failure, or if the chmod step is
 *                     interrupted
 */
private void localizeJobJarFile(JobConf localJobConf) throws IOException {
  String jarFile = localJobConf.getJar();
  if (jarFile == null) {
    return; // job has no jar; nothing to do
  }
  Path jarFilePath = new Path(jarFile);
  FileSystem userFs = jarFilePath.getFileSystem(localJobConf);
  long jarLen;
  try {
    jarLen = userFs.getFileStatus(jarFilePath).getLen();
  } catch (FileNotFoundException fe) {
    jarLen = -1; // size unknown; proceed and let the copy report errors
  }
  // Allocate five times the jar size to leave room for the unpacked
  // contents in the jars directory.
  Path localJarFile =
    lDirAlloc.getLocalPathForWrite(JARDST, 5 * jarLen, ttConf);

  // Download job.jar and record its local location.
  userFs.copyToLocalFile(jarFilePath, localJarFile);
  localJobConf.setJar(localJarFile.toString());
  // Unpack the parts of job.jar needed on the classpath, plus anything
  // matching the user's explicit unpack pattern.
  RunJar.unJar(
    new File(localJarFile.toString()),
    new File(localJarFile.getParent().toString()),
    localJobConf.getJarUnpackPattern());
  try {
    FileUtil.chmod(localJarFile.getParent().toString(), "ugo+rx", true);
  } catch (InterruptedException ie) {
    // CDH's FileUtil.chmod declares InterruptedException even though it
    // is never actually thrown, and the signature cannot be changed
    // compatibly within CDH — so translate it to IOException here.
    throw new IOException("Interrupted while chmodding", ie);
  }
}
 
开发者ID:karahiyo,项目名称:hanoi-hadoop-2.0.0-cdh,代码行数:45,代码来源:JobLocalizer.java

示例4: localizeJobJarFile

import org.apache.hadoop.util.RunJar; //导入方法依赖的package包/类
/**
 * Download the job jar for the given job from its filesystem to local
 * disk, unpack the classpath portions next to it, and store the local
 * location in the passed configuration.
 *
 * @param user user the job belongs to (used to build the local jar path)
 * @param jobId job whose jar is being localized
 * @param localFs local filesystem handle (kept for the caller's contract)
 * @param localJobConf configuration providing and receiving the jar path
 * @throws IOException on any filesystem failure
 * @throws InterruptedException declared for compatibility with callers
 */
private void localizeJobJarFile(String user, JobID jobId, FileSystem localFs,
    JobConf localJobConf)
    throws IOException, InterruptedException {
  String jarFile = localJobConf.getJar();
  if (jarFile == null) {
    return; // nothing configured; skip localization
  }
  Path jarFilePath = new Path(jarFile);
  FileSystem fs = getFS(jarFilePath, jobId, localJobConf);
  long jarFileSize;
  try {
    jarFileSize = fs.getFileStatus(jarFilePath).getLen();
  } catch (FileNotFoundException fe) {
    jarFileSize = -1; // unknown size; the copy below will surface errors
  }
  // Five times the jar size is requested so the unpacked contents fit
  // in the jars directory as well.
  Path localJarFile = lDirAlloc.getLocalPathForWrite(
      getJobJarFile(user, jobId.toString()), 5 * jarFileSize, fConf);

  // Download job.jar and record the localized path.
  fs.copyToLocalFile(jarFilePath, localJarFile);
  localJobConf.setJar(localJarFile.toString());

  // Unpack only what must end up on the classpath, per the job's
  // unpack pattern.
  RunJar.unJar(
      new File(localJarFile.toString()),
      new File(localJarFile.getParent().toString()),
      localJobConf.getJarUnpackPattern());
}
 
开发者ID:rekhajoshm,项目名称:mapreduce-fork,代码行数:44,代码来源:TaskTracker.java

示例5: submitAndMonitorJob

import org.apache.hadoop.util.RunJar; //导入方法依赖的package包/类
/**
 * Submits the configured streaming job and, unless running in the
 * background, monitors it until completion.
 *
 * @return 0 on success; a distinct non-zero code for each failure class
 *         (1 = job failed, 2-6 = submission/monitoring errors)
 * @throws IOException from the local jar unpacking step
 */
public int submitAndMonitorJob() throws IOException {

    if (jar_ != null && isLocalHadoop()) {
      // An absolute path is required because the shell and the spawned
      // VM may not share a working directory.
      RunJar.unJar(new File(jar_), new File(".").getAbsoluteFile());
    }

    // jobConf_ may have changed since the last submission, so always
    // build a fresh JobClient.
    jc_ = new JobClient(jobConf_);
    running_ = null;
    try {
      running_ = jc_.submitJob(jobConf_);
      jobId_ = running_.getID();
      if (background_) {
        LOG.info("Job is running in background.");
      } else if (!jc_.monitorAndPrintJob(jobConf_, running_)) {
        // monitorAndPrintJob returned false: the job did not succeed.
        LOG.error("Job not successful!");
        return 1;
      }
      LOG.info("Output directory: " + output_);
    } catch (FileNotFoundException fe) {
      LOG.error("Error launching job , bad input path : " + fe.getMessage());
      return 2;
    } catch (InvalidJobConfException je) {
      LOG.error("Error launching job , Invalid job conf : " + je.getMessage());
      return 3;
    } catch (FileAlreadyExistsException fae) {
      LOG.error("Error launching job , Output path already exists : "
                + fae.getMessage());
      return 4;
    } catch (IOException ioe) {
      LOG.error("Error Launching job : " + ioe.getMessage());
      return 5;
    } catch (InterruptedException ie) {
      LOG.error("Error monitoring job : " + ie.getMessage());
      return 6;
    } finally {
      // Always release the client, success or failure.
      jc_.close();
    }
    return 0;
  }
 
开发者ID:naver,项目名称:hadoop,代码行数:43,代码来源:StreamJob.java

示例6: submitAndMonitorJob

import org.apache.hadoop.util.RunJar; //导入方法依赖的package包/类
/**
 * Submits the streaming job described by {@code jobConf_} and monitors
 * it to completion unless background mode is enabled.
 *
 * @return 0 if the job succeeds; 1 if it runs but fails; 2-6 for the
 *         various submission/monitoring error categories
 * @throws IOException from the local jar unpacking step
 */
public int submitAndMonitorJob() throws IOException {

    if (jar_ != null && isLocalHadoop()) {
      // getAbsoluteFile is needed because the shell and the sub-VM can
      // have different working directories.
      File workingDir = new File(".").getAbsoluteFile();
      RunJar.unJar(new File(jar_), workingDir);
    }

    // A changed jobConf_ requires a new JobClient, so recreate it on
    // every submission.
    jc_ = new JobClient(jobConf_);
    running_ = null;
    try {
      running_ = jc_.submitJob(jobConf_);
      jobId_ = running_.getID();
      if (background_) {
        LOG.info("Job is running in background.");
      } else if (!jc_.monitorAndPrintJob(jobConf_, running_)) {
        // The job completed but did not succeed.
        LOG.error("Job not Successful!");
        return 1;
      }
      LOG.info("Output directory: " + output_);
    } catch (FileNotFoundException fe) {
      LOG.error("Error launching job , bad input path : " + fe.getMessage());
      return 2;
    } catch (InvalidJobConfException je) {
      LOG.error("Error launching job , Invalid job conf : " + je.getMessage());
      return 3;
    } catch (FileAlreadyExistsException fae) {
      LOG.error("Error launching job , Output path already exists : "
                + fae.getMessage());
      return 4;
    } catch (IOException ioe) {
      LOG.error("Error Launching job : " + ioe.getMessage());
      return 5;
    } catch (InterruptedException ie) {
      LOG.error("Error monitoring job : " + ie.getMessage());
      return 6;
    } finally {
      // Release the client whether or not submission succeeded.
      jc_.close();
    }
    return 0;
  }
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:43,代码来源:StreamJob.java

示例7: processFiles

import org.apache.hadoop.util.RunJar; //导入方法依赖的package包/类
/**
 * Process list of resources: register each one for class-path exposure and
 * cleanup tracking, and optionally download (and, for archives, extract)
 * it into the job's local working directory.
 *
 * @param jobLocDir Job working directory.
 * @param files Array of {@link URI} or {@link org.apache.hadoop.fs.Path} to process resources.
 * @param download {@code true}, if need to download. Otherwise only class-path bookkeeping is performed.
 * @param extract {@code true}, if need to extract archive (.jar/.zip/.tar/.tgz/.tar.gz).
 * @param clsPathUrls Collection to add resource as classpath resource.
 * @param rsrcNameProp Property for resource name array setting; ignored when {@code null}.
 * @throws IOException If failed.
 */
private void processFiles(File jobLocDir, @Nullable Object[] files, boolean download, boolean extract,
    @Nullable Collection<URL> clsPathUrls, @Nullable String rsrcNameProp) throws IOException {
    // Fast exit: nothing to do for an empty or all-null resource list.
    if (F.isEmptyOrNulls(files))
        return;

    // Local names of all processed resources, for the rsrcNameProp setting.
    Collection<String> res = new ArrayList<>();

    for (Object pathObj : files) {
        Path srcPath;

        // Each entry is either a URI or already a Hadoop Path.
        if (pathObj instanceof URI) {
            URI uri = (URI)pathObj;

            srcPath = new Path(uri);
        }
        else
            srcPath = (Path)pathObj;

        String locName = srcPath.getName();

        // Destination inside the job's local working directory.
        File dstPath = new File(jobLocDir.getAbsolutePath(), locName);

        res.add(locName);

        // Track for later cleanup regardless of whether we download it.
        rsrcSet.add(dstPath);

        if (clsPathUrls != null)
            clsPathUrls.add(dstPath.toURI().toURL());

        // In class-path-only mode, skip the copy/extract work entirely.
        if (!download)
            continue;

        JobConf cfg = ctx.getJobConf();

        FileSystem dstFs = FileSystem.getLocal(cfg);

        FileSystem srcFs = job.fileSystem(srcPath.toUri(), cfg);

        if (extract) {
            // Archives are first copied into a hidden staging directory,
            // then unpacked into dstPath.
            File archivesPath = new File(jobLocDir.getAbsolutePath(), ".cached-archives");

            if (!archivesPath.exists() && !archivesPath.mkdir())
                throw new IOException("Failed to create directory " +
                    "[path=" + archivesPath + ", jobId=" + jobId + ']');

            File archiveFile = new File(archivesPath, locName);

            FileUtil.copy(srcFs, srcPath, dstFs, new Path(archiveFile.toString()), false, cfg);

            // Dispatch on the (case-insensitive) archive extension.
            String archiveNameLC = archiveFile.getName().toLowerCase();

            if (archiveNameLC.endsWith(".jar"))
                RunJar.unJar(archiveFile, dstPath);
            else if (archiveNameLC.endsWith(".zip"))
                FileUtil.unZip(archiveFile, dstPath);
            else if (archiveNameLC.endsWith(".tar.gz") ||
                archiveNameLC.endsWith(".tgz") ||
                archiveNameLC.endsWith(".tar"))
                FileUtil.unTar(archiveFile, dstPath);
            else
                throw new IOException("Cannot unpack archive [path=" + srcPath + ", jobId=" + jobId + ']');
        }
        else
            // Plain file: copy directly into the working directory.
            FileUtil.copy(srcFs, srcPath, dstFs, new Path(dstPath.toString()), false, cfg);
    }

    // Publish the resource names through the requested job property.
    if (!res.isEmpty() && rsrcNameProp != null)
        ctx.getJobConf().setStrings(rsrcNameProp, res.toArray(new String[res.size()]));
}
 
开发者ID:apache,项目名称:ignite,代码行数:81,代码来源:HadoopV2JobResourceManager.java

示例8: localizeCache

import org.apache.hadoop.util.RunJar; //导入方法依赖的package包/类
/**
 * Localizes one distributed-cache entry: copies it from the filesystem of
 * {@code cache} to the localized load path and, for archives, unpacks it
 * beside the copy. Updates cache bookkeeping (size, sub-directory count),
 * sets permissions, and records the source modification time.
 *
 * @param conf configuration used to resolve both filesystems
 * @param cache URI of the file or archive to localize
 * @param confFileStamp timestamp from the job conf; not consulted in this
 *        method body (kept for the caller's contract)
 * @param cacheStatus tracking record for this entry; its size and mtime
 *        fields are updated here
 * @param isArchive whether the entry should be treated as an archive and
 *        unpacked
 * @param isPublic passed through to {@code setPermissions}
 * @return the localized load path recorded in {@code cacheStatus}
 * @throws IOException if any copy or directory creation fails
 */
Path localizeCache(Configuration conf,
                                  URI cache, long confFileStamp,
                                  CacheStatus cacheStatus,
                                  boolean isArchive, boolean isPublic)
throws IOException {
  FileSystem fs = FileSystem.get(cache, conf);
  FileSystem localFs = FileSystem.getLocal(conf);
  Path parchive = null;
  if (isArchive) {
    // Archives are copied into a subdirectory named after the load path,
    // so the unpacked contents land next to the archive itself.
    parchive = new Path(cacheStatus.localizedLoadPath,
      new Path(cacheStatus.localizedLoadPath.getName()));
  } else {
    parchive = cacheStatus.localizedLoadPath;
  }

  if (!localFs.mkdirs(parchive.getParent())) {
    throw new IOException("Mkdirs failed to create directory " +
        cacheStatus.localizedLoadPath.toString());
  }

  // Copy the entry from its source filesystem onto local disk.
  String cacheId = cache.getPath();
  fs.copyToLocalFile(new Path(cacheId), parchive);
  if (isArchive) {
    // Dispatch on the lower-cased extension to pick the unpack routine.
    String tmpArchive = parchive.toString().toLowerCase();
    File srcFile = new File(parchive.toString());
    File destDir = new File(parchive.getParent().toString());
    LOG.info(String.format("Extracting %s to %s",
        srcFile.toString(), destDir.toString()));
    if (tmpArchive.endsWith(".jar")) {
      RunJar.unJar(srcFile, destDir);
    } else if (tmpArchive.endsWith(".zip")) {
      FileUtil.unZip(srcFile, destDir);
    } else if (isTarFile(tmpArchive)) {
      FileUtil.unTar(srcFile, destDir);
    } else {
      LOG.warn(String.format(
          "Cache file %s specified as archive, but not valid extension.", 
          srcFile.toString()));
      // Unrecognized extension: do not unpack anything; the file has
      // already been copied into the directory as-is above.
    }
  }

  // Measure everything under the parent so the accounting covers both
  // the archive and its unpacked contents.
  long cacheSize = 
    FileUtil.getDU(new File(parchive.getParent().toString()));
  cacheStatus.size = cacheSize;
  // Increase the size and sub directory count of the cache
  // from baseDirSize and baseDirNumberSubDir.
  baseDirManager.addCacheUpdate(cacheStatus);

  // set proper permissions for the localized directory
  setPermissions(conf, cacheStatus, isPublic);

  // update cacheStatus to reflect the newly cached file
  cacheStatus.mtime = getTimestamp(conf, cache);

  LOG.info(String.format("Cached %s as %s",
           cache.toString(), cacheStatus.localizedLoadPath));
  return cacheStatus.localizedLoadPath;
}
 
开发者ID:rekhajoshm,项目名称:mapreduce-fork,代码行数:61,代码来源:TrackerDistributedCacheManager.java

示例9: submitAndMonitorJob

import org.apache.hadoop.util.RunJar; //导入方法依赖的package包/类
/**
 * Submits the streaming job, prints job info, and — unless background
 * mode is active — monitors the job until it finishes.
 *
 * @return 0 on success; 1 when the job runs but fails; 2-6 for the
 *         distinct submission/monitoring error categories
 * @throws IOException from the local jar unpacking step
 */
public int submitAndMonitorJob() throws IOException {

    if (jar_ != null && isLocalHadoop()) {
      // The absolute path matters here: shell and sub-VM working
      // directories may differ.
      RunJar.unJar(new File(jar_), new File(".").getAbsoluteFile());
    }

    // Rebuild the JobClient every time in case jobConf_ has changed.
    jc_ = new JobClient(jobConf_);
    running_ = null;
    try {
      running_ = jc_.submitJob(jobConf_);
      jobId_ = running_.getID();
      // Print tracking details for the freshly submitted job.
      jobInfo();
      if (background_) {
        LOG.info("Job is running in background.");
      } else if (!jc_.monitorAndPrintJob(jobConf_, running_)) {
        // Completed without success.
        LOG.error("Job not Successful!");
        return 1;
      }
      LOG.info("Output directory: " + output_);
    } catch (FileNotFoundException fe) {
      LOG.error("Error launching job , bad input path : " + fe.getMessage());
      return 2;
    } catch (InvalidJobConfException je) {
      LOG.error("Error launching job , Invalid job conf : " + je.getMessage());
      return 3;
    } catch (FileAlreadyExistsException fae) {
      LOG.error("Error launching job , Output path already exists : " 
                + fae.getMessage());
      return 4;
    } catch (IOException ioe) {
      LOG.error("Error Launching job : " + ioe.getMessage());
      return 5;
    } catch (InterruptedException ie) {
      LOG.error("Error monitoring job : " + ie.getMessage());
      return 6;
    } finally {
      // Close the client on every exit path.
      jc_.close();
    }
    return 0;
  }
 
开发者ID:rekhajoshm,项目名称:mapreduce-fork,代码行数:44,代码来源:StreamJob.java


注:本文中的org.apache.hadoop.util.RunJar.unJar方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。